FFmpeg
webp.c
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  *
37  * Unimplemented:
38  * - Animation
39  * - ICC profile
40  * - XMP metadata
41  */
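/* For reference, the container parsed by webp_decode_frame() below is a RIFF
 * file: "RIFF" <le32 file size> "WEBP", followed by chunks of the form
 * <fourcc> <le32 payload size> <payload>, with payloads padded to an even
 * number of bytes. Roughly:
 *   "VP8 "  lossy image data (decoded with the VP8 decoder)
 *   "VP8L"  lossless image data
 *   "VP8X"  extended header: feature flags and canvas size
 *   "ALPH"  alpha plane for a lossy image
 *   "EXIF"  Exif metadata
 * ICCP, ANIM, ANMF and XMP chunks are recognized but skipped. */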
42 
43 #define BITSTREAM_READER_LE
44 #include "libavutil/imgutils.h"
45 #include "avcodec.h"
46 #include "bytestream.h"
47 #include "exif.h"
48 #include "internal.h"
49 #include "get_bits.h"
50 #include "thread.h"
51 #include "vp8.h"
52 
53 #define VP8X_FLAG_ANIMATION 0x02
54 #define VP8X_FLAG_XMP_METADATA 0x04
55 #define VP8X_FLAG_EXIF_METADATA 0x08
56 #define VP8X_FLAG_ALPHA 0x10
57 #define VP8X_FLAG_ICC 0x20
58 
59 #define MAX_PALETTE_SIZE 256
60 #define MAX_CACHE_BITS 11
61 #define NUM_CODE_LENGTH_CODES 19
62 #define HUFFMAN_CODES_PER_META_CODE 5
63 #define NUM_LITERAL_CODES 256
64 #define NUM_LENGTH_CODES 24
65 #define NUM_DISTANCE_CODES 40
66 #define NUM_SHORT_DISTANCES 120
67 #define MAX_HUFFMAN_CODE_LENGTH 15
68 
69 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
70  NUM_LITERAL_CODES + NUM_LENGTH_CODES,
71  NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
72  NUM_DISTANCE_CODES
73 };
74 
75 static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES] = {
76  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
77 };
78 
79 static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
80  { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
81  { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
82  { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
83  { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
84  { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
85  { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
86  { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
87  { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
88  { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
89  { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
90  { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
91  { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
92  { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
93  { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
94  { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
95 };
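/* Illustration: when an LZ77 distance code is <= NUM_SHORT_DISTANCES, the
 * pair above is an (dx, dy) offset that is folded into a linear distance of
 * dx + dy * width (clamped to at least 1). For example, code 1 maps to
 * { 0, 1 }, the pixel directly above (distance == width), and code 2 maps to
 * { 1, 0 }, the previous pixel (distance == 1). Codes above 120 simply encode
 * distance - NUM_SHORT_DISTANCES. */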
96 
97 enum AlphaCompression {
98  ALPHA_COMPRESSION_NONE,
99  ALPHA_COMPRESSION_VP8L,
100 };
101 
102 enum AlphaFilter {
103  ALPHA_FILTER_NONE,
104  ALPHA_FILTER_HORIZONTAL,
105  ALPHA_FILTER_VERTICAL,
106  ALPHA_FILTER_GRADIENT,
107 };
108 
109 enum TransformType {
110  PREDICTOR_TRANSFORM = 0,
111  COLOR_TRANSFORM = 1,
112  SUBTRACT_GREEN = 2,
113  COLOR_INDEXING_TRANSFORM = 3,
114 };
115 
116 enum PredictionMode {
117  PRED_MODE_BLACK,
118  PRED_MODE_L,
119  PRED_MODE_T,
120  PRED_MODE_TR,
121  PRED_MODE_TL,
122  PRED_MODE_AVG_T_AVG_L_TR,
123  PRED_MODE_AVG_L_TL,
124  PRED_MODE_AVG_L_T,
125  PRED_MODE_AVG_TL_T,
126  PRED_MODE_AVG_T_TR,
127  PRED_MODE_AVG_AVG_L_TL_AVG_T_TR,
128  PRED_MODE_SELECT,
129  PRED_MODE_ADD_SUBTRACT_FULL,
130  PRED_MODE_ADD_SUBTRACT_HALF,
131 };
132 
133 enum HuffmanIndex {
134  HUFF_IDX_GREEN = 0,
135  HUFF_IDX_RED = 1,
136  HUFF_IDX_BLUE = 2,
137  HUFF_IDX_ALPHA = 3,
138  HUFF_IDX_DIST = 4
139 };
140 
141 /* The structure of WebP lossless is an optional series of transformation data,
142  * followed by the primary image. The primary image also optionally contains
143  * an entropy group mapping if there are multiple entropy groups. There is a
144  * basic image type called an "entropy coded image" that is used for all of
145  * these. The type of each entropy coded image is referred to by the
146  * specification as its role. */
147 enum ImageRole {
148  /* Primary Image: Stores the actual pixels of the image. */
149  IMAGE_ROLE_ARGB,
150 
151  /* Entropy Image: Defines which Huffman group to use for different areas of
152  * the primary image. */
153  IMAGE_ROLE_ENTROPY,
154 
155  /* Predictors: Defines which predictor type to use for different areas of
156  * the primary image. */
157  IMAGE_ROLE_PREDICTOR,
158 
159  /* Color Transform Data: Defines the color transformation for different
160  * areas of the primary image. */
161  IMAGE_ROLE_COLOR_TRANSFORM,
162 
163  /* Color Index: Stored as an image of height == 1. */
164  IMAGE_ROLE_COLOR_INDEXING,
165 
166  IMAGE_ROLE_NB,
167 };
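/* Note on ordering: transforms are parsed from the bitstream in the order
 * they appear (see the parsing loop in vp8_lossless_decode_frame()), each
 * possibly carrying its own helper image with one of the roles above, and
 * they are applied to the decoded ARGB image in reverse order. */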
168 
169 typedef struct HuffReader {
170  VLC vlc; /* Huffman decoder context */
171  int simple; /* whether to use simple mode */
172  int nb_symbols; /* number of coded symbols */
173  uint16_t simple_symbols[2]; /* symbols for simple mode */
174 } HuffReader;
175 
176 typedef struct ImageContext {
177  enum ImageRole role; /* role of this image */
178  AVFrame *frame; /* AVFrame for data */
179  int color_cache_bits; /* color cache size, log2 */
180  uint32_t *color_cache; /* color cache data */
181  int nb_huffman_groups; /* number of huffman groups */
182  HuffReader *huffman_groups; /* reader for each huffman group */
183  int size_reduction; /* relative size compared to primary image, log2 */
184  int is_alpha_primary;
185 } ImageContext;
186 
187 typedef struct WebPContext {
188  VP8Context v; /* VP8 Context used for lossy decoding */
189  GetBitContext gb; /* bitstream reader for main image chunk */
190  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
191  AVCodecContext *avctx; /* parent AVCodecContext */
192  int initialized; /* set once the VP8 context is initialized */
193  int has_alpha; /* has a separate alpha chunk */
194  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
195  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
196  uint8_t *alpha_data; /* alpha chunk data */
197  int alpha_data_size; /* alpha chunk data size */
198  int has_exif; /* set after an EXIF chunk has been processed */
199  AVDictionary *exif_metadata; /* EXIF chunk data */
200  int width; /* image width */
201  int height; /* image height */
202  int lossless; /* indicates lossless or lossy */
203 
204  int nb_transforms; /* number of transforms */
205  enum TransformType transforms[4]; /* transformations used in the image, in order */
206  int reduced_width; /* reduced width for index image, if applicable */
207  int nb_huffman_groups; /* number of huffman groups in the primary image */
208  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
209 } WebPContext;
210 
211 #define GET_PIXEL(frame, x, y) \
212  ((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x))
213 
214 #define GET_PIXEL_COMP(frame, x, y, c) \
215  (*((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x) + c))
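/* The frames decoded here use AV_PIX_FMT_ARGB, so GET_PIXEL() returns a
 * 4-byte pixel laid out as p[0] = alpha, p[1] = red, p[2] = green,
 * p[3] = blue. Helper images (entropy, predictor, color transform, color
 * index) carry their payload in the green channel, which is why they are
 * read with GET_PIXEL_COMP(frame, x, y, 2). */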
216 
217 static void image_ctx_free(ImageContext *img)
218 {
219  int i, j;
220 
221  av_free(img->color_cache);
222  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
223  av_frame_free(&img->frame);
224  if (img->huffman_groups) {
225  for (i = 0; i < img->nb_huffman_groups; i++) {
226  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
227  ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
228  }
229  av_free(img->huffman_groups);
230  }
231  memset(img, 0, sizeof(*img));
232 }
233 
234 
235 /* Differs from get_vlc2() in the following ways:
236  * - codes are bit-reversed
237  * - assumes 8-bit table to make reversal simpler
238  * - assumes max depth of 2 since the max code length for WebP is 15
239  */
240 static int webp_get_vlc(GetBitContext *gb, VLC_TYPE (*table)[2])
241 {
242  int n, nb_bits;
243  unsigned int index;
244  int code;
245 
246  OPEN_READER(re, gb);
247  UPDATE_CACHE(re, gb);
248 
249  index = SHOW_UBITS(re, gb, 8);
250  index = ff_reverse[index];
251  code = table[index][0];
252  n = table[index][1];
253 
254  if (n < 0) {
255  LAST_SKIP_BITS(re, gb, 8);
256  UPDATE_CACHE(re, gb);
257 
258  nb_bits = -n;
259 
260  index = SHOW_UBITS(re, gb, nb_bits);
261  index = (ff_reverse[index] >> (8 - nb_bits)) + code;
262  code = table[index][0];
263  n = table[index][1];
264  }
265  SKIP_BITS(re, gb, n);
266 
267  CLOSE_READER(re, gb);
268 
269  return code;
270 }
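/* Example: the VLC tables built by init_vlc() expect MSB-first codes, while
 * the WebP lossless bitstream stores Huffman codes LSB-first, so the 8-bit
 * lookup index is bit-reversed first (e.g. ff_reverse[0x01] == 0x80). For the
 * second-level lookup only nb_bits of the reversed byte are meaningful, hence
 * the additional >> (8 - nb_bits). */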
271 
272 static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
273 {
274  if (r->simple) {
275  if (r->nb_symbols == 1)
276  return r->simple_symbols[0];
277  else
278  return r->simple_symbols[get_bits1(gb)];
279  } else
280  return webp_get_vlc(gb, r->vlc.table);
281 }
282 
283 static int huff_reader_build_canonical(HuffReader *r, int *code_lengths,
284  int alphabet_size)
285 {
286  int len = 0, sym, code = 0, ret;
287  int max_code_length = 0;
288  uint16_t *codes;
289 
290  /* special-case 1 symbol since the vlc reader cannot handle it */
291  for (sym = 0; sym < alphabet_size; sym++) {
292  if (code_lengths[sym] > 0) {
293  len++;
294  code = sym;
295  if (len > 1)
296  break;
297  }
298  }
299  if (len == 1) {
300  r->nb_symbols = 1;
301  r->simple_symbols[0] = code;
302  r->simple = 1;
303  return 0;
304  }
305 
306  for (sym = 0; sym < alphabet_size; sym++)
307  max_code_length = FFMAX(max_code_length, code_lengths[sym]);
308 
309  if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
310  return AVERROR(EINVAL);
311 
312  codes = av_malloc(alphabet_size * sizeof(*codes));
313  if (!codes)
314  return AVERROR(ENOMEM);
315 
316  code = 0;
317  r->nb_symbols = 0;
318  for (len = 1; len <= max_code_length; len++) {
319  for (sym = 0; sym < alphabet_size; sym++) {
320  if (code_lengths[sym] != len)
321  continue;
322  codes[sym] = code++;
323  r->nb_symbols++;
324  }
325  code <<= 1;
326  }
327  if (!r->nb_symbols) {
328  av_free(codes);
329  return AVERROR_INVALIDDATA;
330  }
331 
332  ret = init_vlc(&r->vlc, 8, alphabet_size,
333  code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
334  codes, sizeof(*codes), sizeof(*codes), 0);
335  if (ret < 0) {
336  av_free(codes);
337  return ret;
338  }
339  r->simple = 0;
340 
341  av_free(codes);
342  return 0;
343 }
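/* Worked example of the canonical assignment above: with an alphabet of four
 * symbols and code lengths {2, 1, 3, 3}, the loop yields
 *   symbol 1 -> 0    (1 bit)
 *   symbol 0 -> 10   (2 bits)
 *   symbol 2 -> 110  (3 bits)
 *   symbol 3 -> 111  (3 bits)
 * i.e. codes are assigned in order of increasing length, doubling the running
 * code value when moving to the next length. */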
344 
345 static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
346 {
347  hc->nb_symbols = get_bits1(&s->gb) + 1;
348 
349  if (get_bits1(&s->gb))
350  hc->simple_symbols[0] = get_bits(&s->gb, 8);
351  else
352  hc->simple_symbols[0] = get_bits1(&s->gb);
353 
354  if (hc->nb_symbols == 2)
355  hc->simple_symbols[1] = get_bits(&s->gb, 8);
356 
357  hc->simple = 1;
358 }
359 
360 static int read_huffman_code_normal(WebPContext *s, HuffReader *hc,
361  int alphabet_size)
362 {
363  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
364  int *code_lengths = NULL;
365  int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
366  int i, symbol, max_symbol, prev_code_len, ret;
367  int num_codes = 4 + get_bits(&s->gb, 4);
368 
369  if (num_codes > NUM_CODE_LENGTH_CODES)
370  return AVERROR_INVALIDDATA;
371 
372  for (i = 0; i < num_codes; i++)
373  code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
374 
375  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
376  NUM_CODE_LENGTH_CODES);
377  if (ret < 0)
378  goto finish;
379 
380  code_lengths = av_mallocz_array(alphabet_size, sizeof(*code_lengths));
381  if (!code_lengths) {
382  ret = AVERROR(ENOMEM);
383  goto finish;
384  }
385 
386  if (get_bits1(&s->gb)) {
387  int bits = 2 + 2 * get_bits(&s->gb, 3);
388  max_symbol = 2 + get_bits(&s->gb, bits);
389  if (max_symbol > alphabet_size) {
390  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
391  max_symbol, alphabet_size);
392  ret = AVERROR_INVALIDDATA;
393  goto finish;
394  }
395  } else {
396  max_symbol = alphabet_size;
397  }
398 
399  prev_code_len = 8;
400  symbol = 0;
401  while (symbol < alphabet_size) {
402  int code_len;
403 
404  if (!max_symbol--)
405  break;
406  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
407  if (code_len < 16) {
408  /* Code length code [0..15] indicates literal code lengths. */
409  code_lengths[symbol++] = code_len;
410  if (code_len)
411  prev_code_len = code_len;
412  } else {
413  int repeat = 0, length = 0;
414  switch (code_len) {
415  case 16:
416  /* Code 16 repeats the previous non-zero value [3..6] times,
417  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
418  * non-zero value has been emitted, a value of 8 is repeated. */
419  repeat = 3 + get_bits(&s->gb, 2);
420  length = prev_code_len;
421  break;
422  case 17:
423  /* Code 17 emits a streak of zeros [3..10], i.e.,
424  * 3 + ReadBits(3) times. */
425  repeat = 3 + get_bits(&s->gb, 3);
426  break;
427  case 18:
428  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
429  * 11 + ReadBits(7) times. */
430  repeat = 11 + get_bits(&s->gb, 7);
431  break;
432  }
433  if (symbol + repeat > alphabet_size) {
434  av_log(s->avctx, AV_LOG_ERROR,
435  "invalid symbol %d + repeat %d > alphabet size %d\n",
436  symbol, repeat, alphabet_size);
437  ret = AVERROR_INVALIDDATA;
438  goto finish;
439  }
440  while (repeat-- > 0)
441  code_lengths[symbol++] = length;
442  }
443  }
444 
445  ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);
446 
447 finish:
448  ff_free_vlc(&code_len_hc.vlc);
449  av_free(code_lengths);
450  return ret;
451 }
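/* Worked example for the repeat codes above: the symbol sequence
 *   4, 16 (extra bits 01), 18 (extra bits 0000000)
 * assigns length 4 to one symbol, repeats that length 3 + 1 = 4 more times,
 * and then emits 11 + 0 = 11 zero lengths. If code 16 appears before any
 * non-zero length, the default previous length of 8 is repeated. */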
452 
453 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
454  int w, int h);
455 
456 #define PARSE_BLOCK_SIZE(w, h) do { \
457  block_bits = get_bits(&s->gb, 3) + 2; \
458  blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
459  blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
460 } while (0)
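/* Example: PARSE_BLOCK_SIZE() reads 3 bits, so block_bits is 2..9 and blocks
 * are 4..512 pixels on a side; blocks_w/blocks_h are the rounded-up number of
 * blocks. For a 100-pixel-wide image with block_bits == 4, blocks_w is
 * FFALIGN(100, 16) >> 4 == 7. */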
461 
462 static int decode_entropy_image(WebPContext *s)
463 {
464  ImageContext *img;
465  int ret, block_bits, width, blocks_w, blocks_h, x, y, max;
466 
467  width = s->width;
468  if (s->reduced_width > 0)
469  width = s->reduced_width;
470 
471  PARSE_BLOCK_SIZE(width, s->height);
472 
473  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
474  if (ret < 0)
475  return ret;
476 
477  img = &s->image[IMAGE_ROLE_ENTROPY];
478  img->size_reduction = block_bits;
479 
480  /* the number of huffman groups is determined by the maximum group number
481  * coded in the entropy image */
482  max = 0;
483  for (y = 0; y < img->frame->height; y++) {
484  for (x = 0; x < img->frame->width; x++) {
485  int p = GET_PIXEL_COMP(img->frame, x, y, 2);
486  max = FFMAX(max, p);
487  }
488  }
489  s->nb_huffman_groups = max + 1;
490 
491  return 0;
492 }
493 
494 static int parse_transform_predictor(WebPContext *s)
495 {
496  int block_bits, blocks_w, blocks_h, ret;
497 
498  PARSE_BLOCK_SIZE(s->width, s->height);
499 
500  ret = decode_entropy_coded_image(s, IMAGE_ROLE_PREDICTOR, blocks_w,
501  blocks_h);
502  if (ret < 0)
503  return ret;
504 
505  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
506 
507  return 0;
508 }
509 
510 static int parse_transform_color(WebPContext *s)
511 {
512  int block_bits, blocks_w, blocks_h, ret;
513 
514  PARSE_BLOCK_SIZE(s->width, s->height);
515 
516  ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_TRANSFORM, blocks_w,
517  blocks_h);
518  if (ret < 0)
519  return ret;
520 
521  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
522 
523  return 0;
524 }
525 
526 static int parse_transform_color_indexing(WebPContext *s)
527 {
528  ImageContext *img;
529  int width_bits, index_size, ret, x;
530  uint8_t *ct;
531 
532  index_size = get_bits(&s->gb, 8) + 1;
533 
534  if (index_size <= 2)
535  width_bits = 3;
536  else if (index_size <= 4)
537  width_bits = 2;
538  else if (index_size <= 16)
539  width_bits = 1;
540  else
541  width_bits = 0;
542 
543  ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_INDEXING,
544  index_size, 1);
545  if (ret < 0)
546  return ret;
547 
548  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
549  img->size_reduction = width_bits;
550  if (width_bits > 0)
551  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
552 
553  /* color index values are delta-coded */
554  ct = img->frame->data[0] + 4;
555  for (x = 4; x < img->frame->width * 4; x++, ct++)
556  ct[0] += ct[-4];
557 
558  return 0;
559 }
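/* Example: a palette of up to 4 colors gives width_bits == 2, so four 2-bit
 * indices are packed into each green byte and the ARGB image is stored at
 * reduced_width == (width + 3) >> 2 (25 for a 100-pixel-wide image); the
 * packed indices are expanded again in apply_color_indexing_transform().
 * The palette itself is delta-coded, so entry i is reconstructed above as the
 * byte-wise sum of raw entries 0..i. */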
560 
561 static HuffReader *get_huffman_group(WebPContext *s, ImageContext *img,
562  int x, int y)
563 {
564  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
565  int group = 0;
566 
567  if (gimg->size_reduction > 0) {
568  int group_x = x >> gimg->size_reduction;
569  int group_y = y >> gimg->size_reduction;
570  group = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
571  }
572 
573  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
574 }
575 
576 static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
577 {
578  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
579  img->color_cache[cache_idx] = c;
580 }
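/* The multiplier 0x1E35A7BD is the color cache hash from the WebP lossless
 * format: an ARGB value is hashed to its top color_cache_bits bits after the
 * multiply. Symbols >= NUM_LITERAL_CODES + NUM_LENGTH_CODES in the green
 * Huffman code select a cache entry directly (see the last branch in
 * decode_entropy_coded_image() below). */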
581 
582 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
583  int w, int h)
584 {
585  ImageContext *img;
586  HuffReader *hg;
587  int i, j, ret, x, y, width;
588 
589  img = &s->image[role];
590  img->role = role;
591 
592  if (!img->frame) {
593  img->frame = av_frame_alloc();
594  if (!img->frame)
595  return AVERROR(ENOMEM);
596  }
597 
598  img->frame->format = AV_PIX_FMT_ARGB;
599  img->frame->width = w;
600  img->frame->height = h;
601 
602  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
603  ThreadFrame pt = { .f = img->frame };
604  ret = ff_thread_get_buffer(s->avctx, &pt, 0);
605  } else
606  ret = av_frame_get_buffer(img->frame, 1);
607  if (ret < 0)
608  return ret;
609 
610  if (get_bits1(&s->gb)) {
611  img->color_cache_bits = get_bits(&s->gb, 4);
612  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
613  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
614  img->color_cache_bits);
615  return AVERROR_INVALIDDATA;
616  }
617  img->color_cache = av_mallocz_array(1 << img->color_cache_bits,
618  sizeof(*img->color_cache));
619  if (!img->color_cache)
620  return AVERROR(ENOMEM);
621  } else {
622  img->color_cache_bits = 0;
623  }
624 
625  img->nb_huffman_groups = 1;
626  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
627  ret = decode_entropy_image(s);
628  if (ret < 0)
629  return ret;
630  img->nb_huffman_groups = s->nb_huffman_groups;
631  }
632  img->huffman_groups = av_mallocz_array(img->nb_huffman_groups *
633  HUFFMAN_CODES_PER_META_CODE,
634  sizeof(*img->huffman_groups));
635  if (!img->huffman_groups)
636  return AVERROR(ENOMEM);
637 
638  for (i = 0; i < img->nb_huffman_groups; i++) {
639  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
640  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
641  int alphabet_size = alphabet_sizes[j];
642  if (!j && img->color_cache_bits > 0)
643  alphabet_size += 1 << img->color_cache_bits;
644 
645  if (get_bits1(&s->gb)) {
646  read_huffman_code_simple(s, &hg[j]);
647  } else {
648  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
649  if (ret < 0)
650  return ret;
651  }
652  }
653  }
654 
655  width = img->frame->width;
656  if (role == IMAGE_ROLE_ARGB && s->reduced_width > 0)
657  width = s->reduced_width;
658 
659  x = 0; y = 0;
660  while (y < img->frame->height) {
661  int v;
662 
663  hg = get_huffman_group(s, img, x, y);
664  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
665  if (v < NUM_LITERAL_CODES) {
666  /* literal pixel values */
667  uint8_t *p = GET_PIXEL(img->frame, x, y);
668  p[2] = v;
669  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
670  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
671  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
672  if (img->color_cache_bits)
673  color_cache_put(img, AV_RB32(p));
674  x++;
675  if (x == width) {
676  x = 0;
677  y++;
678  }
679  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
680  /* LZ77 backwards mapping */
681  int prefix_code, length, distance, ref_x, ref_y;
682 
683  /* parse length and distance */
684  prefix_code = v - NUM_LITERAL_CODES;
685  if (prefix_code < 4) {
686  length = prefix_code + 1;
687  } else {
688  int extra_bits = (prefix_code - 2) >> 1;
689  int offset = 2 + (prefix_code & 1) << extra_bits;
690  length = offset + get_bits(&s->gb, extra_bits) + 1;
691  }
692  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
693  if (prefix_code < 4) {
694  distance = prefix_code + 1;
695  } else {
696  int extra_bits = prefix_code - 2 >> 1;
697  int offset = 2 + (prefix_code & 1) << extra_bits;
698  distance = offset + get_bits(&s->gb, extra_bits) + 1;
699  }
700 
701  /* find reference location */
702  if (distance <= NUM_SHORT_DISTANCES) {
703  int xi = lz77_distance_offsets[distance - 1][0];
704  int yi = lz77_distance_offsets[distance - 1][1];
705  distance = FFMAX(1, xi + yi * width);
706  } else {
707  distance -= NUM_SHORT_DISTANCES;
708  }
709  ref_x = x;
710  ref_y = y;
711  if (distance <= x) {
712  ref_x -= distance;
713  distance = 0;
714  } else {
715  ref_x = 0;
716  distance -= x;
717  }
718  while (distance >= width) {
719  ref_y--;
720  distance -= width;
721  }
722  if (distance > 0) {
723  ref_x = width - distance;
724  ref_y--;
725  }
726  ref_x = FFMAX(0, ref_x);
727  ref_y = FFMAX(0, ref_y);
728 
729  /* copy pixels
730  * source and dest regions can overlap and wrap lines, so just
731  * copy per-pixel */
732  for (i = 0; i < length; i++) {
733  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
734  uint8_t *p = GET_PIXEL(img->frame, x, y);
735 
736  AV_COPY32(p, p_ref);
737  if (img->color_cache_bits)
738  color_cache_put(img, AV_RB32(p));
739  x++;
740  ref_x++;
741  if (x == width) {
742  x = 0;
743  y++;
744  }
745  if (ref_x == width) {
746  ref_x = 0;
747  ref_y++;
748  }
749  if (y == img->frame->height || ref_y == img->frame->height)
750  break;
751  }
752  } else {
753  /* read from color cache */
754  uint8_t *p = GET_PIXEL(img->frame, x, y);
755  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
756 
757  if (!img->color_cache_bits) {
758  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
759  return AVERROR_INVALIDDATA;
760  }
761  if (cache_idx >= 1 << img->color_cache_bits) {
762  av_log(s->avctx, AV_LOG_ERROR,
763  "color cache index out-of-bounds\n");
764  return AVERROR_INVALIDDATA;
765  }
766  AV_WB32(p, img->color_cache[cache_idx]);
767  x++;
768  if (x == width) {
769  x = 0;
770  y++;
771  }
772  }
773  }
774 
775  return 0;
776 }
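/* Worked example for the LZ77 prefix codes above: prefix codes 0..3 map
 * directly to the values 1..4; for a prefix code >= 4,
 *   extra_bits = (prefix - 2) >> 1
 *   value      = ((2 + (prefix & 1)) << extra_bits) + extra + 1
 * so a length prefix of 7 gives extra_bits == 2, a base of 12, and lengths
 * 13..16 depending on the two extra bits. Distances are decoded the same way
 * and then remapped through lz77_distance_offsets[] when <= 120. */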
777 
778 /* PRED_MODE_BLACK */
779 static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
780  const uint8_t *p_t, const uint8_t *p_tr)
781 {
782  AV_WB32(p, 0xFF000000);
783 }
784 
785 /* PRED_MODE_L */
786 static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
787  const uint8_t *p_t, const uint8_t *p_tr)
788 {
789  AV_COPY32(p, p_l);
790 }
791 
792 /* PRED_MODE_T */
793 static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
794  const uint8_t *p_t, const uint8_t *p_tr)
795 {
796  AV_COPY32(p, p_t);
797 }
798 
799 /* PRED_MODE_TR */
800 static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
801  const uint8_t *p_t, const uint8_t *p_tr)
802 {
803  AV_COPY32(p, p_tr);
804 }
805 
806 /* PRED_MODE_TL */
807 static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
808  const uint8_t *p_t, const uint8_t *p_tr)
809 {
810  AV_COPY32(p, p_tl);
811 }
812 
813 /* PRED_MODE_AVG_T_AVG_L_TR */
814 static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
815  const uint8_t *p_t, const uint8_t *p_tr)
816 {
817  p[0] = p_t[0] + (p_l[0] + p_tr[0] >> 1) >> 1;
818  p[1] = p_t[1] + (p_l[1] + p_tr[1] >> 1) >> 1;
819  p[2] = p_t[2] + (p_l[2] + p_tr[2] >> 1) >> 1;
820  p[3] = p_t[3] + (p_l[3] + p_tr[3] >> 1) >> 1;
821 }
822 
823 /* PRED_MODE_AVG_L_TL */
824 static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
825  const uint8_t *p_t, const uint8_t *p_tr)
826 {
827  p[0] = p_l[0] + p_tl[0] >> 1;
828  p[1] = p_l[1] + p_tl[1] >> 1;
829  p[2] = p_l[2] + p_tl[2] >> 1;
830  p[3] = p_l[3] + p_tl[3] >> 1;
831 }
832 
833 /* PRED_MODE_AVG_L_T */
834 static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
835  const uint8_t *p_t, const uint8_t *p_tr)
836 {
837  p[0] = p_l[0] + p_t[0] >> 1;
838  p[1] = p_l[1] + p_t[1] >> 1;
839  p[2] = p_l[2] + p_t[2] >> 1;
840  p[3] = p_l[3] + p_t[3] >> 1;
841 }
842 
843 /* PRED_MODE_AVG_TL_T */
844 static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
845  const uint8_t *p_t, const uint8_t *p_tr)
846 {
847  p[0] = p_tl[0] + p_t[0] >> 1;
848  p[1] = p_tl[1] + p_t[1] >> 1;
849  p[2] = p_tl[2] + p_t[2] >> 1;
850  p[3] = p_tl[3] + p_t[3] >> 1;
851 }
852 
853 /* PRED_MODE_AVG_T_TR */
854 static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
855  const uint8_t *p_t, const uint8_t *p_tr)
856 {
857  p[0] = p_t[0] + p_tr[0] >> 1;
858  p[1] = p_t[1] + p_tr[1] >> 1;
859  p[2] = p_t[2] + p_tr[2] >> 1;
860  p[3] = p_t[3] + p_tr[3] >> 1;
861 }
862 
863 /* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
864 static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
865  const uint8_t *p_t, const uint8_t *p_tr)
866 {
867  p[0] = (p_l[0] + p_tl[0] >> 1) + (p_t[0] + p_tr[0] >> 1) >> 1;
868  p[1] = (p_l[1] + p_tl[1] >> 1) + (p_t[1] + p_tr[1] >> 1) >> 1;
869  p[2] = (p_l[2] + p_tl[2] >> 1) + (p_t[2] + p_tr[2] >> 1) >> 1;
870  p[3] = (p_l[3] + p_tl[3] >> 1) + (p_t[3] + p_tr[3] >> 1) >> 1;
871 }
872 
873 /* PRED_MODE_SELECT */
874 static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
875  const uint8_t *p_t, const uint8_t *p_tr)
876 {
877  int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
878  (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
879  (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
880  (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
881  if (diff <= 0)
882  AV_COPY32(p, p_t);
883  else
884  AV_COPY32(p, p_l);
885 }
886 
887 /* PRED_MODE_ADD_SUBTRACT_FULL */
888 static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
889  const uint8_t *p_t, const uint8_t *p_tr)
890 {
891  p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
892  p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
893  p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
894  p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
895 }
896 
897 static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
898 {
899  int d = a + b >> 1;
900  return av_clip_uint8(d + (d - c) / 2);
901 }
902 
903 /* PRED_MODE_ADD_SUBTRACT_HALF */
904 static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
905  const uint8_t *p_t, const uint8_t *p_tr)
906 {
907  p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
908  p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
909  p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
910  p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
911 }
912 
913 typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
914  const uint8_t *p_tl, const uint8_t *p_t,
915  const uint8_t *p_tr);
916 
917 static const inv_predict_func inverse_predict[14] = {
918  inv_predict_0,  inv_predict_1,  inv_predict_2,  inv_predict_3,
919  inv_predict_4,  inv_predict_5,  inv_predict_6,  inv_predict_7,
920  inv_predict_8,  inv_predict_9,  inv_predict_10, inv_predict_11,
921  inv_predict_12, inv_predict_13,
922 };
923 
924 static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
925 {
926  uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
927  uint8_t p[4];
928 
929  dec = GET_PIXEL(frame, x, y);
930  p_l = GET_PIXEL(frame, x - 1, y);
931  p_tl = GET_PIXEL(frame, x - 1, y - 1);
932  p_t = GET_PIXEL(frame, x, y - 1);
933  if (x == frame->width - 1)
934  p_tr = GET_PIXEL(frame, 0, y);
935  else
936  p_tr = GET_PIXEL(frame, x + 1, y - 1);
937 
938  inverse_predict[m](p, p_l, p_tl, p_t, p_tr);
939 
940  dec[0] += p[0];
941  dec[1] += p[1];
942  dec[2] += p[2];
943  dec[3] += p[3];
944 }
945 
946 static int apply_predictor_transform(WebPContext *s)
947 {
948  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
949  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
950  int x, y;
951 
952  for (y = 0; y < img->frame->height; y++) {
953  for (x = 0; x < img->frame->width; x++) {
954  int tx = x >> pimg->size_reduction;
955  int ty = y >> pimg->size_reduction;
956  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
957 
958  if (x == 0) {
959  if (y == 0)
960  m = PRED_MODE_BLACK;
961  else
962  m = PRED_MODE_T;
963  } else if (y == 0)
964  m = PRED_MODE_L;
965 
966  if (m > 13) {
967  av_log(s->avctx, AV_LOG_ERROR,
968  "invalid predictor mode: %d\n", m);
969  return AVERROR_INVALIDDATA;
970  }
971  inverse_prediction(img->frame, m, x, y);
972  }
973  }
974  return 0;
975 }
976 
977 static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
978  uint8_t color)
979 {
980  return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
981 }
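/* Example: both arguments are reinterpreted as signed 8-bit values, so with a
 * transform element of 0x10 (16) and a green value of 0x40 (64) the delta is
 * (16 * 64) >> 5 == 32, which apply_color_transform() adds (modulo 256) to
 * the red or blue channel. */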
982 
983 static int apply_color_transform(WebPContext *s)
984 {
985  ImageContext *img, *cimg;
986  int x, y, cx, cy;
987  uint8_t *p, *cp;
988 
989  img = &s->image[IMAGE_ROLE_ARGB];
990  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
991 
992  for (y = 0; y < img->frame->height; y++) {
993  for (x = 0; x < img->frame->width; x++) {
994  cx = x >> cimg->size_reduction;
995  cy = y >> cimg->size_reduction;
996  cp = GET_PIXEL(cimg->frame, cx, cy);
997  p = GET_PIXEL(img->frame, x, y);
998 
999  p[1] += color_transform_delta(cp[3], p[2]);
1000  p[3] += color_transform_delta(cp[2], p[2]) +
1001  color_transform_delta(cp[1], p[1]);
1002  }
1003  }
1004  return 0;
1005 }
1006 
1007 static int apply_subtract_green_transform(WebPContext *s)
1008 {
1009  int x, y;
1010  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
1011 
1012  for (y = 0; y < img->frame->height; y++) {
1013  for (x = 0; x < img->frame->width; x++) {
1014  uint8_t *p = GET_PIXEL(img->frame, x, y);
1015  p[1] += p[2];
1016  p[3] += p[2];
1017  }
1018  }
1019  return 0;
1020 }
1021 
1022 static int apply_color_indexing_transform(WebPContext *s)
1023 {
1024  ImageContext *img;
1025  ImageContext *pal;
1026  int i, x, y;
1027  uint8_t *p, *pi;
1028 
1029  img = &s->image[IMAGE_ROLE_ARGB];
1030  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1031 
1032  if (pal->size_reduction > 0) {
1033  GetBitContext gb_g;
1034  uint8_t *line;
1035  int pixel_bits = 8 >> pal->size_reduction;
1036 
1037  line = av_malloc(img->frame->linesize[0]);
1038  if (!line)
1039  return AVERROR(ENOMEM);
1040 
1041  for (y = 0; y < img->frame->height; y++) {
1042  p = GET_PIXEL(img->frame, 0, y);
1043  memcpy(line, p, img->frame->linesize[0]);
1044  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1045  skip_bits(&gb_g, 16);
1046  i = 0;
1047  for (x = 0; x < img->frame->width; x++) {
1048  p = GET_PIXEL(img->frame, x, y);
1049  p[2] = get_bits(&gb_g, pixel_bits);
1050  i++;
1051  if (i == 1 << pal->size_reduction) {
1052  skip_bits(&gb_g, 24);
1053  i = 0;
1054  }
1055  }
1056  }
1057  av_free(line);
1058  }
1059 
1060  for (y = 0; y < img->frame->height; y++) {
1061  for (x = 0; x < img->frame->width; x++) {
1062  p = GET_PIXEL(img->frame, x, y);
1063  i = p[2];
1064  if (i >= pal->frame->width) {
1065  av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
1066  return AVERROR_INVALIDDATA;
1067  }
1068  pi = GET_PIXEL(pal->frame, i, 0);
1069  AV_COPY32(p, pi);
1070  }
1071  }
1072 
1073  return 0;
1074 }
1075 
1076 static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
1077  int *got_frame, uint8_t *data_start,
1078  unsigned int data_size, int is_alpha_chunk)
1079 {
1080  WebPContext *s = avctx->priv_data;
1081  int w, h, ret, i;
1082 
1083  if (!is_alpha_chunk) {
1084  s->lossless = 1;
1085  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1086  }
1087 
1088  ret = init_get_bits(&s->gb, data_start, data_size * 8);
1089  if (ret < 0)
1090  return ret;
1091 
1092  if (!is_alpha_chunk) {
1093  if (get_bits(&s->gb, 8) != 0x2F) {
1094  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1095  return AVERROR_INVALIDDATA;
1096  }
1097 
1098  w = get_bits(&s->gb, 14) + 1;
1099  h = get_bits(&s->gb, 14) + 1;
1100  if (s->width && s->width != w) {
1101  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1102  s->width, w);
1103  }
1104  s->width = w;
1105  if (s->height && s->height != h) {
1106  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1107  s->height, h);
1108  }
1109  s->height = h;
1110 
1111  ret = ff_set_dimensions(avctx, s->width, s->height);
1112  if (ret < 0)
1113  return ret;
1114 
1115  s->has_alpha = get_bits1(&s->gb);
1116 
1117  if (get_bits(&s->gb, 3) != 0x0) {
1118  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1119  return AVERROR_INVALIDDATA;
1120  }
1121  } else {
1122  if (!s->width || !s->height)
1123  return AVERROR_BUG;
1124  w = s->width;
1125  h = s->height;
1126  }
1127 
1128  /* parse transformations */
1129  s->nb_transforms = 0;
1130  s->reduced_width = 0;
1131  while (get_bits1(&s->gb)) {
1132  enum TransformType transform = get_bits(&s->gb, 2);
1133  s->transforms[s->nb_transforms++] = transform;
1134  switch (transform) {
1135  case PREDICTOR_TRANSFORM:
1136  ret = parse_transform_predictor(s);
1137  break;
1138  case COLOR_TRANSFORM:
1139  ret = parse_transform_color(s);
1140  break;
1141  case COLOR_INDEXING_TRANSFORM:
1142  ret = parse_transform_color_indexing(s);
1143  break;
1144  }
1145  if (ret < 0)
1146  goto free_and_return;
1147  }
1148 
1149  /* decode primary image */
1150  s->image[IMAGE_ROLE_ARGB].frame = p;
1151  if (is_alpha_chunk)
1152  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1153  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ARGB, w, h);
1154  if (ret < 0)
1155  goto free_and_return;
1156 
1157  /* apply transformations */
1158  for (i = s->nb_transforms - 1; i >= 0; i--) {
1159  switch (s->transforms[i]) {
1160  case PREDICTOR_TRANSFORM:
1161  ret = apply_predictor_transform(s);
1162  break;
1163  case COLOR_TRANSFORM:
1164  ret = apply_color_transform(s);
1165  break;
1166  case SUBTRACT_GREEN:
1167  ret = apply_subtract_green_transform(s);
1168  break;
1169  case COLOR_INDEXING_TRANSFORM:
1170  ret = apply_color_indexing_transform(s);
1171  break;
1172  }
1173  if (ret < 0)
1174  goto free_and_return;
1175  }
1176 
1177  *got_frame = 1;
1178  p->pict_type = AV_PICTURE_TYPE_I;
1179  p->key_frame = 1;
1180  ret = data_size;
1181 
1182 free_and_return:
1183  for (i = 0; i < IMAGE_ROLE_NB; i++)
1184  image_ctx_free(&s->image[i]);
1185 
1186  return ret;
1187 }
1188 
1189 static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
1190 {
1191  int x, y, ls;
1192  uint8_t *dec;
1193 
1194  ls = frame->linesize[3];
1195 
1196  /* filter first row using horizontal filter */
1197  dec = frame->data[3] + 1;
1198  for (x = 1; x < frame->width; x++, dec++)
1199  *dec += *(dec - 1);
1200 
1201  /* filter first column using vertical filter */
1202  dec = frame->data[3] + ls;
1203  for (y = 1; y < frame->height; y++, dec += ls)
1204  *dec += *(dec - ls);
1205 
1206  /* filter the rest using the specified filter */
1207  switch (m) {
1208  case ALPHA_FILTER_HORIZONTAL:
1209  for (y = 1; y < frame->height; y++) {
1210  dec = frame->data[3] + y * ls + 1;
1211  for (x = 1; x < frame->width; x++, dec++)
1212  *dec += *(dec - 1);
1213  }
1214  break;
1215  case ALPHA_FILTER_VERTICAL:
1216  for (y = 1; y < frame->height; y++) {
1217  dec = frame->data[3] + y * ls + 1;
1218  for (x = 1; x < frame->width; x++, dec++)
1219  *dec += *(dec - ls);
1220  }
1221  break;
1222  case ALPHA_FILTER_GRADIENT:
1223  for (y = 1; y < frame->height; y++) {
1224  dec = frame->data[3] + y * ls + 1;
1225  for (x = 1; x < frame->width; x++, dec++)
1226  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1227  }
1228  break;
1229  }
1230 }
1231 
1232 static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p,
1233  uint8_t *data_start,
1234  unsigned int data_size)
1235 {
1236  WebPContext *s = avctx->priv_data;
1237  int x, y, ret;
1238 
1239  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1240  GetByteContext gb;
1241 
1242  bytestream2_init(&gb, data_start, data_size);
1243  for (y = 0; y < s->height; y++)
1244  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1245  s->width);
1246  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1247  uint8_t *ap, *pp;
1248  int alpha_got_frame = 0;
1249 
1250  s->alpha_frame = av_frame_alloc();
1251  if (!s->alpha_frame)
1252  return AVERROR(ENOMEM);
1253 
1254  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1255  data_start, data_size, 1);
1256  if (ret < 0) {
1257  av_frame_free(&s->alpha_frame);
1258  return ret;
1259  }
1260  if (!alpha_got_frame) {
1261  av_frame_free(&s->alpha_frame);
1262  return AVERROR_INVALIDDATA;
1263  }
1264 
1265  /* copy green component of alpha image to alpha plane of primary image */
1266  for (y = 0; y < s->height; y++) {
1267  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1268  pp = p->data[3] + p->linesize[3] * y;
1269  for (x = 0; x < s->width; x++) {
1270  *pp = *ap;
1271  pp++;
1272  ap += 4;
1273  }
1274  }
1275  av_frame_free(&s->alpha_frame);
1276  }
1277 
1278  /* apply alpha filtering */
1279  if (s->alpha_filter)
1280  alpha_inverse_prediction(p, s->alpha_filter);
1281 
1282  return 0;
1283 }
1284 
1285 static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
1286  int *got_frame, uint8_t *data_start,
1287  unsigned int data_size)
1288 {
1289  WebPContext *s = avctx->priv_data;
1290  AVPacket pkt;
1291  int ret;
1292 
1293  if (!s->initialized) {
1294  ff_vp8_decode_init(avctx);
1295  s->initialized = 1;
1296  if (s->has_alpha)
1297  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
1298  }
1299  s->lossless = 0;
1300 
1301  if (data_size > INT_MAX) {
1302  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1303  return AVERROR_PATCHWELCOME;
1304  }
1305 
1306  av_init_packet(&pkt);
1307  pkt.data = data_start;
1308  pkt.size = data_size;
1309 
1310  ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt);
1311  if (s->has_alpha) {
1312  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1313  s->alpha_data_size);
1314  if (ret < 0)
1315  return ret;
1316  }
1317  return ret;
1318 }
1319 
1320 static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1321  AVPacket *avpkt)
1322 {
1323  AVFrame * const p = data;
1324  WebPContext *s = avctx->priv_data;
1325  GetByteContext gb;
1326  int ret;
1327  uint32_t chunk_type, chunk_size;
1328  int vp8x_flags = 0;
1329 
1330  s->avctx = avctx;
1331  s->width = 0;
1332  s->height = 0;
1333  *got_frame = 0;
1334  s->has_alpha = 0;
1335  s->has_exif = 0;
1336  bytestream2_init(&gb, avpkt->data, avpkt->size);
1337 
1338  if (bytestream2_get_bytes_left(&gb) < 12)
1339  return AVERROR_INVALIDDATA;
1340 
1341  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1342  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1343  return AVERROR_INVALIDDATA;
1344  }
1345 
1346  chunk_size = bytestream2_get_le32(&gb);
1347  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1348  return AVERROR_INVALIDDATA;
1349 
1350  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1351  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1352  return AVERROR_INVALIDDATA;
1353  }
1354 
1356  while (bytestream2_get_bytes_left(&gb) > 0) {
1357  char chunk_str[5] = { 0 };
1358 
1359  chunk_type = bytestream2_get_le32(&gb);
1360  chunk_size = bytestream2_get_le32(&gb);
1361  if (chunk_size == UINT32_MAX)
1362  return AVERROR_INVALIDDATA;
1363  chunk_size += chunk_size & 1;
1364 
1365  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1366  return AVERROR_INVALIDDATA;
1367 
1368  switch (chunk_type) {
1369  case MKTAG('V', 'P', '8', ' '):
1370  if (!*got_frame) {
1371  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1372  avpkt->data + bytestream2_tell(&gb),
1373  chunk_size);
1374  if (ret < 0)
1375  return ret;
1376  }
1377  bytestream2_skip(&gb, chunk_size);
1378  break;
1379  case MKTAG('V', 'P', '8', 'L'):
1380  if (!*got_frame) {
1381  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1382  avpkt->data + bytestream2_tell(&gb),
1383  chunk_size, 0);
1384  if (ret < 0)
1385  return ret;
1386  }
1387  bytestream2_skip(&gb, chunk_size);
1388  break;
1389  case MKTAG('V', 'P', '8', 'X'):
1390  vp8x_flags = bytestream2_get_byte(&gb);
1391  bytestream2_skip(&gb, 3);
1392  s->width = bytestream2_get_le24(&gb) + 1;
1393  s->height = bytestream2_get_le24(&gb) + 1;
1394  ret = av_image_check_size(s->width, s->height, 0, avctx);
1395  if (ret < 0)
1396  return ret;
1397  break;
1398  case MKTAG('A', 'L', 'P', 'H'): {
1399  int alpha_header, filter_m, compression;
1400 
1401  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1402  av_log(avctx, AV_LOG_WARNING,
1403  "ALPHA chunk present, but alpha bit not set in the "
1404  "VP8X header\n");
1405  }
1406  if (chunk_size == 0) {
1407  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1408  return AVERROR_INVALIDDATA;
1409  }
1410  alpha_header = bytestream2_get_byte(&gb);
1411  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1412  s->alpha_data_size = chunk_size - 1;
1413  bytestream2_skip(&gb, s->alpha_data_size);
1414 
1415  filter_m = (alpha_header >> 2) & 0x03;
1416  compression = alpha_header & 0x03;
1417 
1418  if (compression > ALPHA_COMPRESSION_VP8L) {
1419  av_log(avctx, AV_LOG_VERBOSE,
1420  "skipping unsupported ALPHA chunk\n");
1421  } else {
1422  s->has_alpha = 1;
1423  s->alpha_compression = compression;
1424  s->alpha_filter = filter_m;
1425  }
1426 
1427  break;
1428  }
1429  case MKTAG('E', 'X', 'I', 'F'): {
1430  int le, ifd_offset, exif_offset = bytestream2_tell(&gb);
1431  GetByteContext exif_gb;
1432 
1433  if (s->has_exif) {
1434  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1435  goto exif_end;
1436  }
1437  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1438  av_log(avctx, AV_LOG_WARNING,
1439  "EXIF chunk present, but Exif bit not set in the "
1440  "VP8X header\n");
1441 
1442  s->has_exif = 1;
1443  bytestream2_init(&exif_gb, avpkt->data + exif_offset,
1444  avpkt->size - exif_offset);
1445  if (ff_tdecode_header(&exif_gb, &le, &ifd_offset) < 0) {
1446  av_log(avctx, AV_LOG_ERROR, "invalid TIFF header "
1447  "in Exif data\n");
1448  goto exif_end;
1449  }
1450 
1451  bytestream2_seek(&exif_gb, ifd_offset, SEEK_SET);
1452  if (ff_exif_decode_ifd(avctx, &exif_gb, le, 0, &s->exif_metadata) < 0) {
1453  av_log(avctx, AV_LOG_ERROR, "error decoding Exif data\n");
1454  goto exif_end;
1455  }
1456 
1458 
1459 exif_end:
1460  av_dict_free(&s->exif_metadata);
1461  bytestream2_skip(&gb, chunk_size);
1462  break;
1463  }
1464  case MKTAG('I', 'C', 'C', 'P'):
1465  case MKTAG('A', 'N', 'I', 'M'):
1466  case MKTAG('A', 'N', 'M', 'F'):
1467  case MKTAG('X', 'M', 'P', ' '):
1468  AV_WL32(chunk_str, chunk_type);
1469  av_log(avctx, AV_LOG_VERBOSE, "skipping unsupported chunk: %s\n",
1470  chunk_str);
1471  bytestream2_skip(&gb, chunk_size);
1472  break;
1473  default:
1474  AV_WL32(chunk_str, chunk_type);
1475  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1476  chunk_str);
1477  bytestream2_skip(&gb, chunk_size);
1478  break;
1479  }
1480  }
1481 
1482  if (!*got_frame) {
1483  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1484  return AVERROR_INVALIDDATA;
1485  }
1486 
1487  return avpkt->size;
1488 }
1489 
1490 static av_cold int webp_decode_close(AVCodecContext *avctx)
1491 {
1492  WebPContext *s = avctx->priv_data;
1493 
1494  if (s->initialized)
1495  return ff_vp8_decode_free(avctx);
1496 
1497  return 0;
1498 }
1499 
1500 AVCodec ff_webp_decoder = {
1501  .name = "webp",
1502  .long_name = NULL_IF_CONFIG_SMALL("WebP image"),
1503  .type = AVMEDIA_TYPE_VIDEO,
1504  .id = AV_CODEC_ID_WEBP,
1505  .priv_data_size = sizeof(WebPContext),
1506  .decode = webp_decode_frame,
1507  .close = webp_decode_close,
1508  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
1509 };