mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "decode.h"
40 #include "hwconfig.h"
41 #include "idctdsp.h"
42 #include "internal.h"
43 #include "jpegtables.h"
44 #include "mjpeg.h"
45 #include "mjpegdec.h"
46 #include "jpeglsdec.h"
47 #include "profiles.h"
48 #include "put_bits.h"
49 #include "tiff.h"
50 #include "exif.h"
51 #include "bytestream.h"
52 
53 
54 static int init_default_huffman_tables(MJpegDecodeContext *s)
55 {
56  static const struct {
57  int class;
58  int index;
59  const uint8_t *bits;
60  const uint8_t *values;
61  int length;
62  } ht[] = {
63  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
64  avpriv_mjpeg_val_dc, 12 },
65  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
66  avpriv_mjpeg_val_dc, 12 },
67  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
68  avpriv_mjpeg_val_ac_luminance, 162 },
69  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
70  avpriv_mjpeg_val_ac_chrominance, 162 },
71  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
72  avpriv_mjpeg_val_ac_luminance, 162 },
73  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
74  avpriv_mjpeg_val_ac_chrominance, 162 },
75  };
76  int i, ret;
77 
78  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
79  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
80  ht[i].bits, ht[i].values,
81  ht[i].class == 1, s->avctx);
82  if (ret < 0)
83  return ret;
84 
85  if (ht[i].class < 2) {
86  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
87  ht[i].bits + 1, 16);
88  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
89  ht[i].values, ht[i].length);
90  }
91  }
92 
93  return 0;
94 }
95 
96 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
97 {
98  s->buggy_avid = 1;
99  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
100  s->interlace_polarity = 1;
101  if (len > 14 && buf[12] == 2) /* 2 - PAL */
102  s->interlace_polarity = 0;
103  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
104  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
105 }
106 
107 static void init_idct(AVCodecContext *avctx)
108 {
109  MJpegDecodeContext *s = avctx->priv_data;
110 
111  ff_idctdsp_init(&s->idsp, avctx);
112  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
113  ff_zigzag_direct);
114 }
115 
116 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
117 {
118  MJpegDecodeContext *s = avctx->priv_data;
119  int ret;
120 
121  if (!s->picture_ptr) {
122  s->picture = av_frame_alloc();
123  if (!s->picture)
124  return AVERROR(ENOMEM);
125  s->picture_ptr = s->picture;
126  }
127 
128  s->pkt = av_packet_alloc();
129  if (!s->pkt)
130  return AVERROR(ENOMEM);
131 
132  s->avctx = avctx;
133  ff_blockdsp_init(&s->bdsp, avctx);
134  ff_hpeldsp_init(&s->hdsp, avctx->flags);
135  init_idct(avctx);
136  s->buffer_size = 0;
137  s->buffer = NULL;
138  s->start_code = -1;
139  s->first_picture = 1;
140  s->got_picture = 0;
141  s->orig_height = avctx->coded_height;
143  avctx->colorspace = AVCOL_SPC_BT470BG;
145 
146  if ((ret = init_default_huffman_tables(s)) < 0)
147  return ret;
148 
149  if (s->extern_huff) {
150  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
151  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
152  return ret;
153  if (ff_mjpeg_decode_dht(s)) {
154  av_log(avctx, AV_LOG_ERROR,
155  "error using external huffman table, switching back to internal\n");
156  init_default_huffman_tables(s);
157  }
158  }
159  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
160  s->interlace_polarity = 1; /* bottom field first */
161  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
162  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
163  if (avctx->codec_tag == AV_RL32("MJPG"))
164  s->interlace_polarity = 1;
165  }
166 
167  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
168  if (avctx->extradata_size >= 4)
169  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
170 
171  if (s->smv_frames_per_jpeg <= 0) {
172  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
173  return AVERROR_INVALIDDATA;
174  }
175 
176  s->smv_frame = av_frame_alloc();
177  if (!s->smv_frame)
178  return AVERROR(ENOMEM);
179  } else if (avctx->extradata_size > 8
180  && AV_RL32(avctx->extradata) == 0x2C
181  && AV_RL32(avctx->extradata+4) == 0x18) {
182  parse_avid(s, avctx->extradata, avctx->extradata_size);
183  }
184 
185  if (avctx->codec->id == AV_CODEC_ID_AMV)
186  s->flipped = 1;
187 
188  return 0;
189 }
190 
191 
192 /* quantize tables */
193 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
194 {
195  int len, index, i;
196 
197  len = get_bits(&s->gb, 16) - 2;
198 
199  if (8*len > get_bits_left(&s->gb)) {
200  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
201  return AVERROR_INVALIDDATA;
202  }
203 
204  while (len >= 65) {
205  int pr = get_bits(&s->gb, 4);
206  if (pr > 1) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
208  return AVERROR_INVALIDDATA;
209  }
210  index = get_bits(&s->gb, 4);
211  if (index >= 4)
212  return -1;
213  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
214  /* read quant table */
215  for (i = 0; i < 64; i++) {
216  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
217  if (s->quant_matrixes[index][i] == 0) {
218  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
219  return AVERROR_INVALIDDATA;
220  }
221  }
222 
223  // XXX FIXME fine-tune, and perhaps add dc too
224  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
225  s->quant_matrixes[index][8]) >> 1;
226  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
227  index, s->qscale[index]);
228  len -= 1 + 64 * (1+pr);
229  }
230  return 0;
231 }
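/* For reference, the DQT payload parsed above follows ITU-T T.81 B.2.4.1; this is an
 * illustrative summary of what the loop consumes, not additional decoder logic:
 *   FFDB            DQT marker (already consumed by the caller)
 *   Lq  (16 bits)   segment length, including these two bytes
 *   then, repeated while at least 65 bytes remain:
 *     Pq (4 bits)   element precision: 0 = 8-bit, 1 = 16-bit values
 *     Tq (4 bits)   destination table id 0..3, i.e. quant_matrixes[Tq]
 *     Q0..Q63       64 quantizer values in zig-zag order, 8 or 16 bits each
 * An 8-bit table therefore costs 1 + 64 bytes, matching "len -= 1 + 64 * (1+pr)" above. */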
232 
233 /* decode huffman tables and build VLC decoders */
234 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
235 {
236  int len, index, i, class, n, v;
237  uint8_t bits_table[17];
238  uint8_t val_table[256];
239  int ret = 0;
240 
241  len = get_bits(&s->gb, 16) - 2;
242 
243  if (8*len > get_bits_left(&s->gb)) {
244  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
245  return AVERROR_INVALIDDATA;
246  }
247 
248  while (len > 0) {
249  if (len < 17)
250  return AVERROR_INVALIDDATA;
251  class = get_bits(&s->gb, 4);
252  if (class >= 2)
253  return AVERROR_INVALIDDATA;
254  index = get_bits(&s->gb, 4);
255  if (index >= 4)
256  return AVERROR_INVALIDDATA;
257  n = 0;
258  for (i = 1; i <= 16; i++) {
259  bits_table[i] = get_bits(&s->gb, 8);
260  n += bits_table[i];
261  }
262  len -= 17;
263  if (len < n || n > 256)
264  return AVERROR_INVALIDDATA;
265 
266  for (i = 0; i < n; i++) {
267  v = get_bits(&s->gb, 8);
268  val_table[i] = v;
269  }
270  len -= n;
271 
272  /* build VLC and flush previous vlc if present */
273  ff_free_vlc(&s->vlcs[class][index]);
274  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
275  class, index, n);
276  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
277  val_table, class > 0, s->avctx)) < 0)
278  return ret;
279 
280  if (class > 0) {
281  ff_free_vlc(&s->vlcs[2][index]);
282  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
283  val_table, 0, s->avctx)) < 0)
284  return ret;
285  }
286 
287  for (i = 0; i < 16; i++)
288  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
289  for (i = 0; i < 256; i++)
290  s->raw_huffman_values[class][index][i] = val_table[i];
291  }
292  return 0;
293 }
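/* For reference, the DHT payload decoded above is laid out as in ITU-T T.81 B.2.4.2
 * (summary only):
 *   FFC4            DHT marker (consumed by the caller)
 *   Lh  (16 bits)   segment length
 *   then, per table while bytes remain:
 *     Tc (4 bits)   table class: 0 = DC, 1 = AC
 *     Th (4 bits)   destination id 0..3
 *     L1..L16       number of codes of each length 1..16 (the bits_table above)
 *     V1..Vn        n = L1+...+L16 symbol values (the val_table above), n <= 256 */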
294 
295 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
296 {
297  int len, nb_components, i, width, height, bits, ret, size_change;
298  unsigned pix_fmt_id;
299  int h_count[MAX_COMPONENTS] = { 0 };
300  int v_count[MAX_COMPONENTS] = { 0 };
301 
302  s->cur_scan = 0;
303  memset(s->upscale_h, 0, sizeof(s->upscale_h));
304  memset(s->upscale_v, 0, sizeof(s->upscale_v));
305 
306  len = get_bits(&s->gb, 16);
307  bits = get_bits(&s->gb, 8);
308 
309  if (bits > 16 || bits < 1) {
310  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
311  return AVERROR_INVALIDDATA;
312  }
313 
314  if (s->avctx->bits_per_raw_sample != bits) {
315  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
316  s->avctx->bits_per_raw_sample = bits;
317  init_idct(s->avctx);
318  }
319  if (s->pegasus_rct)
320  bits = 9;
321  if (bits == 9 && !s->pegasus_rct)
322  s->rct = 1; // FIXME ugly
323 
324  if(s->lossless && s->avctx->lowres){
325  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
326  return -1;
327  }
328 
329  height = get_bits(&s->gb, 16);
330  width = get_bits(&s->gb, 16);
331 
332  // HACK for odd_height.mov
333  if (s->interlaced && s->width == width && s->height == height + 1)
334  height= s->height;
335 
336  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
337  if (av_image_check_size(width, height, 0, s->avctx) < 0)
338  return AVERROR_INVALIDDATA;
339  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
340  return AVERROR_INVALIDDATA;
341 
342  nb_components = get_bits(&s->gb, 8);
343  if (nb_components <= 0 ||
344  nb_components > MAX_COMPONENTS)
345  return -1;
346  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
347  if (nb_components != s->nb_components) {
348  av_log(s->avctx, AV_LOG_ERROR,
349  "nb_components changing in interlaced picture\n");
350  return AVERROR_INVALIDDATA;
351  }
352  }
353  if (s->ls && !(bits <= 8 || nb_components == 1)) {
354  avpriv_report_missing_feature(s->avctx,
355  "JPEG-LS that is not <= 8 "
356  "bits/component or 16-bit gray");
357  return AVERROR_PATCHWELCOME;
358  }
359  if (len != 8 + 3 * nb_components) {
360  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
361  return AVERROR_INVALIDDATA;
362  }
363 
364  s->nb_components = nb_components;
365  s->h_max = 1;
366  s->v_max = 1;
367  for (i = 0; i < nb_components; i++) {
368  /* component id */
369  s->component_id[i] = get_bits(&s->gb, 8) - 1;
370  h_count[i] = get_bits(&s->gb, 4);
371  v_count[i] = get_bits(&s->gb, 4);
372  /* compute hmax and vmax (only used in interleaved case) */
373  if (h_count[i] > s->h_max)
374  s->h_max = h_count[i];
375  if (v_count[i] > s->v_max)
376  s->v_max = v_count[i];
377  s->quant_index[i] = get_bits(&s->gb, 8);
378  if (s->quant_index[i] >= 4) {
379  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
380  return AVERROR_INVALIDDATA;
381  }
382  if (!h_count[i] || !v_count[i]) {
383  av_log(s->avctx, AV_LOG_ERROR,
384  "Invalid sampling factor in component %d %d:%d\n",
385  i, h_count[i], v_count[i]);
386  return AVERROR_INVALIDDATA;
387  }
388 
389  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
390  i, h_count[i], v_count[i],
391  s->component_id[i], s->quant_index[i]);
392  }
393  if ( nb_components == 4
394  && s->component_id[0] == 'C' - 1
395  && s->component_id[1] == 'M' - 1
396  && s->component_id[2] == 'Y' - 1
397  && s->component_id[3] == 'K' - 1)
398  s->adobe_transform = 0;
399 
400  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
401  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
402  return AVERROR_PATCHWELCOME;
403  }
404 
405  if (s->bayer) {
406  if (nb_components == 2) {
407  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
408  width stored in their SOF3 markers is the width of each one. We only output
409  a single component, therefore we need to adjust the output image width. We
410  handle the deinterleaving (but not the debayering) in this file. */
411  width *= 2;
412  }
413  /* They can also contain 1 component, which is double the width and half the height
414  of the final image (rows are interleaved). We don't handle the decoding in this
415  file, but leave that to the TIFF/DNG decoder. */
416  }
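/* Illustration (assuming the handling in ljpeg_decode_rgb_scan below): a 2-component
 * SOF3 that declares width 672 yields an output width of 1344 here, and the scan code
 * then writes the two interleaved components alternately as out[2*x] = c0[x],
 * out[2*x+1] = c1[x]; the actual debayering is left to the TIFF/DNG decoder. */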
417 
418  /* if different size, realloc/alloc picture */
419  if (width != s->width || height != s->height || bits != s->bits ||
420  memcmp(s->h_count, h_count, sizeof(h_count)) ||
421  memcmp(s->v_count, v_count, sizeof(v_count))) {
422  size_change = 1;
423 
424  s->width = width;
425  s->height = height;
426  s->bits = bits;
427  memcpy(s->h_count, h_count, sizeof(h_count));
428  memcpy(s->v_count, v_count, sizeof(v_count));
429  s->interlaced = 0;
430  s->got_picture = 0;
431 
432  /* test interlaced mode */
433  if (s->first_picture &&
434  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
435  s->orig_height != 0 &&
436  s->height < ((s->orig_height * 3) / 4)) {
437  s->interlaced = 1;
438  s->bottom_field = s->interlace_polarity;
439  s->picture_ptr->interlaced_frame = 1;
440  s->picture_ptr->top_field_first = !s->interlace_polarity;
441  height *= 2;
442  }
443 
444  ret = ff_set_dimensions(s->avctx, width, height);
445  if (ret < 0)
446  return ret;
447 
448  s->first_picture = 0;
449  } else {
450  size_change = 0;
451  }
452 
453  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
454  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
455  s->orig_height < s->avctx->height)
456  s->avctx->height = s->orig_height;
457 
458  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
459  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
460  if (s->avctx->height <= 0)
461  return AVERROR_INVALIDDATA;
462  }
463 
464  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
465  if (s->progressive) {
466  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
467  return AVERROR_INVALIDDATA;
468  }
469  } else {
470  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
471  s->rgb = 1;
472  else if (!s->lossless)
473  s->rgb = 0;
474  /* XXX: not complete test ! */
475  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
476  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
477  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
478  (s->h_count[3] << 4) | s->v_count[3];
479  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
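/* Worked examples of the packing above (nibbles h0 v0 h1 v1 h2 v2 h3 v3, before the
 * normalization below): 4:4:4 with three 1x1 components packs to 0x11111100,
 * 4:2:2 (2x1, 1x1, 1x1) to 0x21111100, and 4:2:0 (2x2, 1x1, 1x1) to 0x22111100,
 * i.e. the ids matched by the switch below. */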
480  /* NOTE we do not allocate pictures large enough for the possible
481  * padding of h/v_count being 4 */
482  if (!(pix_fmt_id & 0xD0D0D0D0))
483  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
484  if (!(pix_fmt_id & 0x0D0D0D0D))
485  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
486 
487  for (i = 0; i < 8; i++) {
488  int j = 6 + (i&1) - (i&6);
489  int is = (pix_fmt_id >> (4*i)) & 0xF;
490  int js = (pix_fmt_id >> (4*j)) & 0xF;
491 
492  if (is == 1 && js != 2 && (i < 2 || i > 5))
493  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
494  if (is == 1 && js != 2 && (i < 2 || i > 5))
495  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
496 
497  if (is == 1 && js == 2) {
498  if (i & 1) s->upscale_h[j/2] = 1;
499  else s->upscale_v[j/2] = 1;
500  }
501  }
502 
503  if (s->bayer) {
504  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
505  goto unk_pixfmt;
506  }
507 
508  switch (pix_fmt_id) {
509  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
510  if (!s->bayer)
511  goto unk_pixfmt;
513  break;
514  case 0x11111100:
515  if (s->rgb)
517  else {
518  if ( s->adobe_transform == 0
519  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
521  } else {
525  }
526  }
527  av_assert0(s->nb_components == 3);
528  break;
529  case 0x11111111:
530  if (s->rgb)
532  else {
533  if (s->adobe_transform == 0 && s->bits <= 8) {
535  } else {
538  }
539  }
540  av_assert0(s->nb_components == 4);
541  break;
542  case 0x22111122:
543  case 0x22111111:
544  if (s->adobe_transform == 0 && s->bits <= 8) {
546  s->upscale_v[1] = s->upscale_v[2] = 1;
547  s->upscale_h[1] = s->upscale_h[2] = 1;
548  } else if (s->adobe_transform == 2 && s->bits <= 8) {
550  s->upscale_v[1] = s->upscale_v[2] = 1;
551  s->upscale_h[1] = s->upscale_h[2] = 1;
553  } else {
554  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
557  }
558  av_assert0(s->nb_components == 4);
559  break;
560  case 0x12121100:
561  case 0x22122100:
562  case 0x21211100:
563  case 0x22211200:
564  case 0x22221100:
565  case 0x22112200:
566  case 0x11222200:
568  else
569  goto unk_pixfmt;
571  break;
572  case 0x11000000:
573  case 0x13000000:
574  case 0x14000000:
575  case 0x31000000:
576  case 0x33000000:
577  case 0x34000000:
578  case 0x41000000:
579  case 0x43000000:
580  case 0x44000000:
581  if(s->bits <= 8)
583  else
585  break;
586  case 0x12111100:
587  case 0x14121200:
588  case 0x14111100:
589  case 0x22211100:
590  case 0x22112100:
591  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
592  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
593  else
594  goto unk_pixfmt;
595  s->upscale_v[0] = s->upscale_v[1] = 1;
596  } else {
597  if (pix_fmt_id == 0x14111100)
598  s->upscale_v[1] = s->upscale_v[2] = 1;
600  else
601  goto unk_pixfmt;
603  }
604  break;
605  case 0x21111100:
606  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
607  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
608  else
609  goto unk_pixfmt;
610  s->upscale_h[0] = s->upscale_h[1] = 1;
611  } else {
615  }
616  break;
617  case 0x31111100:
618  if (s->bits > 8)
619  goto unk_pixfmt;
622  s->upscale_h[1] = s->upscale_h[2] = 2;
623  break;
624  case 0x22121100:
625  case 0x22111200:
627  else
628  goto unk_pixfmt;
630  break;
631  case 0x22111100:
632  case 0x23111100:
633  case 0x42111100:
634  case 0x24111100:
638  if (pix_fmt_id == 0x42111100) {
639  if (s->bits > 8)
640  goto unk_pixfmt;
641  s->upscale_h[1] = s->upscale_h[2] = 1;
642  } else if (pix_fmt_id == 0x24111100) {
643  if (s->bits > 8)
644  goto unk_pixfmt;
645  s->upscale_v[1] = s->upscale_v[2] = 1;
646  } else if (pix_fmt_id == 0x23111100) {
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  s->upscale_v[1] = s->upscale_v[2] = 2;
650  }
651  break;
652  case 0x41111100:
654  else
655  goto unk_pixfmt;
657  break;
658  default:
659  unk_pixfmt:
660  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
661  memset(s->upscale_h, 0, sizeof(s->upscale_h));
662  memset(s->upscale_v, 0, sizeof(s->upscale_v));
663  return AVERROR_PATCHWELCOME;
664  }
665  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
666  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
667  return AVERROR_PATCHWELCOME;
668  }
669  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
670  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
671  return AVERROR_PATCHWELCOME;
672  }
673  if (s->ls) {
674  memset(s->upscale_h, 0, sizeof(s->upscale_h));
675  memset(s->upscale_v, 0, sizeof(s->upscale_v));
676  if (s->nb_components == 3) {
678  } else if (s->nb_components != 1) {
679  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
680  return AVERROR_PATCHWELCOME;
681  } else if (s->palette_index && s->bits <= 8)
682  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
683  else if (s->bits <= 8)
684  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
685  else
686  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
687  }
688 
689  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
690  if (!s->pix_desc) {
691  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
692  return AVERROR_BUG;
693  }
694 
695  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
696  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
697  } else {
698  enum AVPixelFormat pix_fmts[] = {
699 #if CONFIG_MJPEG_NVDEC_HWACCEL
700  AV_PIX_FMT_CUDA,
701 #endif
702 #if CONFIG_MJPEG_VAAPI_HWACCEL
703  AV_PIX_FMT_VAAPI,
704 #endif
705  s->avctx->pix_fmt,
706  AV_PIX_FMT_NONE,
707  };
708  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
709  if (s->hwaccel_pix_fmt < 0)
710  return AVERROR(EINVAL);
711 
712  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
713  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
714  }
715 
716  if (s->avctx->skip_frame == AVDISCARD_ALL) {
717  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
718  s->picture_ptr->key_frame = 1;
719  s->got_picture = 1;
720  return 0;
721  }
722 
723  av_frame_unref(s->picture_ptr);
724  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
725  return -1;
726  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
727  s->picture_ptr->key_frame = 1;
728  s->got_picture = 1;
729 
730  for (i = 0; i < 4; i++)
731  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
732 
733  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
734  s->width, s->height, s->linesize[0], s->linesize[1],
735  s->interlaced, s->avctx->height);
736 
737  }
738 
739  if ((s->rgb && !s->lossless && !s->ls) ||
740  (!s->rgb && s->ls && s->nb_components > 1) ||
741  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
742  ) {
743  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
744  return AVERROR_PATCHWELCOME;
745  }
746 
747  /* totally blank picture as progressive JPEG will only add details to it */
748  if (s->progressive) {
749  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
750  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
751  for (i = 0; i < s->nb_components; i++) {
752  int size = bw * bh * s->h_count[i] * s->v_count[i];
753  av_freep(&s->blocks[i]);
754  av_freep(&s->last_nnz[i]);
755  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
756  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
757  if (!s->blocks[i] || !s->last_nnz[i])
758  return AVERROR(ENOMEM);
759  s->block_stride[i] = bw * s->h_count[i];
760  }
761  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
762  }
763 
764  if (s->avctx->hwaccel) {
765  s->hwaccel_picture_private =
766  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
767  if (!s->hwaccel_picture_private)
768  return AVERROR(ENOMEM);
769 
770  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
771  s->raw_image_buffer_size);
772  if (ret < 0)
773  return ret;
774  }
775 
776  return 0;
777 }
778 
779 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
780 {
781  int code;
782  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
783  if (code < 0 || code > 16) {
784  av_log(s->avctx, AV_LOG_WARNING,
785  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
786  0, dc_index, &s->vlcs[0][dc_index]);
787  return 0xfffff;
788  }
789 
790  if (code)
791  return get_xbits(&s->gb, code);
792  else
793  return 0;
794 }
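/* Worked example for the DC path above: the Huffman symbol is the magnitude category
 * SSSS, and get_xbits() then reads SSSS extra bits with JPEG-style sign extension.
 * For SSSS = 3, extra bits 110 decode to +6 while 010 decodes to 2 - (2^3 - 1) = -5.
 * The result is the DC *difference*, which the callers add to last_dc[component]. */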
795 
796 /* decode block and dequantize */
797 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
798  int dc_index, int ac_index, uint16_t *quant_matrix)
799 {
800  int code, i, j, level, val;
801 
802  /* DC coef */
803  val = mjpeg_decode_dc(s, dc_index);
804  if (val == 0xfffff) {
805  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
806  return AVERROR_INVALIDDATA;
807  }
808  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
809  val = av_clip_int16(val);
810  s->last_dc[component] = val;
811  block[0] = val;
812  /* AC coefs */
813  i = 0;
814  {OPEN_READER(re, &s->gb);
815  do {
816  UPDATE_CACHE(re, &s->gb);
817  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
818 
819  i += ((unsigned)code) >> 4;
820  code &= 0xf;
821  if (code) {
822  if (code > MIN_CACHE_BITS - 16)
823  UPDATE_CACHE(re, &s->gb);
824 
825  {
826  int cache = GET_CACHE(re, &s->gb);
827  int sign = (~cache) >> 31;
828  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
829  }
830 
831  LAST_SKIP_BITS(re, &s->gb, code);
832 
833  if (i > 63) {
834  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
835  return AVERROR_INVALIDDATA;
836  }
837  j = s->scantable.permutated[i];
838  block[j] = level * quant_matrix[i];
839  }
840  } while (i < 63);
841  CLOSE_READER(re, &s->gb);}
842 
843  return 0;
844 }
845 
846 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
847  int component, int dc_index,
848  uint16_t *quant_matrix, int Al)
849 {
850  unsigned val;
851  s->bdsp.clear_block(block);
852  val = mjpeg_decode_dc(s, dc_index);
853  if (val == 0xfffff) {
854  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
855  return AVERROR_INVALIDDATA;
856  }
857  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
858  s->last_dc[component] = val;
859  block[0] = val;
860  return 0;
861 }
862 
863 /* decode block and dequantize - progressive JPEG version */
864 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
865  uint8_t *last_nnz, int ac_index,
866  uint16_t *quant_matrix,
867  int ss, int se, int Al, int *EOBRUN)
868 {
869  int code, i, j, val, run;
870  unsigned level;
871 
872  if (*EOBRUN) {
873  (*EOBRUN)--;
874  return 0;
875  }
876 
877  {
878  OPEN_READER(re, &s->gb);
879  for (i = ss; ; i++) {
880  UPDATE_CACHE(re, &s->gb);
881  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
882 
883  run = ((unsigned) code) >> 4;
884  code &= 0xF;
885  if (code) {
886  i += run;
887  if (code > MIN_CACHE_BITS - 16)
888  UPDATE_CACHE(re, &s->gb);
889 
890  {
891  int cache = GET_CACHE(re, &s->gb);
892  int sign = (~cache) >> 31;
893  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
894  }
895 
896  LAST_SKIP_BITS(re, &s->gb, code);
897 
898  if (i >= se) {
899  if (i == se) {
900  j = s->scantable.permutated[se];
901  block[j] = level * (quant_matrix[se] << Al);
902  break;
903  }
904  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
905  return AVERROR_INVALIDDATA;
906  }
907  j = s->scantable.permutated[i];
908  block[j] = level * (quant_matrix[i] << Al);
909  } else {
910  if (run == 0xF) {// ZRL - skip 15 coefficients
911  i += 15;
912  if (i >= se) {
913  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
914  return AVERROR_INVALIDDATA;
915  }
916  } else {
917  val = (1 << run);
918  if (run) {
919  UPDATE_CACHE(re, &s->gb);
920  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
921  LAST_SKIP_BITS(re, &s->gb, run);
922  }
923  *EOBRUN = val - 1;
924  break;
925  }
926  }
927  }
928  CLOSE_READER(re, &s->gb);
929  }
930 
931  if (i > *last_nnz)
932  *last_nnz = i;
933 
934  return 0;
935 }
936 
937 #define REFINE_BIT(j) { \
938  UPDATE_CACHE(re, &s->gb); \
939  sign = block[j] >> 15; \
940  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
941  ((quant_matrix[i] ^ sign) - sign) << Al; \
942  LAST_SKIP_BITS(re, &s->gb, 1); \
943 }
944 
945 #define ZERO_RUN \
946 for (; ; i++) { \
947  if (i > last) { \
948  i += run; \
949  if (i > se) { \
950  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
951  return -1; \
952  } \
953  break; \
954  } \
955  j = s->scantable.permutated[i]; \
956  if (block[j]) \
957  REFINE_BIT(j) \
958  else if (run-- == 0) \
959  break; \
960 }
961 
962 /* decode block and dequantize - progressive JPEG refinement pass */
963 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
964  uint8_t *last_nnz,
965  int ac_index, uint16_t *quant_matrix,
966  int ss, int se, int Al, int *EOBRUN)
967 {
968  int code, i = ss, j, sign, val, run;
969  int last = FFMIN(se, *last_nnz);
970 
971  OPEN_READER(re, &s->gb);
972  if (*EOBRUN) {
973  (*EOBRUN)--;
974  } else {
975  for (; ; i++) {
976  UPDATE_CACHE(re, &s->gb);
977  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
978 
979  if (code & 0xF) {
980  run = ((unsigned) code) >> 4;
981  UPDATE_CACHE(re, &s->gb);
982  val = SHOW_UBITS(re, &s->gb, 1);
983  LAST_SKIP_BITS(re, &s->gb, 1);
984  ZERO_RUN;
985  j = s->scantable.permutated[i];
986  val--;
987  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
988  if (i == se) {
989  if (i > *last_nnz)
990  *last_nnz = i;
991  CLOSE_READER(re, &s->gb);
992  return 0;
993  }
994  } else {
995  run = ((unsigned) code) >> 4;
996  if (run == 0xF) {
997  ZERO_RUN;
998  } else {
999  val = run;
1000  run = (1 << run);
1001  if (val) {
1002  UPDATE_CACHE(re, &s->gb);
1003  run += SHOW_UBITS(re, &s->gb, val);
1004  LAST_SKIP_BITS(re, &s->gb, val);
1005  }
1006  *EOBRUN = run - 1;
1007  break;
1008  }
1009  }
1010  }
1011 
1012  if (i > *last_nnz)
1013  *last_nnz = i;
1014  }
1015 
1016  for (; i <= last; i++) {
1017  j = s->scantable.permutated[i];
1018  if (block[j])
1019  REFINE_BIT(j)
1020  }
1021  CLOSE_READER(re, &s->gb);
1022 
1023  return 0;
1024 }
1025 #undef REFINE_BIT
1026 #undef ZERO_RUN
1027 
1028 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1029 {
1030  int i;
1031  int reset = 0;
1032 
1033  if (s->restart_interval) {
1034  s->restart_count--;
1035  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1036  align_get_bits(&s->gb);
1037  for (i = 0; i < nb_components; i++) /* reset dc */
1038  s->last_dc[i] = (4 << s->bits);
1039  }
1040 
1041  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1042  /* skip RSTn */
1043  if (s->restart_count == 0) {
1044  if( show_bits(&s->gb, i) == (1 << i) - 1
1045  || show_bits(&s->gb, i) == 0xFF) {
1046  int pos = get_bits_count(&s->gb);
1047  align_get_bits(&s->gb);
1048  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1049  skip_bits(&s->gb, 8);
1050  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1051  for (i = 0; i < nb_components; i++) /* reset dc */
1052  s->last_dc[i] = (4 << s->bits);
1053  reset = 1;
1054  } else
1055  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1056  }
1057  }
1058  }
1059  return reset;
1060 }
1061 
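/* For reference: the PREDICT() selection used by the lossless scan decoders below
 * follows the standard JPEG predictor table (ITU-T T.81, Table H.1), with
 * Ra = sample to the left, Rb = above, Rc = above-left:
 *   1: Ra            2: Rb                 3: Rc
 *   4: Ra + Rb - Rc  5: Ra + ((Rb-Rc)>>1)  6: Rb + ((Ra-Rc)>>1)  7: (Ra + Rb) >> 1
 * Predictor 1 is forced below where the other neighbours are unavailable
 * (first column, resync boundaries). */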
1062 /* Handles 1 to 4 components */
1063 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1064 {
1065  int i, mb_x, mb_y;
1066  unsigned width;
1067  uint16_t (*buffer)[4];
1068  int left[4], top[4], topleft[4];
1069  const int linesize = s->linesize[0];
1070  const int mask = ((1 << s->bits) - 1) << point_transform;
1071  int resync_mb_y = 0;
1072  int resync_mb_x = 0;
1073  int vpred[6];
1074 
1075  if (!s->bayer && s->nb_components < 3)
1076  return AVERROR_INVALIDDATA;
1077  if (s->bayer && s->nb_components > 2)
1078  return AVERROR_INVALIDDATA;
1079  if (s->nb_components <= 0 || s->nb_components > 4)
1080  return AVERROR_INVALIDDATA;
1081  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1082  return AVERROR_INVALIDDATA;
1083 
1084 
1085  s->restart_count = s->restart_interval;
1086 
1087  if (s->restart_interval == 0)
1088  s->restart_interval = INT_MAX;
1089 
1090  if (s->bayer)
1091  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1092  else
1093  width = s->mb_width;
1094 
1095  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1096  if (!s->ljpeg_buffer)
1097  return AVERROR(ENOMEM);
1098 
1099  buffer = s->ljpeg_buffer;
1100 
1101  for (i = 0; i < 4; i++)
1102  buffer[0][i] = 1 << (s->bits - 1);
1103 
1104  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1105  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1106 
1107  if (s->interlaced && s->bottom_field)
1108  ptr += linesize >> 1;
1109 
1110  for (i = 0; i < 4; i++)
1111  top[i] = left[i] = topleft[i] = buffer[0][i];
1112 
1113  if ((mb_y * s->width) % s->restart_interval == 0) {
1114  for (i = 0; i < 6; i++)
1115  vpred[i] = 1 << (s->bits-1);
1116  }
1117 
1118  for (mb_x = 0; mb_x < width; mb_x++) {
1119  int modified_predictor = predictor;
1120 
1121  if (get_bits_left(&s->gb) < 1) {
1122  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1123  return AVERROR_INVALIDDATA;
1124  }
1125 
1126  if (s->restart_interval && !s->restart_count){
1127  s->restart_count = s->restart_interval;
1128  resync_mb_x = mb_x;
1129  resync_mb_y = mb_y;
1130  for(i=0; i<4; i++)
1131  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1132  }
1133  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1134  modified_predictor = 1;
1135 
1136  for (i=0;i<nb_components;i++) {
1137  int pred, dc;
1138 
1139  topleft[i] = top[i];
1140  top[i] = buffer[mb_x][i];
1141 
1142  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1143  if(dc == 0xFFFFF)
1144  return -1;
1145 
1146  if (!s->bayer || mb_x) {
1147  pred = left[i];
1148  } else { /* This path runs only for the first line in bayer images */
1149  vpred[i] += dc;
1150  pred = vpred[i] - dc;
1151  }
1152 
1153  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1154 
1155  left[i] = buffer[mb_x][i] =
1156  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1157  }
1158 
1159  if (s->restart_interval && !--s->restart_count) {
1160  align_get_bits(&s->gb);
1161  skip_bits(&s->gb, 16); /* skip RSTn */
1162  }
1163  }
1164  if (s->rct && s->nb_components == 4) {
1165  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1166  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1167  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1168  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1169  ptr[4*mb_x + 0] = buffer[mb_x][3];
1170  }
1171  } else if (s->nb_components == 4) {
1172  for(i=0; i<nb_components; i++) {
1173  int c= s->comp_index[i];
1174  if (s->bits <= 8) {
1175  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1176  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1177  }
1178  } else if(s->bits == 9) {
1179  return AVERROR_PATCHWELCOME;
1180  } else {
1181  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1182  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1183  }
1184  }
1185  }
1186  } else if (s->rct) {
1187  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1188  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1189  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1190  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1191  }
1192  } else if (s->pegasus_rct) {
1193  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1194  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1195  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1196  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1197  }
1198  } else if (s->bayer) {
1199  if (nb_components == 1) {
1200  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1201  for (mb_x = 0; mb_x < width; mb_x++)
1202  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1203  } else if (nb_components == 2) {
1204  for (mb_x = 0; mb_x < width; mb_x++) {
1205  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1206  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1207  }
1208  }
1209  } else {
1210  for(i=0; i<nb_components; i++) {
1211  int c= s->comp_index[i];
1212  if (s->bits <= 8) {
1213  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1214  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1215  }
1216  } else if(s->bits == 9) {
1217  return AVERROR_PATCHWELCOME;
1218  } else {
1219  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1220  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1221  }
1222  }
1223  }
1224  }
1225  }
1226  return 0;
1227 }
1228 
1229 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1230  int point_transform, int nb_components)
1231 {
1232  int i, mb_x, mb_y, mask;
1233  int bits= (s->bits+7)&~7;
1234  int resync_mb_y = 0;
1235  int resync_mb_x = 0;
1236 
1237  point_transform += bits - s->bits;
1238  mask = ((1 << s->bits) - 1) << point_transform;
1239 
1240  av_assert0(nb_components>=1 && nb_components<=4);
1241 
1242  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1243  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1244  if (get_bits_left(&s->gb) < 1) {
1245  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1246  return AVERROR_INVALIDDATA;
1247  }
1248  if (s->restart_interval && !s->restart_count){
1249  s->restart_count = s->restart_interval;
1250  resync_mb_y = mb_y;
1251  resync_mb_y = mb_y;
1252  }
1253 
1254  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1255  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1256  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1257  for (i = 0; i < nb_components; i++) {
1258  uint8_t *ptr;
1259  uint16_t *ptr16;
1260  int n, h, v, x, y, c, j, linesize;
1261  n = s->nb_blocks[i];
1262  c = s->comp_index[i];
1263  h = s->h_scount[i];
1264  v = s->v_scount[i];
1265  x = 0;
1266  y = 0;
1267  linesize= s->linesize[c];
1268 
1269  if(bits>8) linesize /= 2;
1270 
1271  for(j=0; j<n; j++) {
1272  int pred, dc;
1273 
1274  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1275  if(dc == 0xFFFFF)
1276  return -1;
1277  if ( h * mb_x + x >= s->width
1278  || v * mb_y + y >= s->height) {
1279  // Nothing to do
1280  } else if (bits<=8) {
1281  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1282  if(y==0 && toprow){
1283  if(x==0 && leftcol){
1284  pred= 1 << (bits - 1);
1285  }else{
1286  pred= ptr[-1];
1287  }
1288  }else{
1289  if(x==0 && leftcol){
1290  pred= ptr[-linesize];
1291  }else{
1292  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1293  }
1294  }
1295 
1296  if (s->interlaced && s->bottom_field)
1297  ptr += linesize >> 1;
1298  pred &= mask;
1299  *ptr= pred + ((unsigned)dc << point_transform);
1300  }else{
1301  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1302  if(y==0 && toprow){
1303  if(x==0 && leftcol){
1304  pred= 1 << (bits - 1);
1305  }else{
1306  pred= ptr16[-1];
1307  }
1308  }else{
1309  if(x==0 && leftcol){
1310  pred= ptr16[-linesize];
1311  }else{
1312  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1313  }
1314  }
1315 
1316  if (s->interlaced && s->bottom_field)
1317  ptr16 += linesize >> 1;
1318  pred &= mask;
1319  *ptr16= pred + ((unsigned)dc << point_transform);
1320  }
1321  if (++x == h) {
1322  x = 0;
1323  y++;
1324  }
1325  }
1326  }
1327  } else {
1328  for (i = 0; i < nb_components; i++) {
1329  uint8_t *ptr;
1330  uint16_t *ptr16;
1331  int n, h, v, x, y, c, j, linesize, dc;
1332  n = s->nb_blocks[i];
1333  c = s->comp_index[i];
1334  h = s->h_scount[i];
1335  v = s->v_scount[i];
1336  x = 0;
1337  y = 0;
1338  linesize = s->linesize[c];
1339 
1340  if(bits>8) linesize /= 2;
1341 
1342  for (j = 0; j < n; j++) {
1343  int pred;
1344 
1345  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1346  if(dc == 0xFFFFF)
1347  return -1;
1348  if ( h * mb_x + x >= s->width
1349  || v * mb_y + y >= s->height) {
1350  // Nothing to do
1351  } else if (bits<=8) {
1352  ptr = s->picture_ptr->data[c] +
1353  (linesize * (v * mb_y + y)) +
1354  (h * mb_x + x); //FIXME optimize this crap
1355  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1356 
1357  pred &= mask;
1358  *ptr = pred + ((unsigned)dc << point_transform);
1359  }else{
1360  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1361  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1362 
1363  pred &= mask;
1364  *ptr16= pred + ((unsigned)dc << point_transform);
1365  }
1366 
1367  if (++x == h) {
1368  x = 0;
1369  y++;
1370  }
1371  }
1372  }
1373  }
1374  if (s->restart_interval && !--s->restart_count) {
1375  align_get_bits(&s->gb);
1376  skip_bits(&s->gb, 16); /* skip RSTn */
1377  }
1378  }
1379  }
1380  return 0;
1381 }
1382 
1383 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1384  uint8_t *dst, const uint8_t *src,
1385  int linesize, int lowres)
1386 {
1387  switch (lowres) {
1388  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1389  break;
1390  case 1: copy_block4(dst, src, linesize, linesize, 4);
1391  break;
1392  case 2: copy_block2(dst, src, linesize, linesize, 2);
1393  break;
1394  case 3: *dst = *src;
1395  break;
1396  }
1397 }
1398 
1399 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1400 {
1401  int block_x, block_y;
1402  int size = 8 >> s->avctx->lowres;
1403  if (s->bits > 8) {
1404  for (block_y=0; block_y<size; block_y++)
1405  for (block_x=0; block_x<size; block_x++)
1406  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1407  } else {
1408  for (block_y=0; block_y<size; block_y++)
1409  for (block_x=0; block_x<size; block_x++)
1410  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1411  }
1412 }
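/* e.g. for a 12-bit stream stored in a 16-bit pixel format, each sample is shifted
 * left by 16 - 12 = 4, so the nominal peak of 4095 becomes 65520 and the data fills
 * the range expected by that format. */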
1413 
1414 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1415  int Al, const uint8_t *mb_bitmask,
1416  int mb_bitmask_size,
1417  const AVFrame *reference)
1418 {
1419  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1420  uint8_t *data[MAX_COMPONENTS];
1421  const uint8_t *reference_data[MAX_COMPONENTS];
1422  int linesize[MAX_COMPONENTS];
1423  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1424  int bytes_per_pixel = 1 + (s->bits > 8);
1425 
1426  if (mb_bitmask) {
1427  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1428  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1429  return AVERROR_INVALIDDATA;
1430  }
1431  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1432  }
1433 
1434  s->restart_count = 0;
1435 
1436  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1437  &chroma_v_shift);
1438  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1439  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1440 
1441  for (i = 0; i < nb_components; i++) {
1442  int c = s->comp_index[i];
1443  data[c] = s->picture_ptr->data[c];
1444  reference_data[c] = reference ? reference->data[c] : NULL;
1445  linesize[c] = s->linesize[c];
1446  s->coefs_finished[c] |= 1;
1447  }
1448 
1449  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1450  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1451  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1452 
1453  if (s->restart_interval && !s->restart_count)
1454  s->restart_count = s->restart_interval;
1455 
1456  if (get_bits_left(&s->gb) < 0) {
1457  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1458  -get_bits_left(&s->gb));
1459  return AVERROR_INVALIDDATA;
1460  }
1461  for (i = 0; i < nb_components; i++) {
1462  uint8_t *ptr;
1463  int n, h, v, x, y, c, j;
1464  int block_offset;
1465  n = s->nb_blocks[i];
1466  c = s->comp_index[i];
1467  h = s->h_scount[i];
1468  v = s->v_scount[i];
1469  x = 0;
1470  y = 0;
1471  for (j = 0; j < n; j++) {
1472  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1473  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1474 
1475  if (s->interlaced && s->bottom_field)
1476  block_offset += linesize[c] >> 1;
1477  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1478  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1479  ptr = data[c] + block_offset;
1480  } else
1481  ptr = NULL;
1482  if (!s->progressive) {
1483  if (copy_mb) {
1484  if (ptr)
1485  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1486  linesize[c], s->avctx->lowres);
1487 
1488  } else {
1489  s->bdsp.clear_block(s->block);
1490  if (decode_block(s, s->block, i,
1491  s->dc_index[i], s->ac_index[i],
1492  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1493  av_log(s->avctx, AV_LOG_ERROR,
1494  "error y=%d x=%d\n", mb_y, mb_x);
1495  return AVERROR_INVALIDDATA;
1496  }
1497  if (ptr) {
1498  s->idsp.idct_put(ptr, linesize[c], s->block);
1499  if (s->bits & 7)
1500  shift_output(s, ptr, linesize[c]);
1501  }
1502  }
1503  } else {
1504  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1505  (h * mb_x + x);
1506  int16_t *block = s->blocks[c][block_idx];
1507  if (Ah)
1508  block[0] += get_bits1(&s->gb) *
1509  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1510  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1511  s->quant_matrixes[s->quant_sindex[i]],
1512  Al) < 0) {
1513  av_log(s->avctx, AV_LOG_ERROR,
1514  "error y=%d x=%d\n", mb_y, mb_x);
1515  return AVERROR_INVALIDDATA;
1516  }
1517  }
1518  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1519  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1520  mb_x, mb_y, x, y, c, s->bottom_field,
1521  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1522  if (++x == h) {
1523  x = 0;
1524  y++;
1525  }
1526  }
1527  }
1528 
1529  handle_rstn(s, nb_components);
1530  }
1531  }
1532  return 0;
1533 }
1534 
1535 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1536  int se, int Ah, int Al)
1537 {
1538  int mb_x, mb_y;
1539  int EOBRUN = 0;
1540  int c = s->comp_index[0];
1541  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1542 
1543  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1544  if (se < ss || se > 63) {
1545  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1546  return AVERROR_INVALIDDATA;
1547  }
1548 
1549  // s->coefs_finished is a bitmask for coefficients coded
1550  // ss and se are parameters telling start and end coefficients
1551  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
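 // Example: ss=1, se=5 gives (2ULL<<5) - (1ULL<<1) = 62 = 0b111110, i.e. bits 1..5,
 // marking AC coefficients 1 through 5 of component c as coded.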
1552 
1553  s->restart_count = 0;
1554 
1555  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1556  int block_idx = mb_y * s->block_stride[c];
1557  int16_t (*block)[64] = &s->blocks[c][block_idx];
1558  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1559  if (get_bits_left(&s->gb) <= 0) {
1560  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1561  return AVERROR_INVALIDDATA;
1562  }
1563  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1564  int ret;
1565  if (s->restart_interval && !s->restart_count)
1566  s->restart_count = s->restart_interval;
1567 
1568  if (Ah)
1569  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1570  quant_matrix, ss, se, Al, &EOBRUN);
1571  else
1572  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1573  quant_matrix, ss, se, Al, &EOBRUN);
1574  if (ret < 0) {
1575  av_log(s->avctx, AV_LOG_ERROR,
1576  "error y=%d x=%d\n", mb_y, mb_x);
1577  return AVERROR_INVALIDDATA;
1578  }
1579 
1580  if (handle_rstn(s, 0))
1581  EOBRUN = 0;
1582  }
1583  }
1584  return 0;
1585 }
1586 
1587 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1588 {
1589  int mb_x, mb_y;
1590  int c;
1591  const int bytes_per_pixel = 1 + (s->bits > 8);
1592  const int block_size = s->lossless ? 1 : 8;
1593 
1594  for (c = 0; c < s->nb_components; c++) {
1595  uint8_t *data = s->picture_ptr->data[c];
1596  int linesize = s->linesize[c];
1597  int h = s->h_max / s->h_count[c];
1598  int v = s->v_max / s->v_count[c];
1599  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1600  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1601 
1602  if (~s->coefs_finished[c])
1603  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1604 
1605  if (s->interlaced && s->bottom_field)
1606  data += linesize >> 1;
1607 
1608  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1609  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1610  int block_idx = mb_y * s->block_stride[c];
1611  int16_t (*block)[64] = &s->blocks[c][block_idx];
1612  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1613  s->idsp.idct_put(ptr, linesize, *block);
1614  if (s->bits & 7)
1615  shift_output(s, ptr, linesize);
1616  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1617  }
1618  }
1619  }
1620 }
1621 
1622 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1623  int mb_bitmask_size, const AVFrame *reference)
1624 {
1625  int len, nb_components, i, h, v, predictor, point_transform;
1626  int index, id, ret;
1627  const int block_size = s->lossless ? 1 : 8;
1628  int ilv, prev_shift;
1629 
1630  if (!s->got_picture) {
1631  av_log(s->avctx, AV_LOG_WARNING,
1632  "Can not process SOS before SOF, skipping\n");
1633  return -1;
1634  }
1635 
1636  if (reference) {
1637  if (reference->width != s->picture_ptr->width ||
1638  reference->height != s->picture_ptr->height ||
1639  reference->format != s->picture_ptr->format) {
1640  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1641  return AVERROR_INVALIDDATA;
1642  }
1643  }
1644 
1645  /* XXX: verify len field validity */
1646  len = get_bits(&s->gb, 16);
1647  nb_components = get_bits(&s->gb, 8);
1648  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1649  avpriv_report_missing_feature(s->avctx,
1650  "decode_sos: nb_components (%d)",
1651  nb_components);
1652  return AVERROR_PATCHWELCOME;
1653  }
1654  if (len != 6 + 2 * nb_components) {
1655  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1656  return AVERROR_INVALIDDATA;
1657  }
1658  for (i = 0; i < nb_components; i++) {
1659  id = get_bits(&s->gb, 8) - 1;
1660  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1661  /* find component index */
1662  for (index = 0; index < s->nb_components; index++)
1663  if (id == s->component_id[index])
1664  break;
1665  if (index == s->nb_components) {
1666  av_log(s->avctx, AV_LOG_ERROR,
1667  "decode_sos: index(%d) out of components\n", index);
1668  return AVERROR_INVALIDDATA;
1669  }
1670  /* Metasoft MJPEG codec has Cb and Cr swapped */
1671  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1672  && nb_components == 3 && s->nb_components == 3 && i)
1673  index = 3 - i;
1674 
1675  s->quant_sindex[i] = s->quant_index[index];
1676  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1677  s->h_scount[i] = s->h_count[index];
1678  s->v_scount[i] = s->v_count[index];
1679 
1680  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1681  index = (index+2)%3;
1682 
1683  s->comp_index[i] = index;
1684 
1685  s->dc_index[i] = get_bits(&s->gb, 4);
1686  s->ac_index[i] = get_bits(&s->gb, 4);
1687 
1688  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1689  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1690  goto out_of_range;
1691  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1692  goto out_of_range;
1693  }
1694 
1695  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1696  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1697  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1698  prev_shift = get_bits(&s->gb, 4); /* Ah */
1699  point_transform = get_bits(&s->gb, 4); /* Al */
1700  }else
1701  prev_shift = point_transform = 0;
1702 
1703  if (nb_components > 1) {
1704  /* interleaved stream */
1705  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1706  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1707  } else if (!s->ls) { /* skip this for JPEG-LS */
1708  h = s->h_max / s->h_scount[0];
1709  v = s->v_max / s->v_scount[0];
1710  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1711  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1712  s->nb_blocks[0] = 1;
1713  s->h_scount[0] = 1;
1714  s->v_scount[0] = 1;
1715  }
1716 
1717  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1718  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1719  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1720  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1721  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1722 
1723 
1724  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1725  for (i = s->mjpb_skiptosod; i > 0; i--)
1726  skip_bits(&s->gb, 8);
1727 
1728 next_field:
1729  for (i = 0; i < nb_components; i++)
1730  s->last_dc[i] = (4 << s->bits);
1731 
1732  if (s->avctx->hwaccel) {
1733  int bytes_to_start = get_bits_count(&s->gb) / 8;
1734  av_assert0(bytes_to_start >= 0 &&
1735  s->raw_scan_buffer_size >= bytes_to_start);
1736 
1737  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1738  s->raw_scan_buffer + bytes_to_start,
1739  s->raw_scan_buffer_size - bytes_to_start);
1740  if (ret < 0)
1741  return ret;
1742 
1743  } else if (s->lossless) {
1744  av_assert0(s->picture_ptr == s->picture);
1745  if (CONFIG_JPEGLS_DECODER && s->ls) {
1746 // for () {
1747 // reset_ls_coding_parameters(s, 0);
1748 
1749  if ((ret = ff_jpegls_decode_picture(s, predictor,
1750  point_transform, ilv)) < 0)
1751  return ret;
1752  } else {
1753  if (s->rgb || s->bayer) {
1754  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1755  return ret;
1756  } else {
1757  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1758  point_transform,
1759  nb_components)) < 0)
1760  return ret;
1761  }
1762  }
1763  } else {
1764  if (s->progressive && predictor) {
1765  av_assert0(s->picture_ptr == s->picture);
1766  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1767  ilv, prev_shift,
1768  point_transform)) < 0)
1769  return ret;
1770  } else {
1771  if ((ret = mjpeg_decode_scan(s, nb_components,
1772  prev_shift, point_transform,
1773  mb_bitmask, mb_bitmask_size, reference)) < 0)
1774  return ret;
1775  }
1776  }
1777 
1778  if (s->interlaced &&
1779  get_bits_left(&s->gb) > 32 &&
1780  show_bits(&s->gb, 8) == 0xFF) {
1781  GetBitContext bak = s->gb;
1782  align_get_bits(&bak);
1783  if (show_bits(&bak, 16) == 0xFFD1) {
1784  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1785  s->gb = bak;
1786  skip_bits(&s->gb, 16);
1787  s->bottom_field ^= 1;
1788 
1789  goto next_field;
1790  }
1791  }
1792 
1793  emms_c();
1794  return 0;
1795  out_of_range:
1796  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1797  return AVERROR_INVALIDDATA;
1798 }
1799 
1800 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1801 {
1802  if (get_bits(&s->gb, 16) != 4)
1803  return AVERROR_INVALIDDATA;
1804  s->restart_interval = get_bits(&s->gb, 16);
1805  s->restart_count = 0;
1806  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1807  s->restart_interval);
1808 
1809  return 0;
1810 }
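/* For reference: DRI (FFDD) carries a fixed 4-byte payload, i.e. length = 4 followed
 * by the 16-bit restart interval in MCUs. With a non-zero interval the entropy-coded
 * scan is interrupted after every restart_interval MCUs by a restart marker
 * RST0..RST7 (FFD0..FFD7, cycling modulo 8), at which point the DC predictors are
 * reset (see handle_rstn() above). */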
1811 
1812 static int mjpeg_decode_app(MJpegDecodeContext *s)
1813 {
1814  int len, id, i;
1815 
1816  len = get_bits(&s->gb, 16);
1817  if (len < 6) {
1818  if (s->bayer) {
1819  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1820  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1821  skip_bits(&s->gb, len);
1822  return 0;
1823  } else
1824  return AVERROR_INVALIDDATA;
1825  }
1826  if (8 * len > get_bits_left(&s->gb))
1827  return AVERROR_INVALIDDATA;
1828 
1829  id = get_bits_long(&s->gb, 32);
1830  len -= 6;
1831 
1832  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1833  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1834  av_fourcc2str(av_bswap32(id)), id, len);
1835 
1836  /* Buggy AVID, it puts EOI only at every 10th frame. */
1837  /* Also, this fourcc is used by non-avid files too, it holds some
1838  information, but it's always present in AVID-created files. */
1839  if (id == AV_RB32("AVI1")) {
1840  /* structure:
1841  4bytes AVI1
1842  1bytes polarity
1843  1bytes always zero
1844  4bytes field_size
1845  4bytes field_size_less_padding
1846  */
1847  s->buggy_avid = 1;
1848  i = get_bits(&s->gb, 8); len--;
1849  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1850  goto out;
1851  }
1852 
1853  if (id == AV_RB32("JFIF")) {
1854  int t_w, t_h, v1, v2;
1855  if (len < 8)
1856  goto out;
1857  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1858  v1 = get_bits(&s->gb, 8);
1859  v2 = get_bits(&s->gb, 8);
1860  skip_bits(&s->gb, 8);
1861 
1862  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1863  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1864  if ( s->avctx->sample_aspect_ratio.num <= 0
1865  || s->avctx->sample_aspect_ratio.den <= 0) {
1866  s->avctx->sample_aspect_ratio.num = 0;
1867  s->avctx->sample_aspect_ratio.den = 1;
1868  }
1869 
1870  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1871  av_log(s->avctx, AV_LOG_INFO,
1872  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1873  v1, v2,
1874  s->avctx->sample_aspect_ratio.num,
1875  s->avctx->sample_aspect_ratio.den);
1876 
1877  len -= 8;
1878  if (len >= 2) {
1879  t_w = get_bits(&s->gb, 8);
1880  t_h = get_bits(&s->gb, 8);
1881  if (t_w && t_h) {
1882  /* skip thumbnail */
1883  if (len -10 - (t_w * t_h * 3) > 0)
1884  len -= t_w * t_h * 3;
1885  }
1886  len -= 2;
1887  }
1888  goto out;
1889  }
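/* For reference, the JFIF APP0 payload read above is: "JFIF\0", a 2-byte version
 * (major.minor), a 1-byte density-units field, 2-byte Xdensity and Ydensity, then a
 * 1-byte thumbnail width and height followed by an optional 3*w*h RGB thumbnail.
 * The code treats Xdensity/Ydensity directly as the sample aspect ratio and skips
 * the units byte. */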
1890 
1891  if ( id == AV_RB32("Adob")
1892  && len >= 7
1893  && show_bits(&s->gb, 8) == 'e'
1894  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1895  skip_bits(&s->gb, 8); /* 'e' */
1896  skip_bits(&s->gb, 16); /* version */
1897  skip_bits(&s->gb, 16); /* flags0 */
1898  skip_bits(&s->gb, 16); /* flags1 */
1899  s->adobe_transform = get_bits(&s->gb, 8);
1900  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1901  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1902  len -= 7;
1903  goto out;
1904  }
1905 
1906  if (id == AV_RB32("LJIF")) {
1907  int rgb = s->rgb;
1908  int pegasus_rct = s->pegasus_rct;
1909  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1910  av_log(s->avctx, AV_LOG_INFO,
1911  "Pegasus lossless jpeg header found\n");
1912  skip_bits(&s->gb, 16); /* version ? */
1913  skip_bits(&s->gb, 16); /* unknown always 0? */
1914  skip_bits(&s->gb, 16); /* unknown always 0? */
1915  skip_bits(&s->gb, 16); /* unknown always 0? */
1916  switch (i=get_bits(&s->gb, 8)) {
1917  case 1:
1918  rgb = 1;
1919  pegasus_rct = 0;
1920  break;
1921  case 2:
1922  rgb = 1;
1923  pegasus_rct = 1;
1924  break;
1925  default:
1926  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1927  }
1928 
1929  len -= 9;
1930  if (s->got_picture)
1931  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1932  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1933  goto out;
1934  }
1935 
1936  s->rgb = rgb;
1937  s->pegasus_rct = pegasus_rct;
1938 
1939  goto out;
1940  }
1941  if (id == AV_RL32("colr") && len > 0) {
1942  s->colr = get_bits(&s->gb, 8);
1943  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1944  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1945  len --;
1946  goto out;
1947  }
1948  if (id == AV_RL32("xfrm") && len > 0) {
1949  s->xfrm = get_bits(&s->gb, 8);
1950  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1951  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1952  len --;
1953  goto out;
1954  }
1955 
1956  /* JPS extension by VRex */
1957  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1958  int flags, layout, type;
1959  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1960  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1961 
1962  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1963  skip_bits(&s->gb, 16); len -= 2; /* block length */
1964  skip_bits(&s->gb, 8); /* reserved */
1965  flags = get_bits(&s->gb, 8);
1966  layout = get_bits(&s->gb, 8);
1967  type = get_bits(&s->gb, 8);
1968  len -= 4;
1969 
1970  av_freep(&s->stereo3d);
1971  s->stereo3d = av_stereo3d_alloc();
1972  if (!s->stereo3d) {
1973  goto out;
1974  }
1975  if (type == 0) {
1976  s->stereo3d->type = AV_STEREO3D_2D;
1977  } else if (type == 1) {
1978  switch (layout) {
1979  case 0x01:
1980  s->stereo3d->type = AV_STEREO3D_LINES;
1981  break;
1982  case 0x02:
1983  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1984  break;
1985  case 0x03:
1986  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1987  break;
1988  }
1989  if (!(flags & 0x04)) {
1990  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1991  }
1992  }
1993  goto out;
1994  }
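 /* For reference: in the _JPS extension handled above, type 1 marks a
  * stereoscopic image and the layout byte selects the packing that is mapped
  * to AVStereo3D (0x01 interleaved lines, 0x02 side by side, 0x03 over/under);
  * a cleared 0x04 flag bit is treated as swapped views and sets
  * AV_STEREO3D_FLAG_INVERT. */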
1995 
1996  /* EXIF metadata */
1997  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1998  GetByteContext gbytes;
1999  int ret, le, ifd_offset, bytes_read;
2000  const uint8_t *aligned;
2001 
2002  skip_bits(&s->gb, 16); // skip padding
2003  len -= 2;
2004 
2005  // init byte wise reading
2006  aligned = align_get_bits(&s->gb);
2007  bytestream2_init(&gbytes, aligned, len);
2008 
2009  // read TIFF header
2010  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2011  if (ret) {
2012  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2013  } else {
2014  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2015 
2016  // read 0th IFD and store the metadata
2017  // (return values > 0 indicate the presence of subimage metadata)
2018  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2019  if (ret < 0) {
2020  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2021  }
2022  }
2023 
2024  bytes_read = bytestream2_tell(&gbytes);
2025  skip_bits(&s->gb, bytes_read << 3);
2026  len -= bytes_read;
2027 
2028  goto out;
2029  }
2030 
2031  /* Apple MJPEG-A */
2032  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2033  id = get_bits_long(&s->gb, 32);
2034  len -= 4;
2035  /* Apple MJPEG-A */
2036  if (id == AV_RB32("mjpg")) {
2037  /* structure:
2038  4 bytes field size
2039  4 bytes pad field size
2040  4 bytes next off
2041  4 bytes quant off
2042  4 bytes huff off
2043  4 bytes image off
2044  4 bytes scan off
2045  4 bytes data off
2046  */
2047  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2048  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2049  }
2050  }
2051 
2052  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2053  int id2;
2054  unsigned seqno;
2055  unsigned nummarkers;
2056 
2057  id = get_bits_long(&s->gb, 32);
2058  id2 = get_bits(&s->gb, 24);
2059  len -= 7;
2060  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2061  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2062  goto out;
2063  }
2064 
2065  skip_bits(&s->gb, 8);
2066  seqno = get_bits(&s->gb, 8);
2067  len -= 2;
2068  if (seqno == 0) {
2069  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2070  goto out;
2071  }
2072 
2073  nummarkers = get_bits(&s->gb, 8);
2074  len -= 1;
2075  if (nummarkers == 0) {
2076  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2077  goto out;
2078  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2079  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2080  goto out;
2081  } else if (seqno > nummarkers) {
2082  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2083  goto out;
2084  }
2085 
2086  /* Allocate if this is the first APP2 we've seen. */
2087  if (s->iccnum == 0) {
2088  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2089  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2090  if (!s->iccdata || !s->iccdatalens) {
2091  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2092  return AVERROR(ENOMEM);
2093  }
2094  s->iccnum = nummarkers;
2095  }
2096 
2097  if (s->iccdata[seqno - 1]) {
2098  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2099  goto out;
2100  }
2101 
2102  s->iccdatalens[seqno - 1] = len;
2103  s->iccdata[seqno - 1] = av_malloc(len);
2104  if (!s->iccdata[seqno - 1]) {
2105  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2106  return AVERROR(ENOMEM);
2107  }
2108 
2109  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2110  skip_bits(&s->gb, len << 3);
2111  len = 0;
2112  s->iccread++;
2113 
2114  if (s->iccread > s->iccnum)
2115  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2116  }
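 /* For reference: an ICC profile larger than one marker segment is split
  * across several APP2 "ICC_PROFILE" chunks, each carrying a 1-based sequence
  * number and the total chunk count; the chunks collected here are
  * concatenated into AV_FRAME_DATA_ICC_PROFILE side data once all of them have
  * been read (see the iccnum == iccread check in ff_mjpeg_receive_frame()). */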
2117 
2118 out:
2119  /* slow but needed for extreme Adobe JPEGs */
2120  if (len < 0)
2121  av_log(s->avctx, AV_LOG_ERROR,
2122  "mjpeg: error, decode_app parser read over the end\n");
2123  while (--len > 0)
2124  skip_bits(&s->gb, 8);
2125 
2126  return 0;
2127 }
2128 
2129 static int mjpeg_decode_com(MJpegDecodeContext *s)
2130 {
2131  int len = get_bits(&s->gb, 16);
2132  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2133  int i;
2134  char *cbuf = av_malloc(len - 1);
2135  if (!cbuf)
2136  return AVERROR(ENOMEM);
2137 
2138  for (i = 0; i < len - 2; i++)
2139  cbuf[i] = get_bits(&s->gb, 8);
2140  if (i > 0 && cbuf[i - 1] == '\n')
2141  cbuf[i - 1] = 0;
2142  else
2143  cbuf[i] = 0;
2144 
2145  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2146  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2147 
2148  /* buggy avid, it puts EOI only at every 10th frame */
2149  if (!strncmp(cbuf, "AVID", 4)) {
2150  parse_avid(s, cbuf, len);
2151  } else if (!strcmp(cbuf, "CS=ITU601"))
2152  s->cs_itu601 = 1;
2153  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2154  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2155  s->flipped = 1;
2156  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2157  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2158  s->multiscope = 2;
2159  }
2160 
2161  av_free(cbuf);
2162  }
2163 
2164  return 0;
2165 }
2166 
2167 /* return the 8-bit start code value and update the search
2168  state. Return -1 if no start code found */
2169 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2170 {
2171  const uint8_t *buf_ptr;
2172  unsigned int v, v2;
2173  int val;
2174  int skipped = 0;
2175 
2176  buf_ptr = *pbuf_ptr;
2177  while (buf_end - buf_ptr > 1) {
2178  v = *buf_ptr++;
2179  v2 = *buf_ptr;
2180  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2181  val = *buf_ptr++;
2182  goto found;
2183  }
2184  skipped++;
2185  }
2186  buf_ptr = buf_end;
2187  val = -1;
2188 found:
2189  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2190  *pbuf_ptr = buf_ptr;
2191  return val;
2192 }
2193 
2194 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2195  const uint8_t **buf_ptr, const uint8_t *buf_end,
2196  const uint8_t **unescaped_buf_ptr,
2197  int *unescaped_buf_size)
2198 {
2199  int start_code;
2200  start_code = find_marker(buf_ptr, buf_end);
2201 
2202  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2203  if (!s->buffer)
2204  return AVERROR(ENOMEM);
2205 
2206  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2207  if (start_code == SOS && !s->ls) {
2208  const uint8_t *src = *buf_ptr;
2209  const uint8_t *ptr = src;
2210  uint8_t *dst = s->buffer;
2211 
2212  #define copy_data_segment(skip) do { \
2213  ptrdiff_t length = (ptr - src) - (skip); \
2214  if (length > 0) { \
2215  memcpy(dst, src, length); \
2216  dst += length; \
2217  src = ptr; \
2218  } \
2219  } while (0)
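 /* For reference: in JPEG entropy-coded data a literal 0xFF byte is stored as
  * the stuffed pair 0xFF 0x00; the unescaping loop below (non-THP branch)
  * drops the stuffing byte, keeps RST0..RST7 restart markers in the output and
  * stops at any other marker, which terminates the scan data. */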
2220 
2221  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2222  ptr = buf_end;
2223  copy_data_segment(0);
2224  } else {
2225  while (ptr < buf_end) {
2226  uint8_t x = *(ptr++);
2227 
2228  if (x == 0xff) {
2229  ptrdiff_t skip = 0;
2230  while (ptr < buf_end && x == 0xff) {
2231  x = *(ptr++);
2232  skip++;
2233  }
2234 
2235  /* 0xFF, 0xFF, ... */
2236  if (skip > 1) {
2237  copy_data_segment(skip);
2238 
2239  /* decrement src as it is equal to ptr after the
2240  * copy_data_segment macro and we might want to
2241  * copy the current value of x later on */
2242  src--;
2243  }
2244 
2245  if (x < RST0 || x > RST7) {
2246  copy_data_segment(1);
2247  if (x)
2248  break;
2249  }
2250  }
2251  }
2252  if (src < ptr)
2253  copy_data_segment(0);
2254  }
2255  #undef copy_data_segment
2256 
2257  *unescaped_buf_ptr = s->buffer;
2258  *unescaped_buf_size = dst - s->buffer;
2259  memset(s->buffer + *unescaped_buf_size, 0,
2260  AV_INPUT_BUFFER_PADDING_SIZE);
2261 
2262  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2263  (buf_end - *buf_ptr) - (dst - s->buffer));
2264  } else if (start_code == SOS && s->ls) {
2265  const uint8_t *src = *buf_ptr;
2266  uint8_t *dst = s->buffer;
2267  int bit_count = 0;
2268  int t = 0, b = 0;
2269  PutBitContext pb;
2270 
2271  /* find marker */
2272  while (src + t < buf_end) {
2273  uint8_t x = src[t++];
2274  if (x == 0xff) {
2275  while ((src + t < buf_end) && x == 0xff)
2276  x = src[t++];
2277  if (x & 0x80) {
2278  t -= FFMIN(2, t);
2279  break;
2280  }
2281  }
2282  }
2283  bit_count = t * 8;
2284  init_put_bits(&pb, dst, t);
2285 
2286  /* unescape bitstream */
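 /* For reference: JPEG-LS marker escaping differs from baseline JPEG: a 0xFF
  * in the coded data is followed by a byte whose MSB is 0 and only the low
  * 7 bits of that byte carry payload, so the data is repacked bit by bit and
  * one bit is subtracted from bit_count for every escape. */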
2287  while (b < t) {
2288  uint8_t x = src[b++];
2289  put_bits(&pb, 8, x);
2290  if (x == 0xFF && b < t) {
2291  x = src[b++];
2292  if (x & 0x80) {
2293  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2294  x &= 0x7f;
2295  }
2296  put_bits(&pb, 7, x);
2297  bit_count--;
2298  }
2299  }
2300  flush_put_bits(&pb);
2301 
2302  *unescaped_buf_ptr = dst;
2303  *unescaped_buf_size = (bit_count + 7) >> 3;
2304  memset(s->buffer + *unescaped_buf_size, 0,
2305  AV_INPUT_BUFFER_PADDING_SIZE);
2306  } else {
2307  *unescaped_buf_ptr = *buf_ptr;
2308  *unescaped_buf_size = buf_end - *buf_ptr;
2309  }
2310 
2311  return start_code;
2312 }
2313 
2314 static void reset_icc_profile(MJpegDecodeContext *s)
2315 {
2316  int i;
2317 
2318  if (s->iccdata)
2319  for (i = 0; i < s->iccnum; i++)
2320  av_freep(&s->iccdata[i]);
2321  av_freep(&s->iccdata);
2322  av_freep(&s->iccdatalens);
2323 
2324  s->iccread = 0;
2325  s->iccnum = 0;
2326 }
2327 
2328 // SMV JPEG just stacks several output frames into one JPEG picture
2329 // we handle that by setting up the cropping parameters appropriately
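// For example (illustrative numbers only): with frames of height 192 stacked
// into a coded picture of height 1920, output frame n is returned full-size
// with crop_top = n * 192 and crop_bottom = 1920 - (n + 1) * 192, matching the
// frame->crop_top / frame->crop_bottom assignments below.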
2330 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2331 {
2332  MJpegDecodeContext *s = avctx->priv_data;
2333  int ret;
2334 
2335  if (s->smv_next_frame > 0) {
2336  av_assert0(s->smv_frame->buf[0]);
2337  av_frame_unref(frame);
2338  ret = av_frame_ref(frame, s->smv_frame);
2339  if (ret < 0)
2340  return ret;
2341  } else {
2342  av_assert0(frame->buf[0]);
2343  av_frame_unref(s->smv_frame);
2344  ret = av_frame_ref(s->smv_frame, frame);
2345  if (ret < 0)
2346  return ret;
2347  }
2348 
2349  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2350 
2351  frame->width = avctx->coded_width;
2352  frame->height = avctx->coded_height;
2353  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2354  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2355 
2356  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2357 
2358  if (s->smv_next_frame == 0)
2359  av_frame_unref(s->smv_frame);
2360 
2361  return 0;
2362 }
2363 
2364 static int mjpeg_get_packet(AVCodecContext *avctx)
2365 {
2366  MJpegDecodeContext *s = avctx->priv_data;
2367  int ret;
2368 
2369  av_packet_unref(s->pkt);
2370  ret = ff_decode_get_packet(avctx, s->pkt);
2371  if (ret < 0)
2372  return ret;
2373 
2374 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2375  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2376  avctx->codec_id == AV_CODEC_ID_AMV) {
2377  ret = ff_sp5x_process_packet(avctx, s->pkt);
2378  if (ret < 0)
2379  return ret;
2380  }
2381 #endif
2382 
2383  s->buf_size = s->pkt->size;
2384 
2385  return 0;
2386 }
2387 
2388 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2389 {
2390  MJpegDecodeContext *s = avctx->priv_data;
2391  const uint8_t *buf_end, *buf_ptr;
2392  const uint8_t *unescaped_buf_ptr;
2393  int hshift, vshift;
2394  int unescaped_buf_size;
2395  int start_code;
2396  int i, index;
2397  int ret = 0;
2398  int is16bit;
2399 
2400  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2401  return smv_process_frame(avctx, frame);
2402 
2403  av_dict_free(&s->exif_metadata);
2404  av_freep(&s->stereo3d);
2405  s->adobe_transform = -1;
2406 
2407  if (s->iccnum != 0)
2408  reset_icc_profile(s);
2409 
2410  ret = mjpeg_get_packet(avctx);
2411  if (ret < 0)
2412  return ret;
2413 
2414  buf_ptr = s->pkt->data;
2415  buf_end = s->pkt->data + s->pkt->size;
2416  while (buf_ptr < buf_end) {
2417  /* find the next start marker */
2418  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2419  &unescaped_buf_ptr,
2420  &unescaped_buf_size);
2421  /* EOF */
2422  if (start_code < 0) {
2423  break;
2424  } else if (unescaped_buf_size > INT_MAX / 8) {
2425  av_log(avctx, AV_LOG_ERROR,
2426  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2427  start_code, unescaped_buf_size, s->pkt->size);
2428  return AVERROR_INVALIDDATA;
2429  }
2430  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2431  start_code, buf_end - buf_ptr);
2432 
2433  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2434 
2435  if (ret < 0) {
2436  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2437  goto fail;
2438  }
2439 
2440  s->start_code = start_code;
2441  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2442  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2443 
2444  /* process markers */
2445  if (start_code >= RST0 && start_code <= RST7) {
2446  av_log(avctx, AV_LOG_DEBUG,
2447  "restart marker: %d\n", start_code & 0x0f);
2448  /* APP fields */
2449  } else if (start_code >= APP0 && start_code <= APP15) {
2450  if ((ret = mjpeg_decode_app(s)) < 0)
2451  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2452  av_err2str(ret));
2453  /* Comment */
2454  } else if (start_code == COM) {
2455  ret = mjpeg_decode_com(s);
2456  if (ret < 0)
2457  return ret;
2458  } else if (start_code == DQT) {
2459  ret = ff_mjpeg_decode_dqt(s);
2460  if (ret < 0)
2461  return ret;
2462  }
2463 
2464  ret = -1;
2465 
2466  if (!CONFIG_JPEGLS_DECODER &&
2467  (start_code == SOF48 || start_code == LSE)) {
2468  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2469  return AVERROR(ENOSYS);
2470  }
2471 
2472  if (avctx->skip_frame == AVDISCARD_ALL) {
2473  switch(start_code) {
2474  case SOF0:
2475  case SOF1:
2476  case SOF2:
2477  case SOF3:
2478  case SOF48:
2479  case SOI:
2480  case SOS:
2481  case EOI:
2482  break;
2483  default:
2484  goto skip;
2485  }
2486  }
2487 
2488  switch (start_code) {
2489  case SOI:
2490  s->restart_interval = 0;
2491  s->restart_count = 0;
2492  s->raw_image_buffer = buf_ptr;
2493  s->raw_image_buffer_size = buf_end - buf_ptr;
2494  /* nothing to do on SOI */
2495  break;
2496  case DHT:
2497  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2498  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2499  goto fail;
2500  }
2501  break;
2502  case SOF0:
2503  case SOF1:
2504  if (start_code == SOF0)
2505  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2506  else
2507  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2508  s->lossless = 0;
2509  s->ls = 0;
2510  s->progressive = 0;
2511  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2512  goto fail;
2513  break;
2514  case SOF2:
2515  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2516  s->lossless = 0;
2517  s->ls = 0;
2518  s->progressive = 1;
2519  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2520  goto fail;
2521  break;
2522  case SOF3:
2523  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2524  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2525  s->lossless = 1;
2526  s->ls = 0;
2527  s->progressive = 0;
2528  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2529  goto fail;
2530  break;
2531  case SOF48:
2532  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2533  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2534  s->lossless = 1;
2535  s->ls = 1;
2536  s->progressive = 0;
2537  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2538  goto fail;
2539  break;
2540  case LSE:
2541  if (!CONFIG_JPEGLS_DECODER ||
2542  (ret = ff_jpegls_decode_lse(s)) < 0)
2543  goto fail;
2544  break;
2545  case EOI:
2546 eoi_parser:
2547  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2548  s->progressive && s->cur_scan && s->got_picture)
2549  mjpeg_idct_scan_progressive_ac(s);
2550  s->cur_scan = 0;
2551  if (!s->got_picture) {
2552  av_log(avctx, AV_LOG_WARNING,
2553  "Found EOI before any SOF, ignoring\n");
2554  break;
2555  }
2556  if (s->interlaced) {
2557  s->bottom_field ^= 1;
2558  /* if not bottom field, do not output image yet */
2559  if (s->bottom_field == !s->interlace_polarity)
2560  break;
2561  }
2562  if (avctx->skip_frame == AVDISCARD_ALL) {
2563  s->got_picture = 0;
2564  ret = AVERROR(EAGAIN);
2565  goto the_end_no_picture;
2566  }
2567  if (s->avctx->hwaccel) {
2568  ret = s->avctx->hwaccel->end_frame(s->avctx);
2569  if (ret < 0)
2570  return ret;
2571 
2572  av_freep(&s->hwaccel_picture_private);
2573  }
2574  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2575  return ret;
2576  s->got_picture = 0;
2577 
2578  frame->pkt_dts = s->pkt->dts;
2579 
2580  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2581  int qp = FFMAX3(s->qscale[0],
2582  s->qscale[1],
2583  s->qscale[2]);
2584 
2585  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2586  }
2587 
2588  goto the_end;
2589  case SOS:
2590  s->raw_scan_buffer = buf_ptr;
2591  s->raw_scan_buffer_size = buf_end - buf_ptr;
2592 
2593  s->cur_scan++;
2594  if (avctx->skip_frame == AVDISCARD_ALL) {
2595  skip_bits(&s->gb, get_bits_left(&s->gb));
2596  break;
2597  }
2598 
2599  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2600  (avctx->err_recognition & AV_EF_EXPLODE))
2601  goto fail;
2602  break;
2603  case DRI:
2604  if ((ret = mjpeg_decode_dri(s)) < 0)
2605  return ret;
2606  break;
2607  case SOF5:
2608  case SOF6:
2609  case SOF7:
2610  case SOF9:
2611  case SOF10:
2612  case SOF11:
2613  case SOF13:
2614  case SOF14:
2615  case SOF15:
2616  case JPG:
2617  av_log(avctx, AV_LOG_ERROR,
2618  "mjpeg: unsupported coding type (%x)\n", start_code);
2619  break;
2620  }
2621 
2622 skip:
2623  /* done with this marker: advance past the bytes consumed by the parser */
2624  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2625  av_log(avctx, AV_LOG_DEBUG,
2626  "marker parser used %d bytes (%d bits)\n",
2627  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2628  }
2629  if (s->got_picture && s->cur_scan) {
2630  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2631  goto eoi_parser;
2632  }
2633  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2634  return AVERROR_INVALIDDATA;
2635 fail:
2636  s->got_picture = 0;
2637  return ret;
2638 the_end:
2639 
2640  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2641 
2642  if (AV_RB32(s->upscale_h)) {
2643  int p;
2644  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2654  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2655  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2656  );
2657  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2658  if (ret)
2659  return ret;
2660 
2661  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2662  for (p = 0; p<s->nb_components; p++) {
2663  uint8_t *line = s->picture_ptr->data[p];
2664  int w = s->width;
2665  int h = s->height;
2666  if (!s->upscale_h[p])
2667  continue;
2668  if (p==1 || p==2) {
2669  w = AV_CEIL_RSHIFT(w, hshift);
2670  h = AV_CEIL_RSHIFT(h, vshift);
2671  }
2672  if (s->upscale_v[p] == 1)
2673  h = (h+1)>>1;
2674  av_assert0(w > 0);
2675  for (i = 0; i < h; i++) {
2676  if (s->upscale_h[p] == 1) {
2677  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2678  else line[w - 1] = line[(w - 1) / 2];
2679  for (index = w - 2; index > 0; index--) {
2680  if (is16bit)
2681  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2682  else
2683  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2684  }
2685  } else if (s->upscale_h[p] == 2) {
2686  if (is16bit) {
2687  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2688  if (w > 1)
2689  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2690  } else {
2691  line[w - 1] = line[(w - 1) / 3];
2692  if (w > 1)
2693  line[w - 2] = line[w - 1];
2694  }
2695  for (index = w - 3; index > 0; index--) {
2696  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2697  }
2698  }
2699  line += s->linesize[p];
2700  }
2701  }
2702  }
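 /* For reference: upscale_h/upscale_v are set in ff_mjpeg_decode_sof() when
  * the chosen output pixel format stores a component at higher resolution than
  * the stream actually coded; the decoded samples then occupy only part of
  * each plane and are stretched back to the full plane width (above) and
  * height (below) by simple linear interpolation. */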
2703  if (AV_RB32(s->upscale_v)) {
2704  int p;
2705  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2706  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2707  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2708  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2716  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2717  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2718  );
2719  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2720  if (ret)
2721  return ret;
2722 
2723  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2724  for (p = 0; p < s->nb_components; p++) {
2725  uint8_t *dst;
2726  int w = s->width;
2727  int h = s->height;
2728  if (!s->upscale_v[p])
2729  continue;
2730  if (p==1 || p==2) {
2731  w = AV_CEIL_RSHIFT(w, hshift);
2732  h = AV_CEIL_RSHIFT(h, vshift);
2733  }
2734  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2735  for (i = h - 1; i; i--) {
2736  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2737  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2738  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2739  memcpy(dst, src1, w);
2740  } else {
2741  for (index = 0; index < w; index++)
2742  dst[index] = (src1[index] + src2[index]) >> 1;
2743  }
2744  dst -= s->linesize[p];
2745  }
2746  }
2747  }
2748  if (s->flipped && !s->rgb) {
2749  int j;
2750  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2751  if (ret)
2752  return ret;
2753 
2754  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2755  for (index=0; index<s->nb_components; index++) {
2756  uint8_t *dst = s->picture_ptr->data[index];
2757  int w = s->picture_ptr->width;
2758  int h = s->picture_ptr->height;
2759  if(index && index<3){
2760  w = AV_CEIL_RSHIFT(w, hshift);
2761  h = AV_CEIL_RSHIFT(h, vshift);
2762  }
2763  if(dst){
2764  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2765  for (i=0; i<h/2; i++) {
2766  for (j=0; j<w; j++)
2767  FFSWAP(int, dst[j], dst2[j]);
2768  dst += s->picture_ptr->linesize[index];
2769  dst2 -= s->picture_ptr->linesize[index];
2770  }
2771  }
2772  }
2773  }
2774  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2775  int w = s->picture_ptr->width;
2776  int h = s->picture_ptr->height;
2777  av_assert0(s->nb_components == 4);
2778  for (i=0; i<h; i++) {
2779  int j;
2780  uint8_t *dst[4];
2781  for (index=0; index<4; index++) {
2782  dst[index] = s->picture_ptr->data[index]
2783  + s->picture_ptr->linesize[index]*i;
2784  }
2785  for (j=0; j<w; j++) {
2786  int k = dst[3][j];
2787  int r = dst[0][j] * k;
2788  int g = dst[1][j] * k;
2789  int b = dst[2][j] * k;
2790  dst[0][j] = g*257 >> 16;
2791  dst[1][j] = b*257 >> 16;
2792  dst[2][j] = r*257 >> 16;
2793  dst[3][j] = 255;
2794  }
2795  }
2796  }
2797  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2798  int w = s->picture_ptr->width;
2799  int h = s->picture_ptr->height;
2800  av_assert0(s->nb_components == 4);
2801  for (i=0; i<h; i++) {
2802  int j;
2803  uint8_t *dst[4];
2804  for (index=0; index<4; index++) {
2805  dst[index] = s->picture_ptr->data[index]
2806  + s->picture_ptr->linesize[index]*i;
2807  }
2808  for (j=0; j<w; j++) {
2809  int k = dst[3][j];
2810  int r = (255 - dst[0][j]) * k;
2811  int g = (128 - dst[1][j]) * k;
2812  int b = (128 - dst[2][j]) * k;
2813  dst[0][j] = r*257 >> 16;
2814  dst[1][j] = (g*257 >> 16) + 128;
2815  dst[2][j] = (b*257 >> 16) + 128;
2816  dst[3][j] = 255;
2817  }
2818  }
2819  }
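 /* For reference: the two loops above handle Adobe 4-component JPEGs that have
  * no native pixel format: with transform 0 the planes are treated as inverted
  * CMYK and scaled by the K channel to approximate RGB, and with transform 2
  * (YCCK) the inverted components are scaled by K to recover YCbCr; in both
  * cases the fourth plane is then set fully opaque. */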
2820 
2821  if (s->stereo3d) {
2822  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2823  if (stereo) {
2824  stereo->type = s->stereo3d->type;
2825  stereo->flags = s->stereo3d->flags;
2826  }
2827  av_freep(&s->stereo3d);
2828  }
2829 
2830  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2831  AVFrameSideData *sd;
2832  size_t offset = 0;
2833  int total_size = 0;
2834  int i;
2835 
2836  /* Sum size of all parts. */
2837  for (i = 0; i < s->iccnum; i++)
2838  total_size += s->iccdatalens[i];
2839 
2840  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2841  if (!sd) {
2842  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2843  return AVERROR(ENOMEM);
2844  }
2845 
2846  /* Reassemble the parts, which are now in-order. */
2847  for (i = 0; i < s->iccnum; i++) {
2848  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2849  offset += s->iccdatalens[i];
2850  }
2851  }
2852 
2853  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2854  av_dict_free(&s->exif_metadata);
2855 
2856  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2857  ret = smv_process_frame(avctx, frame);
2858  if (ret < 0) {
2859  av_frame_unref(frame);
2860  return ret;
2861  }
2862  }
2863  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2864  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2865  avctx->coded_height > s->orig_height) {
2866  frame->height = avctx->coded_height;
2867  frame->crop_top = frame->height - s->orig_height;
2868  }
2869 
2870  ret = 0;
2871 
2872 the_end_no_picture:
2873  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2874  buf_end - buf_ptr);
2875 
2876  return ret;
2877 }
2878 
2879 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2880  * even without having called ff_mjpeg_decode_init(). */
2881 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2882 {
2883  MJpegDecodeContext *s = avctx->priv_data;
2884  int i, j;
2885 
2886  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2887  av_log(avctx, AV_LOG_INFO, "Single field\n");
2888  }
2889 
2890  if (s->picture) {
2891  av_frame_free(&s->picture);
2892  s->picture_ptr = NULL;
2893  } else if (s->picture_ptr)
2894  av_frame_unref(s->picture_ptr);
2895 
2896  av_packet_free(&s->pkt);
2897 
2898  av_frame_free(&s->smv_frame);
2899 
2900  av_freep(&s->buffer);
2901  av_freep(&s->stereo3d);
2902  av_freep(&s->ljpeg_buffer);
2903  s->ljpeg_buffer_size = 0;
2904 
2905  for (i = 0; i < 3; i++) {
2906  for (j = 0; j < 4; j++)
2907  ff_free_vlc(&s->vlcs[i][j]);
2908  }
2909  for (i = 0; i < MAX_COMPONENTS; i++) {
2910  av_freep(&s->blocks[i]);
2911  av_freep(&s->last_nnz[i]);
2912  }
2913  s->got_picture = 0;
2914 
2915  reset_icc_profile(s);
2916 
2917  av_freep(&s->hwaccel_picture_private);
2918 
2919  return 0;
2920 }
2921 
2922 static void decode_flush(AVCodecContext *avctx)
2923 {
2924  MJpegDecodeContext *s = avctx->priv_data;
2925  s->got_picture = 0;
2926 
2927  s->smv_next_frame = 0;
2928  av_frame_unref(s->smv_frame);
2929 }
2930 
2931 #if CONFIG_MJPEG_DECODER
2932 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2933 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2934 static const AVOption options[] = {
2935  { "extern_huff", "Use external huffman table.",
2936  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2937  { NULL },
2938 };
2939 
2940 static const AVClass mjpegdec_class = {
2941  .class_name = "MJPEG decoder",
2942  .item_name = av_default_item_name,
2943  .option = options,
2944  .version = LIBAVUTIL_VERSION_INT,
2945 };
2946 
2947 AVCodec ff_mjpeg_decoder = {
2948  .name = "mjpeg",
2949  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2950  .type = AVMEDIA_TYPE_VIDEO,
2951  .id = AV_CODEC_ID_MJPEG,
2952  .priv_data_size = sizeof(MJpegDecodeContext),
2953  .init = ff_mjpeg_decode_init,
2954  .close = ff_mjpeg_decode_end,
2955  .receive_frame = ff_mjpeg_receive_frame,
2956  .flush = decode_flush,
2957  .capabilities = AV_CODEC_CAP_DR1,
2958  .max_lowres = 3,
2959  .priv_class = &mjpegdec_class,
2963  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2964 #if CONFIG_MJPEG_NVDEC_HWACCEL
2965  HWACCEL_NVDEC(mjpeg),
2966 #endif
2967 #if CONFIG_MJPEG_VAAPI_HWACCEL
2968  HWACCEL_VAAPI(mjpeg),
2969 #endif
2970  NULL
2971  },
2972 };
2973 #endif
2974 #if CONFIG_THP_DECODER
2975 AVCodec ff_thp_decoder = {
2976  .name = "thp",
2977  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2978  .type = AVMEDIA_TYPE_VIDEO,
2979  .id = AV_CODEC_ID_THP,
2980  .priv_data_size = sizeof(MJpegDecodeContext),
2981  .init = ff_mjpeg_decode_init,
2982  .close = ff_mjpeg_decode_end,
2983  .receive_frame = ff_mjpeg_receive_frame,
2984  .flush = decode_flush,
2985  .capabilities = AV_CODEC_CAP_DR1,
2986  .max_lowres = 3,
2989 };
2990 #endif
2991 
2992 #if CONFIG_SMVJPEG_DECODER
2993 AVCodec ff_smvjpeg_decoder = {
2994  .name = "smvjpeg",
2995  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
2996  .type = AVMEDIA_TYPE_VIDEO,
2997  .id = AV_CODEC_ID_SMVJPEG,
2998  .priv_data_size = sizeof(MJpegDecodeContext),
2999  .init = ff_mjpeg_decode_init,
3000  .close = ff_mjpeg_decode_end,
3001  .receive_frame = ff_mjpeg_receive_frame,
3002  .flush = decode_flush,
3003  .capabilities = AV_CODEC_CAP_DR1,
3006 };
3007 #endif
Definition: snow.txt:400
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:556
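The packing described above can be reproduced with plain shifts; a minimal, self-contained illustration (not FFmpeg code, the helper name is made up):
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* "ABCD" stored LSB first: 'A' ends up in the low byte. */
        uint32_t tag = ((uint32_t)'D' << 24) | ((uint32_t)'C' << 16) |
                       ((uint32_t)'B' <<  8) |  (uint32_t)'A';
        printf("0x%08X -> %c%c%c%c\n", tag,
               (int)(tag & 0xFF), (int)((tag >> 8) & 0xFF),
               (int)((tag >> 16) & 0xFF), (int)((tag >> 24) & 0xFF));
        return 0;
    }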
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1902
uint8_t * data
Definition: frame.h:222
Snow bitstream description: neighboring-block selection (left, top, top-left, top-right, Null block) and motion vector prediction as the median of the scaled left, top and top-right vectors.
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
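A standalone illustration of the rounded average (a+b+1)>>1 used for halfpel interpolation, contrasted with the truncating (a+b)>>1 (illustrative only, not the FFmpeg implementation):
    #include <stdio.h>
    #include <stdint.h>

    /* Rounded average: the 0.5 case rounds up. */
    static uint8_t avg_round(uint8_t a, uint8_t b)    { return (a + b + 1) >> 1; }
    /* No-rounding variant: the 0.5 case rounds down. */
    static uint8_t avg_no_round(uint8_t a, uint8_t b) { return (a + b) >> 1; }

    int main(void)
    {
        printf("%d %d\n", avg_round(10, 11), avg_no_round(10, 11)); /* prints "11 10" */
        return 0;
    }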
int extradata_size
Definition: avcodec.h:633
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:107
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
int coded_height
Definition: avcodec.h:719
Describe the class of an AVClass context structure.
Definition: log.h:67
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static const AVProfile profiles[]
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:726
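A usage sketch of this public side-data API, assuming installed FFmpeg development headers; AV_FRAME_DATA_DISPLAYMATRIX is used only as an example type and attach_rotation is a hypothetical helper:
    #include <libavutil/frame.h>
    #include <libavutil/display.h>

    /* Attach a 3x3 display matrix (90-degree rotation) to a frame. */
    static int attach_rotation(AVFrame *frame)
    {
        AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                     sizeof(int32_t) * 9);
        if (!sd)
            return AVERROR(ENOMEM);
        av_display_rotation_set((int32_t *)sd->data, 90.0);
        return 0;
    }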
int index
Definition: gxfenc.c:89
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:779
int ac_index[MAX_COMPONENTS]
Definition: mjpegdec.h:95
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1063
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
static const SheerTable rgb[2]
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
Definition: mjpeg.h:45
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
Definition: mjpegdec.h:111
Definition: mjpeg.h:48
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
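A sketch of how the bit-reader entries above fit together; it assumes compilation inside the FFmpeg source tree, since libavcodec/get_bits.h is an internal header, and parse_example is a made-up helper:
    #include "libavcodec/get_bits.h"

    /* Skip an 8-bit marker, test a 1-bit flag, then read a 24-bit length. */
    static int parse_example(const uint8_t *buf, int size)
    {
        GetBitContext gb;
        int ret = init_get_bits(&gb, buf, size * 8); /* bit_size is in bits */
        if (ret < 0)
            return ret;
        skip_bits(&gb, 8);                           /* 8-bit marker, ignored here */
        if (get_bits1(&gb))                          /* 1-bit presence flag */
            return (int)get_bits_long(&gb, 24);      /* 24-bit payload length */
        return 0;
    }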
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
enum AVPixelFormat hwaccel_pix_fmt
Definition: mjpegdec.h:160
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
uint8_t raw_huffman_values[2][4][256]
Definition: mjpegdec.h:157
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1535
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
#define MIN_CACHE_BITS
Definition: get_bits.h:128
Definition: mjpeg.h:47
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
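A sketch of the typical allocate / unref / free lifecycle with the public frame API (frame_lifecycle_example is a made-up name):
    #include <libavutil/frame.h>

    static int frame_lifecycle_example(void)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return AVERROR(ENOMEM);
        for (int i = 0; i < 3; i++) {
            /* ... receive or fill the frame here ... */
            av_frame_unref(frame);   /* drop references, keep the AVFrame itself */
        }
        av_frame_free(&frame);       /* finally release the AVFrame structure */
        return 0;
    }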
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
Definition: mjpeg.h:104
#define flags(name, subs,...)
Definition: cbs_av1.c:561
Forward the status of outputs to the corresponding inputs and return FFERROR_NOT_READY; a filter that internally stores one or a few frames for some input may consider them part of the FIFO and delay acknowledging a status change accordingly.
size_t raw_scan_buffer_size
Definition: mjpegdec.h:154
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:56
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2481
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
uint8_t level
Definition: svq3.c:206
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1622
Narrow or limited range content.
Definition: pixfmt.h:569
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2453
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:116
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:846
Definition: mjpeg.h:94
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:423
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1229
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
const OptionDef options[]
Definition: ffmpeg_opt.c:3424
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:117
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: internal.h:61
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define FF_DEBUG_QP
Definition: avcodec.h:1623
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range.
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2174
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:64
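How init_put_bits and flush_put_bits above are typically paired; a sketch assuming compilation inside the FFmpeg tree (libavcodec/put_bits.h is internal) with a made-up write_header helper:
    #include "libavcodec/put_bits.h"

    /* Write a 4-bit version field and a 12-bit length, then byte-align with zeros. */
    static int write_header(uint8_t *buf, int buf_size, unsigned version, unsigned length)
    {
        PutBitContext pb;
        init_put_bits(&pb, buf, buf_size);
        put_bits(&pb, 4, version);
        put_bits(&pb, 12, length);
        flush_put_bits(&pb);                 /* pad the last byte with zero bits */
        return put_bits_count(&pb) / 8;      /* number of bytes written */
    }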
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
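A sketch of allocating a padded input buffer as decoders expect (public API; the zeroed tail avoids over-reads by optimized bitstream readers; alloc_padded is a made-up helper):
    #include <string.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/mem.h>

    static uint8_t *alloc_padded(const uint8_t *src, int size)
    {
        uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!buf)
            return NULL;
        memcpy(buf, src, size);
        memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE); /* required zero padding */
        return buf;
    }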
static int lowres
Definition: ffplay.c:336
const uint8_t * raw_scan_buffer
Definition: mjpegdec.h:153
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
AVCodecContext * avctx
Definition: mjpegdec.h:49
void * priv_data
Definition: avcodec.h:558
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1626
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1399
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:466
int got_picture
we found an SOF marker and the picture is valid, too.
Definition: mjpegdec.h:105
int len
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2501
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
Definition: mjpegdec.h:109
AVFrame * picture
Definition: mjpegdec.h:103
void * hwaccel_picture_private
Definition: mjpegdec.h:161
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
The official guide to swscale for confused developers.
Definition: swscale.txt:2
Definition: mjpeg.h:50
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:53
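Pairing av_packet_alloc with av_packet_free; a public-API sketch (packet_example is a made-up name):
    #include <libavcodec/avcodec.h>

    static int packet_example(void)
    {
        AVPacket *pkt = av_packet_alloc();   /* fields set to defaults, no data yet */
        if (!pkt)
            return AVERROR(ENOMEM);
        /* ... fill pkt via av_new_packet(), a demuxer or a parser ... */
        av_packet_free(&pkt);                /* unreferences data and frees the struct */
        return 0;
    }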
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:392
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
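A sketch of GetByteContext usage with bytestream2_init, bytestream2_seek and bytestream2_get_byte, assuming compilation inside the FFmpeg tree (libavcodec/bytestream.h is internal); read_byte_at is a made-up helper:
    #include <stdio.h>
    #include "libavcodec/bytestream.h"

    /* Jump to a known offset and read one byte, SEEK_SET-style. */
    static int read_byte_at(const uint8_t *buf, int size, int offset)
    {
        GetByteContext gb;
        bytestream2_init(&gb, buf, size);
        if (bytestream2_seek(&gb, offset, SEEK_SET) < 0)
            return AVERROR_INVALIDDATA;
        return bytestream2_get_byte(&gb);
    }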
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range.
Definition: pixfmt.h:258
int last_dc[MAX_COMPONENTS]
Definition: mjpegdec.h:102
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:54
#define REFINE_BIT(j)
Definition: mjpegdec.c:937
uint8_t upscale_h[4]
Definition: mjpegdec.h:72
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2330
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2922
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the IFD in *ifd_offset.
Definition: tiff_common.c:261
int height
Definition: frame.h:372
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1188
#define av_always_inline
Definition: attributes.h:45
static const uint8_t start_code[]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:608
Definition: mjpeg.h:82
#define FFSWAP(type, a, b)
Definition: common.h:108
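FFSWAP lives in libavutil's common.h; below is a standalone re-creation of the do/while(0) swap idiom, for illustration only (SWAP is a made-up stand-in, not the FFmpeg macro):
    #include <stdio.h>

    /* Type-parameterized swap, usable as a single statement after if/else. */
    #define SWAP(type, a, b) do { type tmp_ = (b); (b) = (a); (a) = tmp_; } while (0)

    int main(void)
    {
        int x = 1, y = 2;
        SWAP(int, x, y);
        printf("%d %d\n", x, y); /* prints "2 1" */
        return 0;
    }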
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2194
Filter design notes: "frame" denotes either a video frame or a group of audio samples stored in an AVFrame; format negotiation intersects each link's list of supported formats (pixel formats for video, channel layouts for audio), and query_formats may leave some formats unset and return AVERROR(EAGAIN) so negotiation is retried later; frame references, ownership and permissions.
MJPEG decoder.
#define MKTAG(a, b, c, d)
Definition: common.h:478
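MKTAG builds the same LSB-first fourcc shown for codec_tag above; a small example using the public libavutil header:
    #include <stdio.h>
    #include <libavutil/common.h>

    int main(void)
    {
        unsigned tag = MKTAG('M', 'J', 'P', 'G');   /* 'M' lands in the low byte */
        printf("0x%08X\n", tag);                    /* prints 0x47504A4D */
        return 0;
    }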
AVCodec ff_thp_decoder
Definition: mjpeg.h:61
enum AVCodecID id
AV_RL32 (unsigned 32-bit little-endian read)
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
Definition: rpzaenc.c:58
uint16_t quant_matrixes[4][64]
Definition: mjpegdec.h:59
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
Filter design notes: for each input and each output, the list of supported formats is pixel formats for video and channel layouts for audio.
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define FFMAX3(a, b, c)
Definition: common.h:104
GLuint buffer
Definition: opengl_enc.c:101
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
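AV_CEIL_RSHIFT is the rounding-up counterpart of a plain right shift; a sketch of the typical use for chroma plane sizes of subsampled formats (public libavutil header):
    #include <stdio.h>
    #include <libavutil/common.h>

    int main(void)
    {
        int luma_w = 1919, luma_h = 1081;
        /* 4:2:0: chroma planes are half-sized, rounded up. */
        int chroma_w = AV_CEIL_RSHIFT(luma_w, 1);   /* 960, not 959 */
        int chroma_h = AV_CEIL_RSHIFT(luma_h, 1);   /* 541, not 540 */
        printf("%dx%d\n", chroma_w, chroma_h);
        return 0;
    }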
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
Definition: mjpeg.h:49
bitstream writer API