mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
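/*
 * Minimal usage sketch (illustrative only, error handling omitted): this
 * decoder is normally driven through the public libavcodec API, e.g.
 *
 *     const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
 *     AVCodecContext *ctx = avcodec_alloc_context3(dec);
 *     avcodec_open2(ctx, dec, NULL);
 *     avcodec_send_packet(ctx, pkt);          // pkt: one complete JPEG image
 *     AVFrame *frame = av_frame_alloc();
 *     if (avcodec_receive_frame(ctx, frame) == 0) {
 *         // frame->format / frame->data[] now hold the decoded picture
 *     }
 */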
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/mem.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "blockdsp.h"
43 #include "codec_internal.h"
44 #include "copy_block.h"
45 #include "decode.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 #include "exif.h"
57 #include "bytestream.h"
58 #include "tiff_common.h"
59 
60 
61 static int init_default_huffman_tables(MJpegDecodeContext *s)
62 {
63  static const struct {
64  int class;
65  int index;
66  const uint8_t *bits;
67  const uint8_t *values;
68  int length;
69  } ht[] = {
70  { 0, 0, ff_mjpeg_bits_dc_luminance,
71  ff_mjpeg_val_dc, 12 },
72  { 0, 1, ff_mjpeg_bits_dc_chrominance,
73  ff_mjpeg_val_dc, 12 },
74  { 1, 0, ff_mjpeg_bits_ac_luminance,
75  ff_mjpeg_val_ac_luminance, 162 },
76  { 1, 1, ff_mjpeg_bits_ac_chrominance,
77  ff_mjpeg_val_ac_chrominance, 162 },
78  { 2, 0, ff_mjpeg_bits_ac_luminance,
79  ff_mjpeg_val_ac_luminance, 162 },
80  { 2, 1, ff_mjpeg_bits_ac_chrominance,
81  ff_mjpeg_val_ac_chrominance, 162 },
82  };
83  int i, ret;
84 
85  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
86  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
87  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
88  ht[i].bits, ht[i].values,
89  ht[i].class == 1, s->avctx);
90  if (ret < 0)
91  return ret;
92 
93  if (ht[i].class < 2) {
94  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
95  ht[i].bits + 1, 16);
96  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
97  ht[i].values, ht[i].length);
98  }
99  }
100 
101  return 0;
102 }
103 
104 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
105 {
106  s->buggy_avid = 1;
107  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
108  s->interlace_polarity = 1;
109  if (len > 14 && buf[12] == 2) /* 2 - PAL */
110  s->interlace_polarity = 0;
111  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
112  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
113 }
114 
115 static void init_idct(AVCodecContext *avctx)
116 {
117  MJpegDecodeContext *s = avctx->priv_data;
118 
119  ff_idctdsp_init(&s->idsp, avctx);
120  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
121  s->idsp.idct_permutation);
122 }
123 
124 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
125 {
126  MJpegDecodeContext *s = avctx->priv_data;
127  int ret;
128 
129  if (!s->picture_ptr) {
130  s->picture = av_frame_alloc();
131  if (!s->picture)
132  return AVERROR(ENOMEM);
133  s->picture_ptr = s->picture;
134  }
135 
136  s->avctx = avctx;
137  ff_blockdsp_init(&s->bdsp);
138  ff_hpeldsp_init(&s->hdsp, avctx->flags);
139  init_idct(avctx);
140  s->buffer_size = 0;
141  s->buffer = NULL;
142  s->start_code = -1;
143  s->first_picture = 1;
144  s->got_picture = 0;
145  s->orig_height = avctx->coded_height;
146  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
147  avctx->colorspace = AVCOL_SPC_BT470BG;
148  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
149 
150  if ((ret = init_default_huffman_tables(s)) < 0)
151  return ret;
152 
153  if (s->extern_huff) {
154  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
155  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
156  return ret;
157  if (ff_mjpeg_decode_dht(s)) {
158  av_log(avctx, AV_LOG_ERROR,
159  "error using external huffman table, switching back to internal\n");
160  if ((ret = init_default_huffman_tables(s)) < 0)
161  return ret;
162  }
163  }
164  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
165  s->interlace_polarity = 1; /* bottom field first */
166  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
167  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
168  if (avctx->codec_tag == AV_RL32("MJPG"))
169  s->interlace_polarity = 1;
170  }
171 
172  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
173  if (avctx->extradata_size >= 4)
174  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
175 
176  if (s->smv_frames_per_jpeg <= 0) {
177  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
178  return AVERROR_INVALIDDATA;
179  }
180 
181  s->smv_frame = av_frame_alloc();
182  if (!s->smv_frame)
183  return AVERROR(ENOMEM);
184  } else if (avctx->extradata_size > 8
185  && AV_RL32(avctx->extradata) == 0x2C
186  && AV_RL32(avctx->extradata+4) == 0x18) {
187  parse_avid(s, avctx->extradata, avctx->extradata_size);
188  }
189 
190  if (avctx->codec->id == AV_CODEC_ID_AMV)
191  s->flipped = 1;
192 
193  return 0;
194 }
195 
196 
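/*
 * A DQT segment carries one or more quantization tables: a 4-bit precision
 * field (0 = 8-bit, 1 = 16-bit entries), a 4-bit table index, then 64 values
 * in zigzag order. The loop below also derives a rough qscale estimate from
 * the first AC entries, used later as a quality hint.
 */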
197 /* quantize tables */
198 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
199 {
200  int len, index, i;
201 
202  len = get_bits(&s->gb, 16) - 2;
203 
204  if (8*len > get_bits_left(&s->gb)) {
205  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
206  return AVERROR_INVALIDDATA;
207  }
208 
209  while (len >= 65) {
210  int pr = get_bits(&s->gb, 4);
211  if (pr > 1) {
212  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
213  return AVERROR_INVALIDDATA;
214  }
215  index = get_bits(&s->gb, 4);
216  if (index >= 4)
217  return -1;
218  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
219  /* read quant table */
220  for (i = 0; i < 64; i++) {
221  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
222  if (s->quant_matrixes[index][i] == 0) {
223  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
224  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
225  if (s->avctx->err_recognition & AV_EF_EXPLODE)
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
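/*
 * Each DHT table starts with a class/index byte (class 0 = DC, 1 = AC),
 * followed by 16 counts giving how many codes exist of each length 1..16,
 * followed by the code values themselves. For AC tables an extra copy is
 * built in vlcs[2] for the progressive decoding path.
 */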
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  for (i = 0; i < n; i++) {
274  v = get_bits(&s->gb, 8);
275  val_table[i] = v;
276  }
277  len -= n;
278 
279  /* build VLC and flush previous vlc if present */
280  ff_vlc_free(&s->vlcs[class][index]);
281  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
282  class, index, n);
283  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
284  val_table, class > 0, s->avctx)) < 0)
285  return ret;
286 
287  if (class > 0) {
288  ff_vlc_free(&s->vlcs[2][index]);
289  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
290  val_table, 0, s->avctx)) < 0)
291  return ret;
292  }
293 
294  for (i = 0; i < 16; i++)
295  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
296  for (i = 0; i < 256; i++)
297  s->raw_huffman_values[class][index][i] = val_table[i];
298  }
299  return 0;
300 }
301 
302 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
303 {
304  int len, nb_components, i, width, height, bits, ret, size_change;
305  unsigned pix_fmt_id;
306  int h_count[MAX_COMPONENTS] = { 0 };
307  int v_count[MAX_COMPONENTS] = { 0 };
308 
309  s->cur_scan = 0;
310  memset(s->upscale_h, 0, sizeof(s->upscale_h));
311  memset(s->upscale_v, 0, sizeof(s->upscale_v));
312 
313  len = get_bits(&s->gb, 16);
314  bits = get_bits(&s->gb, 8);
315 
316  if (bits > 16 || bits < 1) {
317  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
318  return AVERROR_INVALIDDATA;
319  }
320 
321  if (s->avctx->bits_per_raw_sample != bits) {
322  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
323  s->avctx->bits_per_raw_sample = bits;
324  init_idct(s->avctx);
325  }
326  if (s->pegasus_rct)
327  bits = 9;
328  if (bits == 9 && !s->pegasus_rct)
329  s->rct = 1; // FIXME ugly
330 
331  if(s->lossless && s->avctx->lowres){
332  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
333  return -1;
334  }
335 
336  height = get_bits(&s->gb, 16);
337  width = get_bits(&s->gb, 16);
338 
339  // HACK for odd_height.mov
340  if (s->interlaced && s->width == width && s->height == height + 1)
341  height= s->height;
342 
343  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
344  if (av_image_check_size(width, height, 0, s->avctx) < 0)
345  return AVERROR_INVALIDDATA;
346  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
347  return AVERROR_INVALIDDATA;
348 
349  nb_components = get_bits(&s->gb, 8);
350  if (nb_components <= 0 ||
351  nb_components > MAX_COMPONENTS)
352  return -1;
353  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
354  if (nb_components != s->nb_components) {
355  av_log(s->avctx, AV_LOG_ERROR,
356  "nb_components changing in interlaced picture\n");
357  return AVERROR_INVALIDDATA;
358  }
359  }
360  if (s->ls && !(bits <= 8 || nb_components == 1)) {
361  avpriv_report_missing_feature(s->avctx,
362  "JPEG-LS that is not <= 8 "
363  "bits/component or 16-bit gray");
364  return AVERROR_PATCHWELCOME;
365  }
366  if (len != 8 + 3 * nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
368  return AVERROR_INVALIDDATA;
369  }
370 
371  s->nb_components = nb_components;
372  s->h_max = 1;
373  s->v_max = 1;
374  for (i = 0; i < nb_components; i++) {
375  /* component id */
376  s->component_id[i] = get_bits(&s->gb, 8);
377  h_count[i] = get_bits(&s->gb, 4);
378  v_count[i] = get_bits(&s->gb, 4);
379  /* compute hmax and vmax (only used in interleaved case) */
380  if (h_count[i] > s->h_max)
381  s->h_max = h_count[i];
382  if (v_count[i] > s->v_max)
383  s->v_max = v_count[i];
384  s->quant_index[i] = get_bits(&s->gb, 8);
385  if (s->quant_index[i] >= 4) {
386  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
387  return AVERROR_INVALIDDATA;
388  }
389  if (!h_count[i] || !v_count[i]) {
390  av_log(s->avctx, AV_LOG_ERROR,
391  "Invalid sampling factor in component %d %d:%d\n",
392  i, h_count[i], v_count[i]);
393  return AVERROR_INVALIDDATA;
394  }
395 
396  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
397  i, h_count[i], v_count[i],
398  s->component_id[i], s->quant_index[i]);
399  }
400  if ( nb_components == 4
401  && s->component_id[0] == 'C'
402  && s->component_id[1] == 'M'
403  && s->component_id[2] == 'Y'
404  && s->component_id[3] == 'K')
405  s->adobe_transform = 0;
406 
407  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
408  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
409  return AVERROR_PATCHWELCOME;
410  }
411 
412  if (s->bayer) {
413  if (nb_components == 2) {
414  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
415  width stored in their SOF3 markers is the width of each one. We only output
416  a single component, therefore we need to adjust the output image width. We
417  handle the deinterleaving (but not the debayering) in this file. */
418  width *= 2;
419  }
420  /* They can also contain 1 component, which is double the width and half the height
421  of the final image (rows are interleaved). We don't handle the decoding in this
422  file, but leave that to the TIFF/DNG decoder. */
423  }
424 
425  /* if different size, realloc/alloc picture */
426  if (width != s->width || height != s->height || bits != s->bits ||
427  memcmp(s->h_count, h_count, sizeof(h_count)) ||
428  memcmp(s->v_count, v_count, sizeof(v_count))) {
429  size_change = 1;
430 
431  s->width = width;
432  s->height = height;
433  s->bits = bits;
434  memcpy(s->h_count, h_count, sizeof(h_count));
435  memcpy(s->v_count, v_count, sizeof(v_count));
436  s->interlaced = 0;
437  s->got_picture = 0;
438 
439  /* test interlaced mode */
440  if (s->first_picture &&
441  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
442  s->orig_height != 0 &&
443  s->height < ((s->orig_height * 3) / 4)) {
444  s->interlaced = 1;
445  s->bottom_field = s->interlace_polarity;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
447  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
448  height *= 2;
449  }
450 
451  ret = ff_set_dimensions(s->avctx, width, height);
452  if (ret < 0)
453  return ret;
454 
455  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
456  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
457  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
458  s->orig_height < height)
459  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
460 
461  s->first_picture = 0;
462  } else {
463  size_change = 0;
464  }
465 
466  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
467  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
468  if (s->avctx->height <= 0)
469  return AVERROR_INVALIDDATA;
470  }
471  if (s->bayer && s->progressive) {
472  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
473  return AVERROR_INVALIDDATA;
474  }
475 
476  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
477  if (s->progressive) {
478  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
479  return AVERROR_INVALIDDATA;
480  }
481  } else {
482  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
483  s->rgb = 1;
484  else if (!s->lossless)
485  s->rgb = 0;
486  /* XXX: not complete test ! */
487  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
488  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
489  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
490  (s->h_count[3] << 4) | s->v_count[3];
491  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
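 /* Worked example: a baseline 4:2:0 stream with sampling factors (2,2), (1,1),
  * (1,1) packs to pix_fmt_id 0x22111100, which the switch below maps to
  * YUV(J)420P (or YUV420P16 for high bit depths). */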
492  /* NOTE we do not allocate pictures large enough for the possible
493  * padding of h/v_count being 4 */
494  if (!(pix_fmt_id & 0xD0D0D0D0))
495  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
496  if (!(pix_fmt_id & 0x0D0D0D0D))
497  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
498 
499  for (i = 0; i < 8; i++) {
500  int j = 6 + (i&1) - (i&6);
501  int is = (pix_fmt_id >> (4*i)) & 0xF;
502  int js = (pix_fmt_id >> (4*j)) & 0xF;
503 
504  if (is == 1 && js != 2 && (i < 2 || i > 5))
505  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
506  if (is == 1 && js != 2 && (i < 2 || i > 5))
507  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
508 
509  if (is == 1 && js == 2) {
510  if (i & 1) s->upscale_h[j/2] = 1;
511  else s->upscale_v[j/2] = 1;
512  }
513  }
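 /* upscale_h/upscale_v mark chroma planes whose sampling is finer in the chosen
  * output pix_fmt than in the stream; those planes are stretched later in the
  * frame decode path rather than being given their own pixel format. */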
514 
515  if (s->bayer) {
516  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
517  goto unk_pixfmt;
518  }
519 
520  switch (pix_fmt_id) {
521  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
522  if (!s->bayer)
523  goto unk_pixfmt;
524  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
525  break;
526  case 0x11111100:
527  if (s->rgb)
528  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
529  else {
530  if ( s->adobe_transform == 0
531  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
532  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
533  } else {
534  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
535  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
536  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
537  }
538  }
539  av_assert0(s->nb_components == 3);
540  break;
541  case 0x11111111:
542  if (s->rgb)
543  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
544  else {
545  if (s->adobe_transform == 0 && s->bits <= 8) {
546  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
547  } else {
548  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
549  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
550  }
551  }
552  av_assert0(s->nb_components == 4);
553  break;
554  case 0x11412100:
555  if (s->bits > 8)
556  goto unk_pixfmt;
557  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
558  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
559  s->upscale_h[0] = 4;
560  s->upscale_h[1] = 0;
561  s->upscale_h[2] = 1;
562  } else {
563  goto unk_pixfmt;
564  }
565  break;
566  case 0x22111122:
567  case 0x22111111:
568  if (s->adobe_transform == 0 && s->bits <= 8) {
569  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
570  s->upscale_v[1] = s->upscale_v[2] = 1;
571  s->upscale_h[1] = s->upscale_h[2] = 1;
572  } else if (s->adobe_transform == 2 && s->bits <= 8) {
573  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
574  s->upscale_v[1] = s->upscale_v[2] = 1;
575  s->upscale_h[1] = s->upscale_h[2] = 1;
576  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
577  } else {
578  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
579  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
580  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
581  }
582  av_assert0(s->nb_components == 4);
583  break;
584  case 0x12121100:
585  case 0x22122100:
586  case 0x21211100:
587  case 0x21112100:
588  case 0x22211200:
589  case 0x22221100:
590  case 0x22112200:
591  case 0x11222200:
592  if (s->bits > 8)
593  goto unk_pixfmt;
594  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
595  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
596  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
597  } else {
598  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
599  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
600  }
601  break;
602  case 0x11000000:
603  case 0x13000000:
604  case 0x14000000:
605  case 0x31000000:
606  case 0x33000000:
607  case 0x34000000:
608  case 0x41000000:
609  case 0x43000000:
610  case 0x44000000:
611  if(s->bits <= 8)
612  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
613  else
614  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
615  break;
616  case 0x12111100:
617  case 0x14121200:
618  case 0x14111100:
619  case 0x22211100:
620  case 0x22112100:
621  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
622  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
623  else
624  goto unk_pixfmt;
625  s->upscale_v[1] = s->upscale_v[2] = 1;
626  } else {
627  if (pix_fmt_id == 0x14111100)
628  s->upscale_v[1] = s->upscale_v[2] = 1;
629  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
630  else
631  goto unk_pixfmt;
632  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
633  }
634  break;
635  case 0x21111100:
636  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
637  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
638  else
639  goto unk_pixfmt;
640  s->upscale_h[1] = s->upscale_h[2] = 1;
641  } else {
642  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
643  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
644  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
645  }
646  break;
647  case 0x11311100:
648  if (s->bits > 8)
649  goto unk_pixfmt;
650  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
651  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
652  else
653  goto unk_pixfmt;
654  s->upscale_h[0] = s->upscale_h[2] = 2;
655  break;
656  case 0x31111100:
657  if (s->bits > 8)
658  goto unk_pixfmt;
659  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
660  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
661  s->upscale_h[1] = s->upscale_h[2] = 2;
662  break;
663  case 0x22121100:
664  case 0x22111200:
665  case 0x41211100:
666  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
667  else
668  goto unk_pixfmt;
669  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
670  break;
671  case 0x22111100:
672  case 0x23111100:
673  case 0x42111100:
674  case 0x24111100:
675  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
676  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
677  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
678  if (pix_fmt_id == 0x42111100) {
679  if (s->bits > 8)
680  goto unk_pixfmt;
681  s->upscale_h[1] = s->upscale_h[2] = 1;
682  } else if (pix_fmt_id == 0x24111100) {
683  if (s->bits > 8)
684  goto unk_pixfmt;
685  s->upscale_v[1] = s->upscale_v[2] = 1;
686  } else if (pix_fmt_id == 0x23111100) {
687  if (s->bits > 8)
688  goto unk_pixfmt;
689  s->upscale_v[1] = s->upscale_v[2] = 2;
690  }
691  break;
692  case 0x41111100:
693  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
694  else
695  goto unk_pixfmt;
696  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
697  break;
698  default:
699  unk_pixfmt:
700  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
701  memset(s->upscale_h, 0, sizeof(s->upscale_h));
702  memset(s->upscale_v, 0, sizeof(s->upscale_v));
703  return AVERROR_PATCHWELCOME;
704  }
705  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
706  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
707  return AVERROR_PATCHWELCOME;
708  }
709  if (s->ls) {
710  memset(s->upscale_h, 0, sizeof(s->upscale_h));
711  memset(s->upscale_v, 0, sizeof(s->upscale_v));
712  if (s->nb_components == 3) {
713  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
714  } else if (s->nb_components != 1) {
715  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
716  return AVERROR_PATCHWELCOME;
717  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
718  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
719  else if (s->bits <= 8)
720  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
721  else
722  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
723  }
724 
725  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
726  if (!s->pix_desc) {
727  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
728  return AVERROR_BUG;
729  }
730 
731  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
732  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
733  } else {
734  enum AVPixelFormat pix_fmts[] = {
735 #if CONFIG_MJPEG_NVDEC_HWACCEL
736  AV_PIX_FMT_CUDA,
737 #endif
738 #if CONFIG_MJPEG_VAAPI_HWACCEL
739  AV_PIX_FMT_VAAPI,
740 #endif
741  s->avctx->pix_fmt,
742  AV_PIX_FMT_NONE,
743  };
744  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
745  if (s->hwaccel_pix_fmt < 0)
746  return AVERROR(EINVAL);
747 
748  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
749  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
750  }
751 
752  if (s->avctx->skip_frame == AVDISCARD_ALL) {
753  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
754  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
755  s->got_picture = 1;
756  return 0;
757  }
758 
759  av_frame_unref(s->picture_ptr);
760  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
761  return -1;
762  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
763  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
764  s->got_picture = 1;
765 
766  // Let's clear the palette to avoid leaving uninitialized values in it
767  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
768  memset(s->picture_ptr->data[1], 0, 1024);
769 
770  for (i = 0; i < 4; i++)
771  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
772 
773  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
774  s->width, s->height, s->linesize[0], s->linesize[1],
775  s->interlaced, s->avctx->height);
776 
777  }
778 
779  if ((s->rgb && !s->lossless && !s->ls) ||
780  (!s->rgb && s->ls && s->nb_components > 1) ||
781  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
782  ) {
783  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
784  return AVERROR_PATCHWELCOME;
785  }
786 
787  /* totally blank picture as progressive JPEG will only add details to it */
788  if (s->progressive) {
789  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
790  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
791  for (i = 0; i < s->nb_components; i++) {
792  int size = bw * bh * s->h_count[i] * s->v_count[i];
793  av_freep(&s->blocks[i]);
794  av_freep(&s->last_nnz[i]);
795  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
796  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
797  if (!s->blocks[i] || !s->last_nnz[i])
798  return AVERROR(ENOMEM);
799  s->block_stride[i] = bw * s->h_count[i];
800  }
801  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
802  }
803 
804  if (s->avctx->hwaccel) {
805  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
806  s->hwaccel_picture_private =
807  av_mallocz(hwaccel->frame_priv_data_size);
808  if (!s->hwaccel_picture_private)
809  return AVERROR(ENOMEM);
810 
811  ret = hwaccel->start_frame(s->avctx, s->raw_image_buffer,
812  s->raw_image_buffer_size);
813  if (ret < 0)
814  return ret;
815  }
816 
817  return 0;
818 }
819 
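/* DC coefficients are coded as a Huffman "size" category followed by that many
 * raw bits; get_xbits() sign-extends them per the JPEG convention. 0xfffff is
 * used as an in-band error sentinel by the callers below. */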
820 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
821 {
822  int code;
823  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
824  if (code < 0 || code > 16) {
825  av_log(s->avctx, AV_LOG_WARNING,
826  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
827  0, dc_index, &s->vlcs[0][dc_index]);
828  return 0xfffff;
829  }
830 
831  if (code)
832  return get_xbits(&s->gb, code);
833  else
834  return 0;
835 }
836 
837 /* decode block and dequantize */
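/* Each AC symbol packs a zero-run length and a magnitude category; the loop
 * skips the run, reads the extra magnitude bits, and stores the dequantized
 * level at the zigzag-permuted position. End-of-block terminates the loop once
 * the index passes 63 (the AC VLC symbol values are pre-biased when the table
 * is built in ff_mjpeg_build_vlc()). */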
838 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
839  int dc_index, int ac_index, uint16_t *quant_matrix)
840 {
841  int code, i, j, level, val;
842 
843  /* DC coef */
844  val = mjpeg_decode_dc(s, dc_index);
845  if (val == 0xfffff) {
846  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
847  return AVERROR_INVALIDDATA;
848  }
849  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
850  s->last_dc[component] = val;
851  block[0] = av_clip_int16(val);
852  /* AC coefs */
853  i = 0;
854  {OPEN_READER(re, &s->gb);
855  do {
856  UPDATE_CACHE(re, &s->gb);
857  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
858 
859  i += ((unsigned)code) >> 4;
860  code &= 0xf;
861  if (code) {
862  if (code > MIN_CACHE_BITS - 16)
863  UPDATE_CACHE(re, &s->gb);
864 
865  {
866  int cache = GET_CACHE(re, &s->gb);
867  int sign = (~cache) >> 31;
868  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
869  }
870 
871  LAST_SKIP_BITS(re, &s->gb, code);
872 
873  if (i > 63) {
874  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
875  return AVERROR_INVALIDDATA;
876  }
877  j = s->permutated_scantable[i];
878  block[j] = level * quant_matrix[i];
879  }
880  } while (i < 63);
881  CLOSE_READER(re, &s->gb);}
882 
883  return 0;
884 }
885 
886 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
887  int component, int dc_index,
888  uint16_t *quant_matrix, int Al)
889 {
890  unsigned val;
891  s->bdsp.clear_block(block);
892  val = mjpeg_decode_dc(s, dc_index);
893  if (val == 0xfffff) {
894  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
895  return AVERROR_INVALIDDATA;
896  }
897  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
898  s->last_dc[component] = val;
899  block[0] = val;
900  return 0;
901 }
902 
903 /* decode block and dequantize - progressive JPEG version */
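/* Progressive scans split each block into a spectral band (ss..se) and
 * successive-approximation bit planes (Ah/Al). EOBRUN counts how many
 * subsequent blocks have no further nonzero coefficients in the current band,
 * so they can be skipped without touching the bitstream. */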
904 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
905  uint8_t *last_nnz, int ac_index,
906  uint16_t *quant_matrix,
907  int ss, int se, int Al, int *EOBRUN)
908 {
909  int code, i, j, val, run;
910  unsigned level;
911 
912  if (*EOBRUN) {
913  (*EOBRUN)--;
914  return 0;
915  }
916 
917  {
918  OPEN_READER(re, &s->gb);
919  for (i = ss; ; i++) {
920  UPDATE_CACHE(re, &s->gb);
921  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
922 
923  run = ((unsigned) code) >> 4;
924  code &= 0xF;
925  if (code) {
926  i += run;
927  if (code > MIN_CACHE_BITS - 16)
928  UPDATE_CACHE(re, &s->gb);
929 
930  {
931  int cache = GET_CACHE(re, &s->gb);
932  int sign = (~cache) >> 31;
933  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
934  }
935 
936  LAST_SKIP_BITS(re, &s->gb, code);
937 
938  if (i >= se) {
939  if (i == se) {
940  j = s->permutated_scantable[se];
941  block[j] = level * (quant_matrix[se] << Al);
942  break;
943  }
944  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
945  return AVERROR_INVALIDDATA;
946  }
947  j = s->permutated_scantable[i];
948  block[j] = level * (quant_matrix[i] << Al);
949  } else {
950  if (run == 0xF) {// ZRL - skip 15 coefficients
951  i += 15;
952  if (i >= se) {
953  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
954  return AVERROR_INVALIDDATA;
955  }
956  } else {
957  val = (1 << run);
958  if (run) {
959  UPDATE_CACHE(re, &s->gb);
960  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
961  LAST_SKIP_BITS(re, &s->gb, run);
962  }
963  *EOBRUN = val - 1;
964  break;
965  }
966  }
967  }
968  CLOSE_READER(re, &s->gb);
969  }
970 
971  if (i > *last_nnz)
972  *last_nnz = i;
973 
974  return 0;
975 }
976 
977 #define REFINE_BIT(j) { \
978  UPDATE_CACHE(re, &s->gb); \
979  sign = block[j] >> 15; \
980  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
981  ((quant_matrix[i] ^ sign) - sign) << Al; \
982  LAST_SKIP_BITS(re, &s->gb, 1); \
983 }
984 
985 #define ZERO_RUN \
986 for (; ; i++) { \
987  if (i > last) { \
988  i += run; \
989  if (i > se) { \
990  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
991  return -1; \
992  } \
993  break; \
994  } \
995  j = s->permutated_scantable[i]; \
996  if (block[j]) \
997  REFINE_BIT(j) \
998  else if (run-- == 0) \
999  break; \
1000 }
1001 
1002 /* decode block and dequantize - progressive JPEG refinement pass */
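/* Refinement passes (Ah > 0) add one more bit of precision: coefficients that
 * are already nonzero get a correction bit appended (REFINE_BIT), while newly
 * nonzero coefficients are set to +/-(quant step << Al) at the positions
 * signalled by the run lengths. */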
1003 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
1004  uint8_t *last_nnz,
1005  int ac_index, uint16_t *quant_matrix,
1006  int ss, int se, int Al, int *EOBRUN)
1007 {
1008  int code, i = ss, j, sign, val, run;
1009  int last = FFMIN(se, *last_nnz);
1010 
1011  OPEN_READER(re, &s->gb);
1012  if (*EOBRUN) {
1013  (*EOBRUN)--;
1014  } else {
1015  for (; ; i++) {
1016  UPDATE_CACHE(re, &s->gb);
1017  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1018 
1019  if (code & 0xF) {
1020  run = ((unsigned) code) >> 4;
1021  UPDATE_CACHE(re, &s->gb);
1022  val = SHOW_UBITS(re, &s->gb, 1);
1023  LAST_SKIP_BITS(re, &s->gb, 1);
1024  ZERO_RUN;
1025  j = s->permutated_scantable[i];
1026  val--;
1027  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1028  if (i == se) {
1029  if (i > *last_nnz)
1030  *last_nnz = i;
1031  CLOSE_READER(re, &s->gb);
1032  return 0;
1033  }
1034  } else {
1035  run = ((unsigned) code) >> 4;
1036  if (run == 0xF) {
1037  ZERO_RUN;
1038  } else {
1039  val = run;
1040  run = (1 << run);
1041  if (val) {
1042  UPDATE_CACHE(re, &s->gb);
1043  run += SHOW_UBITS(re, &s->gb, val);
1044  LAST_SKIP_BITS(re, &s->gb, val);
1045  }
1046  *EOBRUN = run - 1;
1047  break;
1048  }
1049  }
1050  }
1051 
1052  if (i > *last_nnz)
1053  *last_nnz = i;
1054  }
1055 
1056  for (; i <= last; i++) {
1057  j = s->permutated_scantable[i];
1058  if (block[j])
1059  REFINE_BIT(j)
1060  }
1061  CLOSE_READER(re, &s->gb);
1062 
1063  return 0;
1064 }
1065 #undef REFINE_BIT
1066 #undef ZERO_RUN
1067 
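/* Restart markers: every restart_interval MCUs the entropy coder is flushed to
 * a byte boundary, an RSTn marker (0xFFD0..0xFFD7) follows, and the DC
 * predictors are reset. handle_rstn() also tolerates streams where the marker
 * is missing or padded with fill bytes. */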
1068 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1069 {
1070  int i;
1071  int reset = 0;
1072 
1073  if (s->restart_interval) {
1074  s->restart_count--;
1075  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1076  align_get_bits(&s->gb);
1077  for (i = 0; i < nb_components; i++) /* reset dc */
1078  s->last_dc[i] = (4 << s->bits);
1079  }
1080 
1081  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1082  /* skip RSTn */
1083  if (s->restart_count == 0) {
1084  if( show_bits(&s->gb, i) == (1 << i) - 1
1085  || show_bits(&s->gb, i) == 0xFF) {
1086  int pos = get_bits_count(&s->gb);
1087  align_get_bits(&s->gb);
1088  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1089  skip_bits(&s->gb, 8);
1090  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1091  for (i = 0; i < nb_components; i++) /* reset dc */
1092  s->last_dc[i] = (4 << s->bits);
1093  reset = 1;
1094  } else
1095  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1096  }
1097  }
1098  }
1099  return reset;
1100 }
1101 
1102 /* Handles 1 to 4 components */
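/* Lossless JPEG predicts each sample from its left, top and top-left
 * neighbours; PREDICT() implements the seven standard predictors from
 * ITU-T T.81 (1 = left, 2 = top, 3 = top-left, 4 = left + top - top-left,
 * 5..7 = averaged variants). point_transform scales the decoded differences
 * back up to full precision. */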
1103 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1104 {
1105  int i, mb_x, mb_y;
1106  unsigned width;
1107  uint16_t (*buffer)[4];
1108  int left[4], top[4], topleft[4];
1109  const int linesize = s->linesize[0];
1110  const int mask = ((1 << s->bits) - 1) << point_transform;
1111  int resync_mb_y = 0;
1112  int resync_mb_x = 0;
1113  int vpred[6];
1114 
1115  if (!s->bayer && s->nb_components < 3)
1116  return AVERROR_INVALIDDATA;
1117  if (s->bayer && s->nb_components > 2)
1118  return AVERROR_INVALIDDATA;
1119  if (s->nb_components <= 0 || s->nb_components > 4)
1120  return AVERROR_INVALIDDATA;
1121  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1122  return AVERROR_INVALIDDATA;
1123  if (s->bayer) {
1124  if (s->rct || s->pegasus_rct)
1125  return AVERROR_INVALIDDATA;
1126  }
1127 
1128 
1129  s->restart_count = s->restart_interval;
1130 
1131  if (s->restart_interval == 0)
1132  s->restart_interval = INT_MAX;
1133 
1134  if (s->bayer)
1135  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1136  else
1137  width = s->mb_width;
1138 
1139  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1140  if (!s->ljpeg_buffer)
1141  return AVERROR(ENOMEM);
1142 
1143  buffer = s->ljpeg_buffer;
1144 
1145  for (i = 0; i < 4; i++)
1146  buffer[0][i] = 1 << (s->bits - 1);
1147 
1148  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1149  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1150 
1151  if (s->interlaced && s->bottom_field)
1152  ptr += linesize >> 1;
1153 
1154  for (i = 0; i < 4; i++)
1155  top[i] = left[i] = topleft[i] = buffer[0][i];
1156 
1157  if ((mb_y * s->width) % s->restart_interval == 0) {
1158  for (i = 0; i < 6; i++)
1159  vpred[i] = 1 << (s->bits-1);
1160  }
1161 
1162  for (mb_x = 0; mb_x < width; mb_x++) {
1163  int modified_predictor = predictor;
1164 
1165  if (get_bits_left(&s->gb) < 1) {
1166  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1167  return AVERROR_INVALIDDATA;
1168  }
1169 
1170  if (s->restart_interval && !s->restart_count){
1171  s->restart_count = s->restart_interval;
1172  resync_mb_x = mb_x;
1173  resync_mb_y = mb_y;
1174  for(i=0; i<4; i++)
1175  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1176  }
1177  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1178  modified_predictor = 1;
1179 
1180  for (i=0;i<nb_components;i++) {
1181  int pred, dc;
1182 
1183  topleft[i] = top[i];
1184  top[i] = buffer[mb_x][i];
1185 
1186  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1187  if(dc == 0xFFFFF)
1188  return -1;
1189 
1190  if (!s->bayer || mb_x) {
1191  pred = left[i];
1192  } else { /* This path runs only for the first line in bayer images */
1193  vpred[i] += dc;
1194  pred = vpred[i] - dc;
1195  }
1196 
1197  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1198 
1199  left[i] = buffer[mb_x][i] =
1200  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1201  }
1202 
1203  if (s->restart_interval && !--s->restart_count) {
1204  align_get_bits(&s->gb);
1205  skip_bits(&s->gb, 16); /* skip RSTn */
1206  }
1207  }
1208  if (s->rct && s->nb_components == 4) {
1209  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1210  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1211  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1212  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1213  ptr[4*mb_x + 0] = buffer[mb_x][3];
1214  }
1215  } else if (s->nb_components == 4) {
1216  for(i=0; i<nb_components; i++) {
1217  int c= s->comp_index[i];
1218  if (s->bits <= 8) {
1219  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1220  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1221  }
1222  } else if(s->bits == 9) {
1223  return AVERROR_PATCHWELCOME;
1224  } else {
1225  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1226  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1227  }
1228  }
1229  }
1230  } else if (s->rct) {
1231  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1232  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1233  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1234  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1235  }
1236  } else if (s->pegasus_rct) {
1237  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1238  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1239  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1240  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1241  }
1242  } else if (s->bayer) {
1243  if (s->bits <= 8)
1244  return AVERROR_PATCHWELCOME;
1245  if (nb_components == 1) {
1246  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1247  for (mb_x = 0; mb_x < width; mb_x++)
1248  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1249  } else if (nb_components == 2) {
1250  for (mb_x = 0; mb_x < width; mb_x++) {
1251  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1252  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1253  }
1254  }
1255  } else {
1256  for(i=0; i<nb_components; i++) {
1257  int c= s->comp_index[i];
1258  if (s->bits <= 8) {
1259  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1260  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1261  }
1262  } else if(s->bits == 9) {
1263  return AVERROR_PATCHWELCOME;
1264  } else {
1265  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1266  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1267  }
1268  }
1269  }
1270  }
1271  }
1272  return 0;
1273 }
1274 
1275 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1276  int point_transform, int nb_components)
1277 {
1278  int i, mb_x, mb_y, mask;
1279  int bits= (s->bits+7)&~7;
1280  int resync_mb_y = 0;
1281  int resync_mb_x = 0;
1282 
1283  point_transform += bits - s->bits;
1284  mask = ((1 << s->bits) - 1) << point_transform;
1285 
1286  av_assert0(nb_components>=1 && nb_components<=4);
1287 
1288  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1289  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1290  if (get_bits_left(&s->gb) < 1) {
1291  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1292  return AVERROR_INVALIDDATA;
1293  }
1294  if (s->restart_interval && !s->restart_count){
1295  s->restart_count = s->restart_interval;
1296  resync_mb_x = mb_x;
1297  resync_mb_y = mb_y;
1298  }
1299 
1300  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1301  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1302  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1303  for (i = 0; i < nb_components; i++) {
1304  uint8_t *ptr;
1305  uint16_t *ptr16;
1306  int n, h, v, x, y, c, j, linesize;
1307  n = s->nb_blocks[i];
1308  c = s->comp_index[i];
1309  h = s->h_scount[i];
1310  v = s->v_scount[i];
1311  x = 0;
1312  y = 0;
1313  linesize= s->linesize[c];
1314 
1315  if(bits>8) linesize /= 2;
1316 
1317  for(j=0; j<n; j++) {
1318  int pred, dc;
1319 
1320  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1321  if(dc == 0xFFFFF)
1322  return -1;
1323  if ( h * mb_x + x >= s->width
1324  || v * mb_y + y >= s->height) {
1325  // Nothing to do
1326  } else if (bits<=8) {
1327  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1328  if(y==0 && toprow){
1329  if(x==0 && leftcol){
1330  pred= 1 << (bits - 1);
1331  }else{
1332  pred= ptr[-1];
1333  }
1334  }else{
1335  if(x==0 && leftcol){
1336  pred= ptr[-linesize];
1337  }else{
1338  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1339  }
1340  }
1341 
1342  if (s->interlaced && s->bottom_field)
1343  ptr += linesize >> 1;
1344  pred &= mask;
1345  *ptr= pred + ((unsigned)dc << point_transform);
1346  }else{
1347  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1348  if(y==0 && toprow){
1349  if(x==0 && leftcol){
1350  pred= 1 << (bits - 1);
1351  }else{
1352  pred= ptr16[-1];
1353  }
1354  }else{
1355  if(x==0 && leftcol){
1356  pred= ptr16[-linesize];
1357  }else{
1358  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1359  }
1360  }
1361 
1362  if (s->interlaced && s->bottom_field)
1363  ptr16 += linesize >> 1;
1364  pred &= mask;
1365  *ptr16= pred + ((unsigned)dc << point_transform);
1366  }
1367  if (++x == h) {
1368  x = 0;
1369  y++;
1370  }
1371  }
1372  }
1373  } else {
1374  for (i = 0; i < nb_components; i++) {
1375  uint8_t *ptr;
1376  uint16_t *ptr16;
1377  int n, h, v, x, y, c, j, linesize, dc;
1378  n = s->nb_blocks[i];
1379  c = s->comp_index[i];
1380  h = s->h_scount[i];
1381  v = s->v_scount[i];
1382  x = 0;
1383  y = 0;
1384  linesize = s->linesize[c];
1385 
1386  if(bits>8) linesize /= 2;
1387 
1388  for (j = 0; j < n; j++) {
1389  int pred;
1390 
1391  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1392  if(dc == 0xFFFFF)
1393  return -1;
1394  if ( h * mb_x + x >= s->width
1395  || v * mb_y + y >= s->height) {
1396  // Nothing to do
1397  } else if (bits<=8) {
1398  ptr = s->picture_ptr->data[c] +
1399  (linesize * (v * mb_y + y)) +
1400  (h * mb_x + x); //FIXME optimize this crap
1401  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1402 
1403  pred &= mask;
1404  *ptr = pred + ((unsigned)dc << point_transform);
1405  }else{
1406  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1407  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1408 
1409  pred &= mask;
1410  *ptr16= pred + ((unsigned)dc << point_transform);
1411  }
1412 
1413  if (++x == h) {
1414  x = 0;
1415  y++;
1416  }
1417  }
1418  }
1419  }
1420  if (s->restart_interval && !--s->restart_count) {
1421  align_get_bits(&s->gb);
1422  skip_bits(&s->gb, 16); /* skip RSTn */
1423  }
1424  }
1425  }
1426  return 0;
1427 }
1428 
1429 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1430  uint8_t *dst, const uint8_t *src,
1431  int linesize, int lowres)
1432 {
1433  switch (lowres) {
1434  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1435  break;
1436  case 1: copy_block4(dst, src, linesize, linesize, 4);
1437  break;
1438  case 2: copy_block2(dst, src, linesize, linesize, 2);
1439  break;
1440  case 3: *dst = *src;
1441  break;
1442  }
1443 }
1444 
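/* For bit depths that are not a multiple of 8, decoded samples are scaled up
 * so they occupy the full 8- or 16-bit range of the output pixel format. */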
1445 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1446 {
1447  int block_x, block_y;
1448  int size = 8 >> s->avctx->lowres;
1449  if (s->bits > 8) {
1450  for (block_y=0; block_y<size; block_y++)
1451  for (block_x=0; block_x<size; block_x++)
1452  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1453  } else {
1454  for (block_y=0; block_y<size; block_y++)
1455  for (block_x=0; block_x<size; block_x++)
1456  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1457  }
1458 }
1459 
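/* The mb_bitmask/reference arguments allow a caller to decode only the
 * macroblocks flagged in the bitmask and copy the rest from a previously
 * decoded reference frame (used e.g. for MxPEG-style partial updates). */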
1460 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1461  int Al, const uint8_t *mb_bitmask,
1462  int mb_bitmask_size,
1463  const AVFrame *reference)
1464 {
1465  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1466  uint8_t *data[MAX_COMPONENTS];
1467  const uint8_t *reference_data[MAX_COMPONENTS];
1468  int linesize[MAX_COMPONENTS];
1469  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1470  int bytes_per_pixel = 1 + (s->bits > 8);
1471 
1472  if (mb_bitmask) {
1473  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1474  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1475  return AVERROR_INVALIDDATA;
1476  }
1477  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1478  }
1479 
1480  s->restart_count = 0;
1481 
1482  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1483  &chroma_v_shift);
1484  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1485  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1486 
1487  for (i = 0; i < nb_components; i++) {
1488  int c = s->comp_index[i];
1489  data[c] = s->picture_ptr->data[c];
1490  reference_data[c] = reference ? reference->data[c] : NULL;
1491  linesize[c] = s->linesize[c];
1492  s->coefs_finished[c] |= 1;
1493  }
1494 
1495  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1496  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1497  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1498 
1499  if (s->restart_interval && !s->restart_count)
1500  s->restart_count = s->restart_interval;
1501 
1502  if (get_bits_left(&s->gb) < 0) {
1503  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1504  -get_bits_left(&s->gb));
1505  return AVERROR_INVALIDDATA;
1506  }
1507  for (i = 0; i < nb_components; i++) {
1508  uint8_t *ptr;
1509  int n, h, v, x, y, c, j;
1510  int block_offset;
1511  n = s->nb_blocks[i];
1512  c = s->comp_index[i];
1513  h = s->h_scount[i];
1514  v = s->v_scount[i];
1515  x = 0;
1516  y = 0;
1517  for (j = 0; j < n; j++) {
1518  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1519  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1520 
1521  if (s->interlaced && s->bottom_field)
1522  block_offset += linesize[c] >> 1;
1523  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1524  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1525  ptr = data[c] + block_offset;
1526  } else
1527  ptr = NULL;
1528  if (!s->progressive) {
1529  if (copy_mb) {
1530  if (ptr)
1531  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1532  linesize[c], s->avctx->lowres);
1533 
1534  } else {
1535  s->bdsp.clear_block(s->block);
1536  if (decode_block(s, s->block, i,
1537  s->dc_index[i], s->ac_index[i],
1538  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1539  av_log(s->avctx, AV_LOG_ERROR,
1540  "error y=%d x=%d\n", mb_y, mb_x);
1541  return AVERROR_INVALIDDATA;
1542  }
1543  if (ptr && linesize[c]) {
1544  s->idsp.idct_put(ptr, linesize[c], s->block);
1545  if (s->bits & 7)
1546  shift_output(s, ptr, linesize[c]);
1547  }
1548  }
1549  } else {
1550  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1551  (h * mb_x + x);
1552  int16_t *block = s->blocks[c][block_idx];
1553  if (Ah)
1554  block[0] += get_bits1(&s->gb) *
1555  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1556  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1557  s->quant_matrixes[s->quant_sindex[i]],
1558  Al) < 0) {
1559  av_log(s->avctx, AV_LOG_ERROR,
1560  "error y=%d x=%d\n", mb_y, mb_x);
1561  return AVERROR_INVALIDDATA;
1562  }
1563  }
1564  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1565  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1566  mb_x, mb_y, x, y, c, s->bottom_field,
1567  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1568  if (++x == h) {
1569  x = 0;
1570  y++;
1571  }
1572  }
1573  }
1574 
1575  handle_rstn(s, nb_components);
1576  }
1577  }
1578  return 0;
1579 }
1580 
1581 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1582  int se, int Ah, int Al)
1583 {
1584  int mb_x, mb_y;
1585  int EOBRUN = 0;
1586  int c = s->comp_index[0];
1587  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1588 
1589  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1590  if (se < ss || se > 63) {
1591  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1592  return AVERROR_INVALIDDATA;
1593  }
1594 
1595  // s->coefs_finished is a bitmask for coefficients coded
1596  // ss and se are parameters telling start and end coefficients
1597  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1598 
1599  s->restart_count = 0;
1600 
1601  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1602  int block_idx = mb_y * s->block_stride[c];
1603  int16_t (*block)[64] = &s->blocks[c][block_idx];
1604  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1605  if (get_bits_left(&s->gb) <= 0) {
1606  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1607  return AVERROR_INVALIDDATA;
1608  }
1609  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1610  int ret;
1611  if (s->restart_interval && !s->restart_count)
1612  s->restart_count = s->restart_interval;
1613 
1614  if (Ah)
1615  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1616  quant_matrix, ss, se, Al, &EOBRUN);
1617  else
1618  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1619  quant_matrix, ss, se, Al, &EOBRUN);
1620 
1621  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1622  ret = AVERROR_INVALIDDATA;
1623  if (ret < 0) {
1624  av_log(s->avctx, AV_LOG_ERROR,
1625  "error y=%d x=%d\n", mb_y, mb_x);
1626  return AVERROR_INVALIDDATA;
1627  }
1628 
1629  if (handle_rstn(s, 0))
1630  EOBRUN = 0;
1631  }
1632  }
1633  return 0;
1634 }
1635 
1636 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1637 {
1638  int mb_x, mb_y;
1639  int c;
1640  const int bytes_per_pixel = 1 + (s->bits > 8);
1641  const int block_size = s->lossless ? 1 : 8;
1642 
1643  for (c = 0; c < s->nb_components; c++) {
1644  uint8_t *data = s->picture_ptr->data[c];
1645  int linesize = s->linesize[c];
1646  int h = s->h_max / s->h_count[c];
1647  int v = s->v_max / s->v_count[c];
1648  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1649  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1650 
1651  if (~s->coefs_finished[c])
1652  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1653 
1654  if (s->interlaced && s->bottom_field)
1655  data += linesize >> 1;
1656 
1657  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1658  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1659  int block_idx = mb_y * s->block_stride[c];
1660  int16_t (*block)[64] = &s->blocks[c][block_idx];
1661  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1662  s->idsp.idct_put(ptr, linesize, *block);
1663  if (s->bits & 7)
1664  shift_output(s, ptr, linesize);
1665  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1666  }
1667  }
1668  }
1669 }
1670 
1671 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1672  int mb_bitmask_size, const AVFrame *reference)
1673 {
1674  int len, nb_components, i, h, v, predictor, point_transform;
1675  int index, id, ret;
1676  const int block_size = s->lossless ? 1 : 8;
1677  int ilv, prev_shift;
1678 
1679  if (!s->got_picture) {
1680  av_log(s->avctx, AV_LOG_WARNING,
1681  "Can not process SOS before SOF, skipping\n");
1682  return -1;
1683  }
1684 
1685  if (reference) {
1686  if (reference->width != s->picture_ptr->width ||
1687  reference->height != s->picture_ptr->height ||
1688  reference->format != s->picture_ptr->format) {
1689  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1690  return AVERROR_INVALIDDATA;
1691  }
1692  }
1693 
1694  /* XXX: verify len field validity */
1695  len = get_bits(&s->gb, 16);
1696  nb_components = get_bits(&s->gb, 8);
1697  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1698  avpriv_report_missing_feature(s->avctx,
1699  "decode_sos: nb_components (%d)",
1700  nb_components);
1701  return AVERROR_PATCHWELCOME;
1702  }
1703  if (len != 6 + 2 * nb_components) {
1704  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1705  return AVERROR_INVALIDDATA;
1706  }
1707  for (i = 0; i < nb_components; i++) {
1708  id = get_bits(&s->gb, 8);
1709  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1710  /* find component index */
1711  for (index = 0; index < s->nb_components; index++)
1712  if (id == s->component_id[index])
1713  break;
1714  if (index == s->nb_components) {
1715  av_log(s->avctx, AV_LOG_ERROR,
1716  "decode_sos: index(%d) out of components\n", index);
1717  return AVERROR_INVALIDDATA;
1718  }
1719  /* Metasoft MJPEG codec has Cb and Cr swapped */
1720  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1721  && nb_components == 3 && s->nb_components == 3 && i)
1722  index = 3 - i;
1723 
1724  s->quant_sindex[i] = s->quant_index[index];
1725  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1726  s->h_scount[i] = s->h_count[index];
1727  s->v_scount[i] = s->v_count[index];
1728 
1729  s->comp_index[i] = index;
1730 
1731  s->dc_index[i] = get_bits(&s->gb, 4);
1732  s->ac_index[i] = get_bits(&s->gb, 4);
1733 
1734  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1735  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1736  goto out_of_range;
1737  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1738  goto out_of_range;
1739  }
1740 
1741  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1742  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1743  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1744  prev_shift = get_bits(&s->gb, 4); /* Ah */
1745  point_transform = get_bits(&s->gb, 4); /* Al */
1746  }else
1747  prev_shift = point_transform = 0;
1748 
1749  if (nb_components > 1) {
1750  /* interleaved stream */
1751  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1752  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1753  } else if (!s->ls) { /* skip this for JPEG-LS */
1754  h = s->h_max / s->h_scount[0];
1755  v = s->v_max / s->v_scount[0];
1756  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1757  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1758  s->nb_blocks[0] = 1;
1759  s->h_scount[0] = 1;
1760  s->v_scount[0] = 1;
1761  }
1762 
1763  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1764  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1765  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1766  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1767  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1768 
1769 
1770  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1771  for (i = s->mjpb_skiptosod; i > 0; i--)
1772  skip_bits(&s->gb, 8);
1773 
1774 next_field:
1775  for (i = 0; i < nb_components; i++)
1776  s->last_dc[i] = (4 << s->bits);
1777 
1778  if (s->avctx->hwaccel) {
1779  int bytes_to_start = get_bits_count(&s->gb) / 8;
1780  av_assert0(bytes_to_start >= 0 &&
1781  s->raw_scan_buffer_size >= bytes_to_start);
1782 
1783  ret = FF_HW_CALL(s->avctx, decode_slice,
1784  s->raw_scan_buffer + bytes_to_start,
1785  s->raw_scan_buffer_size - bytes_to_start);
1786  if (ret < 0)
1787  return ret;
1788 
1789  } else if (s->lossless) {
1790  av_assert0(s->picture_ptr == s->picture);
1791  if (CONFIG_JPEGLS_DECODER && s->ls) {
1792 // for () {
1793 // reset_ls_coding_parameters(s, 0);
1794 
1795  if ((ret = ff_jpegls_decode_picture(s, predictor,
1796  point_transform, ilv)) < 0)
1797  return ret;
1798  } else {
1799  if (s->rgb || s->bayer) {
1800  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1801  return ret;
1802  } else {
1803  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1804  point_transform,
1805  nb_components)) < 0)
1806  return ret;
1807  }
1808  }
1809  } else {
1810  if (s->progressive && predictor) {
1811  av_assert0(s->picture_ptr == s->picture);
1812  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1813  ilv, prev_shift,
1814  point_transform)) < 0)
1815  return ret;
1816  } else {
1817  if ((ret = mjpeg_decode_scan(s, nb_components,
1818  prev_shift, point_transform,
1819  mb_bitmask, mb_bitmask_size, reference)) < 0)
1820  return ret;
1821  }
1822  }
1823 
1824  if (s->interlaced &&
1825  get_bits_left(&s->gb) > 32 &&
1826  show_bits(&s->gb, 8) == 0xFF) {
1827  GetBitContext bak = s->gb;
1828  align_get_bits(&bak);
1829  if (show_bits(&bak, 16) == 0xFFD1) {
1830  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1831  s->gb = bak;
1832  skip_bits(&s->gb, 16);
1833  s->bottom_field ^= 1;
1834 
1835  goto next_field;
1836  }
1837  }
1838 
1839  emms_c();
1840  return 0;
1841  out_of_range:
1842  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1843  return AVERROR_INVALIDDATA;
1844 }
1845 
1846 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1847 {
1848  if (get_bits(&s->gb, 16) != 4)
1849  return AVERROR_INVALIDDATA;
1850  s->restart_interval = get_bits(&s->gb, 16);
1851  s->restart_count = 0;
1852  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1853  s->restart_interval);
1854 
1855  return 0;
1856 }
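/* Editor's note (illustrative example, not part of the original source): the
 * DRI handler above requires a segment length of exactly 4 and then reads the
 * 16-bit restart interval. For example, the marker sequence
 *     FF DD 00 04 00 20
 * sets restart_interval = 0x0020 = 32, i.e. an RSTn marker is expected after
 * every 32 MCUs of entropy-coded data, and restart_count is reset to 0. */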
1857 
1858 static int mjpeg_decode_app(MJpegDecodeContext *s)
1859 {
1860  int len, id, i;
1861 
1862  len = get_bits(&s->gb, 16);
1863  if (len < 6) {
1864  if (s->bayer) {
1865  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1866  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1867  skip_bits(&s->gb, len);
1868  return 0;
1869  } else
1870  return AVERROR_INVALIDDATA;
1871  }
1872  if (8 * len > get_bits_left(&s->gb))
1873  return AVERROR_INVALIDDATA;
1874 
1875  id = get_bits_long(&s->gb, 32);
1876  len -= 6;
1877 
1878  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1879  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1880  av_fourcc2str(av_bswap32(id)), id, len);
1881 
1882  /* Buggy AVID, it puts EOI only at every 10th frame. */
1883  /* Also, this fourcc is used by non-avid files too, it holds some
1884  information, but it's always present in AVID-created files. */
1885  if (id == AV_RB32("AVI1")) {
1886  /* structure:
1887  4bytes AVI1
1888  1bytes polarity
1889  1bytes always zero
1890  4bytes field_size
1891  4bytes field_size_less_padding
1892  */
1893  s->buggy_avid = 1;
1894  i = get_bits(&s->gb, 8); len--;
1895  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1896  goto out;
1897  }
1898 
1899  if (id == AV_RB32("JFIF")) {
1900  int t_w, t_h, v1, v2;
1901  if (len < 8)
1902  goto out;
1903  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1904  v1 = get_bits(&s->gb, 8);
1905  v2 = get_bits(&s->gb, 8);
1906  skip_bits(&s->gb, 8);
1907 
1908  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1909  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1910  if ( s->avctx->sample_aspect_ratio.num <= 0
1911  || s->avctx->sample_aspect_ratio.den <= 0) {
1912  s->avctx->sample_aspect_ratio.num = 0;
1913  s->avctx->sample_aspect_ratio.den = 1;
1914  }
1915 
1916  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1917  av_log(s->avctx, AV_LOG_INFO,
1918  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1919  v1, v2,
1920  s->avctx->sample_aspect_ratio.num,
1921  s->avctx->sample_aspect_ratio.den);
1922 
1923  len -= 8;
1924  if (len >= 2) {
1925  t_w = get_bits(&s->gb, 8);
1926  t_h = get_bits(&s->gb, 8);
1927  if (t_w && t_h) {
1928  /* skip thumbnail */
1929  if (len - 10 - (t_w * t_h * 3) > 0)
1930  len -= t_w * t_h * 3;
1931  }
1932  len -= 2;
1933  }
1934  goto out;
1935  }
1936 
1937  if ( id == AV_RB32("Adob")
1938  && len >= 7
1939  && show_bits(&s->gb, 8) == 'e'
1940  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1941  skip_bits(&s->gb, 8); /* 'e' */
1942  skip_bits(&s->gb, 16); /* version */
1943  skip_bits(&s->gb, 16); /* flags0 */
1944  skip_bits(&s->gb, 16); /* flags1 */
1945  s->adobe_transform = get_bits(&s->gb, 8);
1946  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1947  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1948  len -= 7;
1949  goto out;
1950  }
1951 
1952  if (id == AV_RB32("LJIF")) {
1953  int rgb = s->rgb;
1954  int pegasus_rct = s->pegasus_rct;
1955  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1956  av_log(s->avctx, AV_LOG_INFO,
1957  "Pegasus lossless jpeg header found\n");
1958  skip_bits(&s->gb, 16); /* version ? */
1959  skip_bits(&s->gb, 16); /* unknown always 0? */
1960  skip_bits(&s->gb, 16); /* unknown always 0? */
1961  skip_bits(&s->gb, 16); /* unknown always 0? */
1962  switch (i=get_bits(&s->gb, 8)) {
1963  case 1:
1964  rgb = 1;
1965  pegasus_rct = 0;
1966  break;
1967  case 2:
1968  rgb = 1;
1969  pegasus_rct = 1;
1970  break;
1971  default:
1972  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1973  }
1974 
1975  len -= 9;
1976  if (s->bayer)
1977  goto out;
1978  if (s->got_picture)
1979  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1980  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1981  goto out;
1982  }
1983 
1984  s->rgb = rgb;
1985  s->pegasus_rct = pegasus_rct;
1986 
1987  goto out;
1988  }
1989  if (id == AV_RL32("colr") && len > 0) {
1990  s->colr = get_bits(&s->gb, 8);
1991  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1992  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1993  len --;
1994  goto out;
1995  }
1996  if (id == AV_RL32("xfrm") && len > 0) {
1997  s->xfrm = get_bits(&s->gb, 8);
1998  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1999  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
2000  len --;
2001  goto out;
2002  }
2003 
2004  /* JPS extension by VRex */
2005  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2006  int flags, layout, type;
2007  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2008  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2009 
2010  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2011  skip_bits(&s->gb, 16); len -= 2; /* block length */
2012  skip_bits(&s->gb, 8); /* reserved */
2013  flags = get_bits(&s->gb, 8);
2014  layout = get_bits(&s->gb, 8);
2015  type = get_bits(&s->gb, 8);
2016  len -= 4;
2017 
2018  av_freep(&s->stereo3d);
2019  s->stereo3d = av_stereo3d_alloc();
2020  if (!s->stereo3d) {
2021  goto out;
2022  }
2023  if (type == 0) {
2024  s->stereo3d->type = AV_STEREO3D_2D;
2025  } else if (type == 1) {
2026  switch (layout) {
2027  case 0x01:
2028  s->stereo3d->type = AV_STEREO3D_LINES;
2029  break;
2030  case 0x02:
2031  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2032  break;
2033  case 0x03:
2034  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2035  break;
2036  }
2037  if (!(flags & 0x04)) {
2038  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2039  }
2040  }
2041  goto out;
2042  }
2043 
2044  /* EXIF metadata */
2045  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2046  GetByteContext gbytes;
2047  int ret, le, ifd_offset, bytes_read;
2048  const uint8_t *aligned;
2049 
2050  skip_bits(&s->gb, 16); // skip padding
2051  len -= 2;
2052 
2053  // init byte wise reading
2054  aligned = align_get_bits(&s->gb);
2055  bytestream2_init(&gbytes, aligned, len);
2056 
2057  // read TIFF header
2058  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2059  if (ret) {
2060  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2061  } else {
2062  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2063 
2064  // read 0th IFD and store the metadata
2065  // (return values > 0 indicate the presence of subimage metadata)
2066  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2067  if (ret < 0) {
2068  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2069  }
2070  }
2071 
2072  bytes_read = bytestream2_tell(&gbytes);
2073  skip_bits(&s->gb, bytes_read << 3);
2074  len -= bytes_read;
2075 
2076  goto out;
2077  }
2078 
2079  /* Apple MJPEG-A */
2080  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2081  id = get_bits_long(&s->gb, 32);
2082  len -= 4;
2083  /* Apple MJPEG-A */
2084  if (id == AV_RB32("mjpg")) {
2085  /* structure:
2086  4bytes field size
2087  4bytes pad field size
2088  4bytes next off
2089  4bytes quant off
2090  4bytes huff off
2091  4bytes image off
2092  4bytes scan off
2093  4bytes data off
2094  */
2095  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2096  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2097  }
2098  }
2099 
2100  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2101  int id2;
2102  unsigned seqno;
2103  unsigned nummarkers;
2104 
2105  id = get_bits_long(&s->gb, 32);
2106  id2 = get_bits(&s->gb, 24);
2107  len -= 7;
2108  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2109  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2110  goto out;
2111  }
2112 
2113  skip_bits(&s->gb, 8);
2114  seqno = get_bits(&s->gb, 8);
2115  len -= 2;
2116  if (seqno == 0) {
2117  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2118  goto out;
2119  }
2120 
2121  nummarkers = get_bits(&s->gb, 8);
2122  len -= 1;
2123  if (nummarkers == 0) {
2124  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2125  goto out;
2126  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2127  av_log(s->avctx, AV_LOG_WARNING, "Mistmatch in coded number of ICC markers between markers\n");
2128  goto out;
2129  } else if (seqno > nummarkers) {
2130  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2131  goto out;
2132  }
2133 
2134  /* Allocate if this is the first APP2 we've seen. */
2135  if (s->iccnum == 0) {
2136  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2137  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2138  return AVERROR(ENOMEM);
2139  }
2140  s->iccnum = nummarkers;
2141  }
2142 
2143  if (s->iccentries[seqno - 1].data) {
2144  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2145  goto out;
2146  }
2147 
2148  s->iccentries[seqno - 1].length = len;
2149  s->iccentries[seqno - 1].data = av_malloc(len);
2150  if (!s->iccentries[seqno - 1].data) {
2151  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2152  return AVERROR(ENOMEM);
2153  }
2154 
2155  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2156  skip_bits(&s->gb, len << 3);
2157  len = 0;
2158  s->iccread++;
2159 
2160  if (s->iccread > s->iccnum)
2161  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2162  }
2163 
2164 out:
2165  /* slow but needed for extreme adobe jpegs */
2166  if (len < 0)
2167  av_log(s->avctx, AV_LOG_ERROR,
2168  "mjpeg: error, decode_app parser read over the end\n");
2169  while (--len > 0)
2170  skip_bits(&s->gb, 8);
2171 
2172  return 0;
2173 }
2174 
2175 static int mjpeg_decode_com(MJpegDecodeContext *s)
2176 {
2177  int len = get_bits(&s->gb, 16);
2178  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2179  int i;
2180  char *cbuf = av_malloc(len - 1);
2181  if (!cbuf)
2182  return AVERROR(ENOMEM);
2183 
2184  for (i = 0; i < len - 2; i++)
2185  cbuf[i] = get_bits(&s->gb, 8);
2186  if (i > 0 && cbuf[i - 1] == '\n')
2187  cbuf[i - 1] = 0;
2188  else
2189  cbuf[i] = 0;
2190 
2191  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2192  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2193 
2194  /* buggy avid, it puts EOI only at every 10th frame */
2195  if (!strncmp(cbuf, "AVID", 4)) {
2196  parse_avid(s, cbuf, len);
2197  } else if (!strcmp(cbuf, "CS=ITU601"))
2198  s->cs_itu601 = 1;
2199  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2200  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2201  s->flipped = 1;
2202  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2203  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2204  s->multiscope = 2;
2205  }
2206 
2207  av_free(cbuf);
2208  }
2209 
2210  return 0;
2211 }
2212 
2213 /* return the 8 bit start code value and update the search
2214  state. Return -1 if no start code found */
2215 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2216 {
2217  const uint8_t *buf_ptr;
2218  unsigned int v, v2;
2219  int val;
2220  int skipped = 0;
2221 
2222  buf_ptr = *pbuf_ptr;
2223  while (buf_end - buf_ptr > 1) {
2224  v = *buf_ptr++;
2225  v2 = *buf_ptr;
2226  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2227  val = *buf_ptr++;
2228  goto found;
2229  }
2230  skipped++;
2231  }
2232  buf_ptr = buf_end;
2233  val = -1;
2234 found:
2235  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2236  *pbuf_ptr = buf_ptr;
2237  return val;
2238 }
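/* Editor's note (illustrative example, not part of the original source):
 * find_marker() looks for a 0xFF byte followed by a marker byte in the
 * SOF0..COM range (0xC0..0xFE). Given the bytes  12 34 FF D8 FF E0 ..., the
 * two leading bytes are skipped, 0xD8 (SOI) is returned, and *pbuf_ptr is
 * left just past the marker, so the following call would return 0xE0 (APP0). */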
2239 
2240 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2241  const uint8_t **buf_ptr, const uint8_t *buf_end,
2242  const uint8_t **unescaped_buf_ptr,
2243  int *unescaped_buf_size)
2244 {
2245  int start_code;
2246  start_code = find_marker(buf_ptr, buf_end);
2247 
2248  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2249  if (!s->buffer)
2250  return AVERROR(ENOMEM);
2251 
2252  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2253  if (start_code == SOS && !s->ls) {
2254  const uint8_t *src = *buf_ptr;
2255  const uint8_t *ptr = src;
2256  uint8_t *dst = s->buffer;
2257 
2258  #define copy_data_segment(skip) do { \
2259  ptrdiff_t length = (ptr - src) - (skip); \
2260  if (length > 0) { \
2261  memcpy(dst, src, length); \
2262  dst += length; \
2263  src = ptr; \
2264  } \
2265  } while (0)
2266 
2267  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2268  ptr = buf_end;
2269  copy_data_segment(0);
2270  } else {
2271  while (ptr < buf_end) {
2272  uint8_t x = *(ptr++);
2273 
2274  if (x == 0xff) {
2275  ptrdiff_t skip = 0;
2276  while (ptr < buf_end && x == 0xff) {
2277  x = *(ptr++);
2278  skip++;
2279  }
2280 
2281  /* 0xFF, 0xFF, ... */
2282  if (skip > 1) {
2283  copy_data_segment(skip);
2284 
2285  /* decrement src as it is equal to ptr after the
2286  * copy_data_segment macro and we might want to
2287  * copy the current value of x later on */
2288  src--;
2289  }
2290 
2291  if (x < RST0 || x > RST7) {
2292  copy_data_segment(1);
2293  if (x)
2294  break;
2295  }
2296  }
2297  }
2298  if (src < ptr)
2299  copy_data_segment(0);
2300  }
2301  #undef copy_data_segment
2302 
2303  *unescaped_buf_ptr = s->buffer;
2304  *unescaped_buf_size = dst - s->buffer;
2305  memset(s->buffer + *unescaped_buf_size, 0,
2306  AV_INPUT_BUFFER_PADDING_SIZE);
2307 
2308  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2309  (buf_end - *buf_ptr) - (dst - s->buffer));
2310  } else if (start_code == SOS && s->ls) {
2311  const uint8_t *src = *buf_ptr;
2312  uint8_t *dst = s->buffer;
2313  int bit_count = 0;
2314  int t = 0, b = 0;
2315  PutBitContext pb;
2316 
2317  /* find marker */
2318  while (src + t < buf_end) {
2319  uint8_t x = src[t++];
2320  if (x == 0xff) {
2321  while ((src + t < buf_end) && x == 0xff)
2322  x = src[t++];
2323  if (x & 0x80) {
2324  t -= FFMIN(2, t);
2325  break;
2326  }
2327  }
2328  }
2329  bit_count = t * 8;
2330  init_put_bits(&pb, dst, t);
2331 
2332  /* unescape bitstream */
2333  while (b < t) {
2334  uint8_t x = src[b++];
2335  put_bits(&pb, 8, x);
2336  if (x == 0xFF && b < t) {
2337  x = src[b++];
2338  if (x & 0x80) {
2339  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2340  x &= 0x7f;
2341  }
2342  put_bits(&pb, 7, x);
2343  bit_count--;
2344  }
2345  }
2346  flush_put_bits(&pb);
2347 
2348  *unescaped_buf_ptr = dst;
2349  *unescaped_buf_size = (bit_count + 7) >> 3;
2350  memset(s->buffer + *unescaped_buf_size, 0,
2351  AV_INPUT_BUFFER_PADDING_SIZE);
2352  } else {
2353  *unescaped_buf_ptr = *buf_ptr;
2354  *unescaped_buf_size = buf_end - *buf_ptr;
2355  }
2356 
2357  return start_code;
2358 }
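/* Editor's note (illustrative example, not part of the original source): for a
 * non-LS SOS payload the unescaping loop above removes JPEG byte stuffing.
 * A raw input of
 *     AB FF 00 CD FF D3 EF
 * is copied to the work buffer as  AB FF CD FF D3 EF : the stuffed 0x00 that
 * follows a 0xFF data byte is dropped, restart markers (FF D0..FF D7) are
 * passed through for handle_rstn(), and any other non-zero marker byte (such
 * as FF D9, EOI) terminates the scan data. */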
2359 
2360 static void reset_icc_profile(MJpegDecodeContext *s)
2361 {
2362  int i;
2363 
2364  if (s->iccentries) {
2365  for (i = 0; i < s->iccnum; i++)
2366  av_freep(&s->iccentries[i].data);
2367  av_freep(&s->iccentries);
2368  }
2369 
2370  s->iccread = 0;
2371  s->iccnum = 0;
2372 }
2373 
2374 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2375  int *got_frame, const AVPacket *avpkt,
2376  const uint8_t *buf, const int buf_size)
2377 {
2378  MJpegDecodeContext *s = avctx->priv_data;
2379  const uint8_t *buf_end, *buf_ptr;
2380  const uint8_t *unescaped_buf_ptr;
2381  int hshift, vshift;
2382  int unescaped_buf_size;
2383  int start_code;
2384  int index;
2385  int ret = 0;
2386  int is16bit;
2387  AVDictionaryEntry *e = NULL;
2388 
2389  s->force_pal8 = 0;
2390 
2391  s->buf_size = buf_size;
2392 
2393  av_dict_free(&s->exif_metadata);
2394  av_freep(&s->stereo3d);
2395  s->adobe_transform = -1;
2396 
2397  if (s->iccnum != 0)
2398  reset_icc_profile(s);
2399 
2400 redo_for_pal8:
2401  buf_ptr = buf;
2402  buf_end = buf + buf_size;
2403  while (buf_ptr < buf_end) {
2404  /* find start of next marker */
2405  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2406  &unescaped_buf_ptr,
2407  &unescaped_buf_size);
2408  /* EOF */
2409  if (start_code < 0) {
2410  break;
2411  } else if (unescaped_buf_size > INT_MAX / 8) {
2412  av_log(avctx, AV_LOG_ERROR,
2413  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2414  start_code, unescaped_buf_size, buf_size);
2415  return AVERROR_INVALIDDATA;
2416  }
2417  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2418  start_code, buf_end - buf_ptr);
2419 
2420  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2421 
2422  if (ret < 0) {
2423  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2424  goto fail;
2425  }
2426 
2427  s->start_code = start_code;
2428  if (avctx->debug & FF_DEBUG_STARTCODE)
2429  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2430 
2431  /* process markers */
2432  if (start_code >= RST0 && start_code <= RST7) {
2433  av_log(avctx, AV_LOG_DEBUG,
2434  "restart marker: %d\n", start_code & 0x0f);
2435  /* APP fields */
2436  } else if (start_code >= APP0 && start_code <= APP15) {
2437  if ((ret = mjpeg_decode_app(s)) < 0)
2438  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2439  av_err2str(ret));
2440  /* Comment */
2441  } else if (start_code == COM) {
2442  ret = mjpeg_decode_com(s);
2443  if (ret < 0)
2444  return ret;
2445  } else if (start_code == DQT) {
2446  ret = ff_mjpeg_decode_dqt(s);
2447  if (ret < 0)
2448  return ret;
2449  }
2450 
2451  ret = -1;
2452 
2453  if (!CONFIG_JPEGLS_DECODER &&
2454  (start_code == SOF48 || start_code == LSE)) {
2455  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2456  return AVERROR(ENOSYS);
2457  }
2458 
2459  if (avctx->skip_frame == AVDISCARD_ALL) {
2460  switch(start_code) {
2461  case SOF0:
2462  case SOF1:
2463  case SOF2:
2464  case SOF3:
2465  case SOF48:
2466  case SOI:
2467  case SOS:
2468  case EOI:
2469  break;
2470  default:
2471  goto skip;
2472  }
2473  }
2474 
2475  switch (start_code) {
2476  case SOI:
2477  s->restart_interval = 0;
2478  s->restart_count = 0;
2479  s->raw_image_buffer = buf_ptr;
2480  s->raw_image_buffer_size = buf_end - buf_ptr;
2481  /* nothing to do on SOI */
2482  break;
2483  case DHT:
2484  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2485  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2486  goto fail;
2487  }
2488  break;
2489  case SOF0:
2490  case SOF1:
2491  if (start_code == SOF0)
2492  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2493  else
2494  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2495  s->lossless = 0;
2496  s->ls = 0;
2497  s->progressive = 0;
2498  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2499  goto fail;
2500  break;
2501  case SOF2:
2502  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2503  s->lossless = 0;
2504  s->ls = 0;
2505  s->progressive = 1;
2506  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2507  goto fail;
2508  break;
2509  case SOF3:
2510  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2511 #if FF_API_CODEC_PROPS
2512 FF_DISABLE_DEPRECATION_WARNINGS
2513  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2514 FF_ENABLE_DEPRECATION_WARNINGS
2515 #endif
2516  s->lossless = 1;
2517  s->ls = 0;
2518  s->progressive = 0;
2519  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2520  goto fail;
2521  break;
2522  case SOF48:
2523  avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2524 #if FF_API_CODEC_PROPS
2525 FF_DISABLE_DEPRECATION_WARNINGS
2526  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2527 FF_ENABLE_DEPRECATION_WARNINGS
2528 #endif
2529  s->lossless = 1;
2530  s->ls = 1;
2531  s->progressive = 0;
2532  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2533  goto fail;
2534  break;
2535  case LSE:
2536  if (!CONFIG_JPEGLS_DECODER ||
2537  (ret = ff_jpegls_decode_lse(s)) < 0)
2538  goto fail;
2539  if (ret == 1)
2540  goto redo_for_pal8;
2541  break;
2542  case EOI:
2543 eoi_parser:
2544  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2545  s->progressive && s->cur_scan && s->got_picture)
2546  mjpeg_idct_scan_progressive_ac(s);
2547  s->cur_scan = 0;
2548  if (!s->got_picture) {
2549  av_log(avctx, AV_LOG_WARNING,
2550  "Found EOI before any SOF, ignoring\n");
2551  break;
2552  }
2553  if (s->interlaced) {
2554  s->bottom_field ^= 1;
2555  /* if not bottom field, do not output image yet */
2556  if (s->bottom_field == !s->interlace_polarity)
2557  break;
2558  }
2559  if (avctx->skip_frame == AVDISCARD_ALL) {
2560  s->got_picture = 0;
2561  goto the_end_no_picture;
2562  }
2563  if (avctx->hwaccel) {
2564  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2565  if (ret < 0)
2566  return ret;
2567 
2568  av_freep(&s->hwaccel_picture_private);
2569  }
2570  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2571  return ret;
2572  if (s->lossless)
2573  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2574  *got_frame = 1;
2575  s->got_picture = 0;
2576 
2577  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2578  int qp = FFMAX3(s->qscale[0],
2579  s->qscale[1],
2580  s->qscale[2]);
2581 
2582  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2583  }
2584 
2585  goto the_end;
2586  case SOS:
2587  s->raw_scan_buffer = buf_ptr;
2588  s->raw_scan_buffer_size = buf_end - buf_ptr;
2589 
2590  s->cur_scan++;
2591  if (avctx->skip_frame == AVDISCARD_ALL) {
2592  skip_bits(&s->gb, get_bits_left(&s->gb));
2593  break;
2594  }
2595 
2596  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2597  (avctx->err_recognition & AV_EF_EXPLODE))
2598  goto fail;
2599  break;
2600  case DRI:
2601  if ((ret = mjpeg_decode_dri(s)) < 0)
2602  return ret;
2603  break;
2604  case SOF5:
2605  case SOF6:
2606  case SOF7:
2607  case SOF9:
2608  case SOF10:
2609  case SOF11:
2610  case SOF13:
2611  case SOF14:
2612  case SOF15:
2613  case JPG:
2614  av_log(avctx, AV_LOG_ERROR,
2615  "mjpeg: unsupported coding type (%x)\n", start_code);
2616  break;
2617  }
2618 
2619 skip:
2620  /* end of processing for this start code */
2621  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2622  av_log(avctx, AV_LOG_DEBUG,
2623  "marker parser used %d bytes (%d bits)\n",
2624  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2625  }
2626  if (s->got_picture && s->cur_scan) {
2627  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2628  goto eoi_parser;
2629  }
2630  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2631  return AVERROR_INVALIDDATA;
2632 fail:
2633  s->got_picture = 0;
2634  return ret;
2635 the_end:
2636 
2637  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2638 
2639  if (AV_RB32(s->upscale_h)) {
2640  int p;
2641  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2642  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2643  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2644  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2651  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2653  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2654  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2655  );
2656  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2657  if (ret)
2658  return ret;
2659 
2660  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2661  for (p = 0; p<s->nb_components; p++) {
2662  uint8_t *line = s->picture_ptr->data[p];
2663  int w = s->width;
2664  int h = s->height;
2665  if (!s->upscale_h[p])
2666  continue;
2667  if (p==1 || p==2) {
2668  w = AV_CEIL_RSHIFT(w, hshift);
2669  h = AV_CEIL_RSHIFT(h, vshift);
2670  }
2671  if (s->upscale_v[p] == 1)
2672  h = (h+1)>>1;
2673  av_assert0(w > 0);
2674  for (int i = 0; i < h; i++) {
2675  if (s->upscale_h[p] == 1) {
2676  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2677  else line[w - 1] = line[(w - 1) / 2];
2678  for (index = w - 2; index > 0; index--) {
2679  if (is16bit)
2680  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2681  else
2682  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2683  }
2684  } else if (s->upscale_h[p] == 2) {
2685  if (is16bit) {
2686  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2687  if (w > 1)
2688  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2689  } else {
2690  line[w - 1] = line[(w - 1) / 3];
2691  if (w > 1)
2692  line[w - 2] = line[w - 1];
2693  }
2694  for (index = w - 3; index > 0; index--) {
2695  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2696  }
2697  } else if (s->upscale_h[p] == 4){
2698  if (is16bit) {
2699  uint16_t *line16 = (uint16_t *) line;
2700  line16[w - 1] = line16[(w - 1) >> 2];
2701  if (w > 1)
2702  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2703  if (w > 2)
2704  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2705  } else {
2706  line[w - 1] = line[(w - 1) >> 2];
2707  if (w > 1)
2708  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2709  if (w > 2)
2710  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2711  }
2712  for (index = w - 4; index > 0; index--)
2713  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2714  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2715  }
2716  line += s->linesize[p];
2717  }
2718  }
2719  }
2720  if (AV_RB32(s->upscale_v)) {
2721  int p;
2722  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2727  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2728  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2729  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2730  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2731  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2732  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2733  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2734  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2735  );
2736  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2737  if (ret)
2738  return ret;
2739 
2740  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2741  for (p = 0; p < s->nb_components; p++) {
2742  uint8_t *dst;
2743  int w = s->width;
2744  int h = s->height;
2745  if (!s->upscale_v[p])
2746  continue;
2747  if (p==1 || p==2) {
2748  w = AV_CEIL_RSHIFT(w, hshift);
2749  h = AV_CEIL_RSHIFT(h, vshift);
2750  }
2751  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2752  for (int i = h - 1; i; i--) {
2753  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2754  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2755  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2756  memcpy(dst, src1, w);
2757  } else {
2758  for (index = 0; index < w; index++)
2759  dst[index] = (src1[index] + src2[index]) >> 1;
2760  }
2761  dst -= s->linesize[p];
2762  }
2763  }
2764  }
2765  if (s->flipped && !s->rgb) {
2766  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2767  if (ret)
2768  return ret;
2769 
2770  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2771  for (index=0; index<s->nb_components; index++) {
2772  int h = frame->height;
2773  if (index && index < 3)
2774  h = AV_CEIL_RSHIFT(h, vshift);
2775  if (frame->data[index]) {
2776  frame->data[index] += (h - 1) * frame->linesize[index];
2777  frame->linesize[index] *= -1;
2778  }
2779  }
2780  }
2781 
2782  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2783  av_assert0(s->nb_components == 3);
2784  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2785  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2786  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2787  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2788  }
2789 
2790  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2791  int w = s->picture_ptr->width;
2792  int h = s->picture_ptr->height;
2793  av_assert0(s->nb_components == 4);
2794  for (int i = 0; i < h; i++) {
2795  int j;
2796  uint8_t *dst[4];
2797  for (index=0; index<4; index++) {
2798  dst[index] = s->picture_ptr->data[index]
2799  + s->picture_ptr->linesize[index]*i;
2800  }
2801  for (j=0; j<w; j++) {
2802  int k = dst[3][j];
2803  int r = dst[0][j] * k;
2804  int g = dst[1][j] * k;
2805  int b = dst[2][j] * k;
2806  dst[0][j] = g*257 >> 16;
2807  dst[1][j] = b*257 >> 16;
2808  dst[2][j] = r*257 >> 16;
2809  }
2810  memset(dst[3], 255, w);
2811  }
2812  }
2813  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2814  int w = s->picture_ptr->width;
2815  int h = s->picture_ptr->height;
2816  av_assert0(s->nb_components == 4);
2817  for (int i = 0; i < h; i++) {
2818  int j;
2819  uint8_t *dst[4];
2820  for (index=0; index<4; index++) {
2821  dst[index] = s->picture_ptr->data[index]
2822  + s->picture_ptr->linesize[index]*i;
2823  }
2824  for (j=0; j<w; j++) {
2825  int k = dst[3][j];
2826  int r = (255 - dst[0][j]) * k;
2827  int g = (128 - dst[1][j]) * k;
2828  int b = (128 - dst[2][j]) * k;
2829  dst[0][j] = r*257 >> 16;
2830  dst[1][j] = (g*257 >> 16) + 128;
2831  dst[2][j] = (b*257 >> 16) + 128;
2832  }
2833  memset(dst[3], 255, w);
2834  }
2835  }
2836 
2837  if (s->stereo3d) {
2838  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2839  if (stereo) {
2840  stereo->type = s->stereo3d->type;
2841  stereo->flags = s->stereo3d->flags;
2842  }
2843  av_freep(&s->stereo3d);
2844  }
2845 
2846  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2847  AVFrameSideData *sd;
2848  size_t offset = 0;
2849  int total_size = 0;
2850 
2851  /* Sum size of all parts. */
2852  for (int i = 0; i < s->iccnum; i++)
2853  total_size += s->iccentries[i].length;
2854 
2855  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2856  if (ret < 0) {
2857  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2858  return ret;
2859  }
2860 
2861  if (sd) {
2862  /* Reassemble the parts, which are now in-order. */
2863  for (int i = 0; i < s->iccnum; i++) {
2864  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2865  offset += s->iccentries[i].length;
2866  }
2867  }
2868  }
2869 
2870  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2871  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2872  int orientation = strtol(value, &endptr, 0);
2873 
2874  if (!*endptr) {
2875  AVFrameSideData *sd = NULL;
2876 
2877  if (orientation >= 2 && orientation <= 8) {
2878  int32_t *matrix;
2879 
2880  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2881  if (!sd) {
2882  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2883  return AVERROR(ENOMEM);
2884  }
2885 
2886  matrix = (int32_t *)sd->data;
2887 
2888  switch (orientation) {
2889  case 2:
2892  break;
2893  case 3:
2895  break;
2896  case 4:
2899  break;
2900  case 5:
2903  break;
2904  case 6:
2906  break;
2907  case 7:
2910  break;
2911  case 8:
2913  break;
2914  default:
2915  av_assert0(0);
2916  }
2917  }
2918  }
2919  }
2920 
2921  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2922  av_dict_free(&s->exif_metadata);
2923 
2924  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2925  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2926  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2927  avctx->coded_height > s->orig_height) {
2928  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2929  frame->crop_top = frame->height - avctx->height;
2930  }
2931 
2932 the_end_no_picture:
2933  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2934  buf_end - buf_ptr);
2935  return buf_ptr - buf;
2936 }
2937 
2938 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2939  AVPacket *avpkt)
2940 {
2941  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2942  avpkt, avpkt->data, avpkt->size);
2943 }
2944 
2945 
2946 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2947  * even without having called ff_mjpeg_decode_init(). */
2948 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2949 {
2950  MJpegDecodeContext *s = avctx->priv_data;
2951  int i, j;
2952 
2953  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2954  av_log(avctx, AV_LOG_INFO, "Single field\n");
2955  }
2956 
2957  if (s->picture) {
2958  av_frame_free(&s->picture);
2959  s->picture_ptr = NULL;
2960  } else if (s->picture_ptr)
2961  av_frame_unref(s->picture_ptr);
2962 
2963  av_frame_free(&s->smv_frame);
2964 
2965  av_freep(&s->buffer);
2966  av_freep(&s->stereo3d);
2967  av_freep(&s->ljpeg_buffer);
2968  s->ljpeg_buffer_size = 0;
2969 
2970  for (i = 0; i < 3; i++) {
2971  for (j = 0; j < 4; j++)
2972  ff_vlc_free(&s->vlcs[i][j]);
2973  }
2974  for (i = 0; i < MAX_COMPONENTS; i++) {
2975  av_freep(&s->blocks[i]);
2976  av_freep(&s->last_nnz[i]);
2977  }
2978  av_dict_free(&s->exif_metadata);
2979 
2980  reset_icc_profile(s);
2981 
2982  av_freep(&s->hwaccel_picture_private);
2983  av_freep(&s->jls_state);
2984 
2985  return 0;
2986 }
2987 
2988 static void decode_flush(AVCodecContext *avctx)
2989 {
2990  MJpegDecodeContext *s = avctx->priv_data;
2991  s->got_picture = 0;
2992 
2993  s->smv_next_frame = 0;
2994  av_frame_unref(s->smv_frame);
2995 }
2996 
2997 #if CONFIG_MJPEG_DECODER
2998 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2999 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
3000 static const AVOption options[] = {
3001  { "extern_huff", "Use external huffman table.",
3002  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
3003  { NULL },
3004 };
3005 
3006 static const AVClass mjpegdec_class = {
3007  .class_name = "MJPEG decoder",
3008  .item_name = av_default_item_name,
3009  .option = options,
3010  .version = LIBAVUTIL_VERSION_INT,
3011 };
3012 
3013 const FFCodec ff_mjpeg_decoder = {
3014  .p.name = "mjpeg",
3015  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
3016  .p.type = AVMEDIA_TYPE_VIDEO,
3017  .p.id = AV_CODEC_ID_MJPEG,
3018  .priv_data_size = sizeof(MJpegDecodeContext),
3019  .init = ff_mjpeg_decode_init,
3020  .close = ff_mjpeg_decode_end,
3021  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3022  .flush = decode_flush,
3023  .p.capabilities = AV_CODEC_CAP_DR1,
3024  .p.max_lowres = 3,
3025  .p.priv_class = &mjpegdec_class,
3026  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
3027  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3030  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3031 #if CONFIG_MJPEG_NVDEC_HWACCEL
3032  HWACCEL_NVDEC(mjpeg),
3033 #endif
3034 #if CONFIG_MJPEG_VAAPI_HWACCEL
3035  HWACCEL_VAAPI(mjpeg),
3036 #endif
3037  NULL
3038  },
3039 };
3040 #endif
3041 #if CONFIG_THP_DECODER
3042 const FFCodec ff_thp_decoder = {
3043  .p.name = "thp",
3044  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3045  .p.type = AVMEDIA_TYPE_VIDEO,
3046  .p.id = AV_CODEC_ID_THP,
3047  .priv_data_size = sizeof(MJpegDecodeContext),
3048  .init = ff_mjpeg_decode_init,
3049  .close = ff_mjpeg_decode_end,
3050  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3051  .flush = decode_flush,
3052  .p.capabilities = AV_CODEC_CAP_DR1,
3053  .p.max_lowres = 3,
3054  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3055 };
3056 #endif
3057 
3058 #if CONFIG_SMVJPEG_DECODER
3059 // SMV JPEG just stacks several output frames into one JPEG picture
3060 // we handle that by setting up the cropping parameters appropriately
3061 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3062 {
3063  MJpegDecodeContext *s = avctx->priv_data;
3064 
3065  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3066 
3067  frame->width = avctx->coded_width;
3068  frame->height = avctx->coded_height;
3069  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3070  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3071 
3072  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3073  s->smv_frame->pts += s->smv_frame->duration;
3074  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3075 
3076  if (s->smv_next_frame == 0)
3077  av_frame_unref(s->smv_frame);
3078 }
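/* Editor's note (a worked example with assumed numbers, not part of the
 * original source): with coded_height = 480, avctx->height = 120 and
 * smv_frames_per_jpeg = 4, the second sub-frame (smv_next_frame == 1) gets
 * crop_top = 120 and crop_bottom = 480 - 2*120 = 240, so rows 120..239 of the
 * stacked JPEG picture are exposed as the visible output frame. */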
3079 
3080 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3081 {
3082  MJpegDecodeContext *s = avctx->priv_data;
3083  AVPacket *const pkt = avctx->internal->in_pkt;
3084  int got_frame = 0;
3085  int ret;
3086 
3087  if (s->smv_next_frame > 0)
3088  goto return_frame;
3089 
3090  ret = ff_decode_get_packet(avctx, pkt);
3091  if (ret < 0)
3092  return ret;
3093 
3094  av_frame_unref(s->smv_frame);
3095 
3096  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3097  s->smv_frame->pkt_dts = pkt->dts;
3098  av_packet_unref(pkt);
3099  if (ret < 0)
3100  return ret;
3101 
3102  if (!got_frame)
3103  return AVERROR(EAGAIN);
3104 
3105  // packet duration covers all the frames in the packet
3106  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3107 
3108 return_frame:
3109  av_assert0(s->smv_frame->buf[0]);
3110  ret = av_frame_ref(frame, s->smv_frame);
3111  if (ret < 0)
3112  return ret;
3113 
3114  smv_process_frame(avctx, frame);
3115  return 0;
3116 }
3117 
3118 const FFCodec ff_smvjpeg_decoder = {
3119  .p.name = "smvjpeg",
3120  CODEC_LONG_NAME("SMV JPEG"),
3121  .p.type = AVMEDIA_TYPE_VIDEO,
3122  .p.id = AV_CODEC_ID_SMVJPEG,
3123  .priv_data_size = sizeof(MJpegDecodeContext),
3124  .init = ff_mjpeg_decode_init,
3125  .close = ff_mjpeg_decode_end,
3126  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3127  .flush = decode_flush,
3128  .p.capabilities = AV_CODEC_CAP_DR1,
3129  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3131 };
3132 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1445
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:248
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:267
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:699
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1277
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1429
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2988
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:985
SOF0
@ SOF0
Definition: mjpeg.h:39
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1438
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:574
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:115
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:221
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:403
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
AVFrame::width
int width
Definition: frame.h:475
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:545
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:717
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:539
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:722
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:820
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1415
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:424
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:667
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3210
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:646
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1275
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1445
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:124
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1841
fail
#define fail()
Definition: checkasm.h:193
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:547
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:108
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2374
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2175
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:61
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3198
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:647
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:486
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:151
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:185
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:886
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:514
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1817
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:654
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:201
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1068
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:538
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:104
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:515
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:513
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2360
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2948
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:521
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:492
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
av_clip_int16
#define av_clip_int16
Definition: common.h:115
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:493
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1636
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:204
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:198
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
tiff_common.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:42
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1460
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1003
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1581
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
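For reference, a minimal sketch of how a decoder typically reads one Huffman-coded symbol with get_vlc2(); the helper name is ours, the VLC table is assumed to have been built beforehand (for example with ff_mjpeg_build_vlc()), and the 9-bit / depth-2 parameters are just an illustration of common usage.
/* Hedged sketch, not the decoder's actual code path. */
#include "get_bits.h"
#include "libavutil/error.h"

static int read_symbol(GetBitContext *gb, const VLCElem *vlc_table)
{
    int code = get_vlc2(gb, vlc_table, 9, 2); /* 9 primary bits, up to 2 lookups */
    if (code < 0)
        return AVERROR_INVALIDDATA;           /* the bits matched no code word */
    return code;
}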
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1876
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1697
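A hedged sketch of how a decoder built on the internal lavc API typically requests its output frame; it assumes width, height and pix_fmt have already been set on avctx, and the wrapper name is illustrative.
/* Sketch only: AV_GET_BUFFER_FLAG_REF asks for a reference-counted buffer
 * the decoder may keep across calls. */
#include "avcodec.h"
#include "decode.h"

static int alloc_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
}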
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1103
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:540
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:401
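A minimal sketch of typical av_frame_ref() usage: it shares the underlying buffers of an existing frame instead of copying pixel data. The helper name is hypothetical.
/* Hedged sketch, assuming 'src' is a valid, filled AVFrame. */
#include "libavutil/frame.h"

static int clone_frame_ref(AVFrame **out, const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);   /* adds a reference, no pixel copy */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    *out = dst;
    return 0;
}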
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2938
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:904
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1671
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:61
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2099
AV_RB32
Read an unsigned 32-bit big-endian value from a byte buffer.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:55
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:490
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
VD
#define VD
Definition: av1dec.c:1568
offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:292
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:169
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2215
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:176
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2270
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:838
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:537
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
src2
const pixel * src2
Definition: h264pred_template.c:421
display.h
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:204
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1846
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:169
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
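A short sketch of the usual av_fast_padded_malloc() pattern: a scratch buffer is grown only when the incoming payload is larger than what is already allocated, and the trailing AV_INPUT_BUFFER_PADDING_SIZE bytes are zeroed by the helper. The struct and function names are illustrative.
/* Hedged sketch of buffer reuse across calls. */
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

typedef struct Scratch {
    uint8_t      *buf;
    unsigned int  size;   /* current allocation, updated by the call */
} Scratch;

static int scratch_ensure(Scratch *s, size_t needed)
{
    av_fast_padded_malloc(&s->buf, &s->size, needed);
    return s->buf ? 0 : AVERROR(ENOMEM);  /* buf is freed and NULL on failure */
}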
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1422
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:623
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:716
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:632
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:662
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:700
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:2060
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:977
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:99
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:203
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1419
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:367
left
Definition: snow.txt:386
AV_RL32
Read an unsigned 32-bit little-endian value from a byte buffer.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2240
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
The byte offset of the option field within the private context structure; see the OFFSET() macro used in AVOption tables.
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVFrame::height
int height
Definition: frame.h:475
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:317
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
buffer
Frame data is stored in reference-counted buffers represented by AVFrame structures; several references may point to the same buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:739
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1658
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:166
values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:229
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1414
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:302
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:647
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1858
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
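A hedged sketch of attaching stereo packing information to a decoded frame, roughly what a decoder does when a container or APPx segment signals top/bottom stereoscopic content; the helper name and parameter are ours.
/* Sketch only: mark a frame as top/bottom packed stereo. */
#include "libavutil/stereo3d.h"

static int mark_top_bottom(AVFrame *frame, int right_view_first)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = right_view_first ? AV_STEREO3D_FLAG_INVERT : 0;
    return 0;
}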
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
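A small sketch of av_dict_copy() usage, for example to hand parsed metadata (such as EXIF tags) to a caller-owned dictionary; flags = 0 requests the default copy behaviour, and the helper name is illustrative.
/* Hedged sketch of duplicating a metadata dictionary. */
#include "libavutil/dict.h"

static int clone_metadata(AVDictionary **dst, const AVDictionary *src)
{
    int ret = av_dict_copy(dst, src, 0);
    if (ret < 0)
        av_dict_free(dst);   /* drop any partially copied entries */
    return ret;
}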
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
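A minimal sketch of bounded reading with the bytestream2 API: after bytestream2_init() the reader never runs past buf + size. The function name and the choice of reading a big-endian 32-bit word are illustrative.
/* Hedged sketch: read the first 32-bit big-endian field of a segment. */
#include "bytestream.h"

static unsigned first_be32(const uint8_t *buf, int size)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, size);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return 0;                      /* segment too short */
    return bytestream2_get_be32(&gb);  /* big-endian 32-bit read */
}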
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1816
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
AVDictionaryEntry::value
char * value
Definition: dict.h:91
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
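A short sketch of validating dimensions (for example those parsed from a SOF header) with av_image_check_size() before allocating a picture; a negative return means a w*h image could not be addressed safely. The wrapper name is ours.
/* Hedged sketch of dimension validation. */
#include "libavutil/imgutils.h"
#include "libavutil/log.h"

static int validate_dimensions(void *logctx, unsigned w, unsigned h)
{
    int ret = av_image_check_size(w, h, 0, logctx);
    if (ret < 0)
        av_log(logctx, AV_LOG_ERROR, "invalid picture size %ux%u\n", w, h);
    return ret;
}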
width
#define width
Definition: dsp.h:85
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
Read an unsigned 24-bit big-endian value from a byte buffer.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:675
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348