mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/opt.h"
39 #include "avcodec.h"
40 #include "blockdsp.h"
41 #include "codec_internal.h"
42 #include "copy_block.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "idctdsp.h"
46 #include "internal.h"
47 #include "jpegtables.h"
48 #include "mjpeg.h"
49 #include "mjpegdec.h"
50 #include "jpeglsdec.h"
51 #include "profiles.h"
52 #include "put_bits.h"
53 #include "tiff.h"
54 #include "exif.h"
55 #include "bytestream.h"
56 #include "tiff_common.h"
57 
58 
59 static int init_default_huffman_tables(MJpegDecodeContext *s)
60 {
61  static const struct {
62  int class;
63  int index;
64  const uint8_t *bits;
65  const uint8_t *values;
66  int length;
67  } ht[] = {
68  { 0, 0, ff_mjpeg_bits_dc_luminance,
69  ff_mjpeg_val_dc, 12 },
70  { 0, 1, ff_mjpeg_bits_dc_chrominance,
71  ff_mjpeg_val_dc, 12 },
72  { 1, 0, ff_mjpeg_bits_ac_luminance,
73  ff_mjpeg_val_ac_luminance, 251 },
74  { 1, 1, ff_mjpeg_bits_ac_chrominance,
75  ff_mjpeg_val_ac_chrominance, 251 },
76  { 2, 0, ff_mjpeg_bits_ac_luminance,
77  ff_mjpeg_val_ac_luminance, 251 },
78  { 2, 1, ff_mjpeg_bits_ac_chrominance,
79  ff_mjpeg_val_ac_chrominance, 251 },
80  };
81  int i, ret;
82 
83  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
84  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
85  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
86  ht[i].bits, ht[i].values,
87  ht[i].class == 1, s->avctx);
88  if (ret < 0)
89  return ret;
90 
91  if (ht[i].class < 2) {
92  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
93  ht[i].bits + 1, 16);
94  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
95  ht[i].values, ht[i].length);
96  }
97  }
98 
99  return 0;
100 }
101 
102 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
103 {
104  s->buggy_avid = 1;
105  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
106  s->interlace_polarity = 1;
107  if (len > 14 && buf[12] == 2) /* 2 - PAL */
108  s->interlace_polarity = 0;
109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
110  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
111 }
112 
113 static void init_idct(AVCodecContext *avctx)
114 {
115  MJpegDecodeContext *s = avctx->priv_data;
116 
117  ff_idctdsp_init(&s->idsp, avctx);
118  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
119  ff_zigzag_direct);
120 }
121 
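/* Decoder init: allocate the internal frame, set up the block/hpel/IDCT
 * helpers and the default Huffman tables, then handle codec-specific
 * extradata (external Huffman tables, SMV frame count, AVID headers). */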
122 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
123 {
124  MJpegDecodeContext *s = avctx->priv_data;
125  int ret;
126 
127  if (!s->picture_ptr) {
128  s->picture = av_frame_alloc();
129  if (!s->picture)
130  return AVERROR(ENOMEM);
131  s->picture_ptr = s->picture;
132  }
133 
134  s->pkt = avctx->internal->in_pkt;
135 
136  s->avctx = avctx;
137  ff_blockdsp_init(&s->bdsp, avctx);
138  ff_hpeldsp_init(&s->hdsp, avctx->flags);
139  init_idct(avctx);
140  s->buffer_size = 0;
141  s->buffer = NULL;
142  s->start_code = -1;
143  s->first_picture = 1;
144  s->got_picture = 0;
145  s->orig_height = avctx->coded_height;
146  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
147  avctx->colorspace = AVCOL_SPC_BT470BG;
148  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
149 
150  if ((ret = init_default_huffman_tables(s)) < 0)
151  return ret;
152 
153  if (s->extern_huff) {
154  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
155  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
156  return ret;
157  if (ff_mjpeg_decode_dht(s)) {
158  av_log(avctx, AV_LOG_ERROR,
159  "error using external huffman table, switching back to internal\n");
160  if ((ret = init_default_huffman_tables(s)) < 0)
161  return ret;
162  }
163  }
164  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
165  s->interlace_polarity = 1; /* bottom field first */
166  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
167  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
168  if (avctx->codec_tag == AV_RL32("MJPG"))
169  s->interlace_polarity = 1;
170  }
171 
172  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
173  if (avctx->extradata_size >= 4)
174  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
175 
176  if (s->smv_frames_per_jpeg <= 0) {
177  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
178  return AVERROR_INVALIDDATA;
179  }
180 
181  s->smv_frame = av_frame_alloc();
182  if (!s->smv_frame)
183  return AVERROR(ENOMEM);
184  } else if (avctx->extradata_size > 8
185  && AV_RL32(avctx->extradata) == 0x2C
186  && AV_RL32(avctx->extradata+4) == 0x18) {
187  parse_avid(s, avctx->extradata, avctx->extradata_size);
188  }
189 
190  if (avctx->codec->id == AV_CODEC_ID_AMV)
191  s->flipped = 1;
192 
193  return 0;
194 }
195 
196 
197 /* quantize tables */
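/* DQT: 16-bit segment length, then per table a 4-bit precision
 * (0 = 8-bit, 1 = 16-bit entries), a 4-bit destination index and 64
 * quantizer values in zigzag order; qscale is a rough quality estimate
 * taken from the first AC quantizers. */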
198 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
199 {
200  int len, index, i;
201 
202  len = get_bits(&s->gb, 16) - 2;
203 
204  if (8*len > get_bits_left(&s->gb)) {
205  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
206  return AVERROR_INVALIDDATA;
207  }
208 
209  while (len >= 65) {
210  int pr = get_bits(&s->gb, 4);
211  if (pr > 1) {
212  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
213  return AVERROR_INVALIDDATA;
214  }
215  index = get_bits(&s->gb, 4);
216  if (index >= 4)
217  return -1;
218  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
219  /* read quant table */
220  for (i = 0; i < 64; i++) {
221  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
222  if (s->quant_matrixes[index][i] == 0) {
223  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
224  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
225  if (s->avctx->err_recognition & AV_EF_EXPLODE)
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
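/* DHT: per table a 4-bit class (0 = DC, 1 = AC) and a 4-bit destination
 * index, 16 code-length counts and up to 256 symbol values.  AC tables
 * are additionally built into s->vlcs[2] for the progressive decoder. */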
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  for (i = 0; i < n; i++) {
274  v = get_bits(&s->gb, 8);
275  val_table[i] = v;
276  }
277  len -= n;
278 
279  /* build VLC and flush previous vlc if present */
280  ff_free_vlc(&s->vlcs[class][index]);
281  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
282  class, index, n);
283  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
284  val_table, class > 0, s->avctx)) < 0)
285  return ret;
286 
287  if (class > 0) {
288  ff_free_vlc(&s->vlcs[2][index]);
289  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
290  val_table, 0, s->avctx)) < 0)
291  return ret;
292  }
293 
294  for (i = 0; i < 16; i++)
295  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
296  for (i = 0; i < 256; i++)
297  s->raw_huffman_values[class][index][i] = val_table[i];
298  }
299  return 0;
300 }
301 
302 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
303 {
304  int len, nb_components, i, width, height, bits, ret, size_change;
305  unsigned pix_fmt_id;
306  int h_count[MAX_COMPONENTS] = { 0 };
307  int v_count[MAX_COMPONENTS] = { 0 };
308 
309  s->cur_scan = 0;
310  memset(s->upscale_h, 0, sizeof(s->upscale_h));
311  memset(s->upscale_v, 0, sizeof(s->upscale_v));
312 
313  len = get_bits(&s->gb, 16);
314  bits = get_bits(&s->gb, 8);
315 
316  if (bits > 16 || bits < 1) {
317  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
318  return AVERROR_INVALIDDATA;
319  }
320 
321  if (s->avctx->bits_per_raw_sample != bits) {
322  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
323  s->avctx->bits_per_raw_sample = bits;
324  init_idct(s->avctx);
325  }
326  if (s->pegasus_rct)
327  bits = 9;
328  if (bits == 9 && !s->pegasus_rct)
329  s->rct = 1; // FIXME ugly
330 
331  if(s->lossless && s->avctx->lowres){
332  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
333  return -1;
334  }
335 
336  height = get_bits(&s->gb, 16);
337  width = get_bits(&s->gb, 16);
338 
339  // HACK for odd_height.mov
340  if (s->interlaced && s->width == width && s->height == height + 1)
341  height= s->height;
342 
343  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
344  if (av_image_check_size(width, height, 0, s->avctx) < 0)
345  return AVERROR_INVALIDDATA;
346  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
347  return AVERROR_INVALIDDATA;
348 
349  nb_components = get_bits(&s->gb, 8);
350  if (nb_components <= 0 ||
351  nb_components > MAX_COMPONENTS)
352  return -1;
353  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
354  if (nb_components != s->nb_components) {
355  av_log(s->avctx, AV_LOG_ERROR,
356  "nb_components changing in interlaced picture\n");
357  return AVERROR_INVALIDDATA;
358  }
359  }
360  if (s->ls && !(bits <= 8 || nb_components == 1)) {
362  "JPEG-LS that is not <= 8 "
363  "bits/component or 16-bit gray");
364  return AVERROR_PATCHWELCOME;
365  }
366  if (len != 8 + 3 * nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
368  return AVERROR_INVALIDDATA;
369  }
370 
371  s->nb_components = nb_components;
372  s->h_max = 1;
373  s->v_max = 1;
374  for (i = 0; i < nb_components; i++) {
375  /* component id */
376  s->component_id[i] = get_bits(&s->gb, 8) - 1;
377  h_count[i] = get_bits(&s->gb, 4);
378  v_count[i] = get_bits(&s->gb, 4);
379  /* compute hmax and vmax (only used in interleaved case) */
380  if (h_count[i] > s->h_max)
381  s->h_max = h_count[i];
382  if (v_count[i] > s->v_max)
383  s->v_max = v_count[i];
384  s->quant_index[i] = get_bits(&s->gb, 8);
385  if (s->quant_index[i] >= 4) {
386  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
387  return AVERROR_INVALIDDATA;
388  }
389  if (!h_count[i] || !v_count[i]) {
390  av_log(s->avctx, AV_LOG_ERROR,
391  "Invalid sampling factor in component %d %d:%d\n",
392  i, h_count[i], v_count[i]);
393  return AVERROR_INVALIDDATA;
394  }
395 
396  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
397  i, h_count[i], v_count[i],
398  s->component_id[i], s->quant_index[i]);
399  }
400  if ( nb_components == 4
401  && s->component_id[0] == 'C' - 1
402  && s->component_id[1] == 'M' - 1
403  && s->component_id[2] == 'Y' - 1
404  && s->component_id[3] == 'K' - 1)
405  s->adobe_transform = 0;
406 
407  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
408  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
409  return AVERROR_PATCHWELCOME;
410  }
411 
412  if (s->bayer) {
413  if (nb_components == 2) {
414  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
415  width stored in their SOF3 markers is the width of each one. We only output
416  a single component, therefore we need to adjust the output image width. We
417  handle the deinterleaving (but not the debayering) in this file. */
418  width *= 2;
419  }
420  /* They can also contain 1 component, which is double the width and half the height
421  of the final image (rows are interleaved). We don't handle the decoding in this
422  file, but leave that to the TIFF/DNG decoder. */
423  }
424 
425  /* if different size, realloc/alloc picture */
426  if (width != s->width || height != s->height || bits != s->bits ||
427  memcmp(s->h_count, h_count, sizeof(h_count)) ||
428  memcmp(s->v_count, v_count, sizeof(v_count))) {
429  size_change = 1;
430 
431  s->width = width;
432  s->height = height;
433  s->bits = bits;
434  memcpy(s->h_count, h_count, sizeof(h_count));
435  memcpy(s->v_count, v_count, sizeof(v_count));
436  s->interlaced = 0;
437  s->got_picture = 0;
438 
439  /* test interlaced mode */
440  if (s->first_picture &&
441  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
442  s->orig_height != 0 &&
443  s->height < ((s->orig_height * 3) / 4)) {
444  s->interlaced = 1;
445  s->bottom_field = s->interlace_polarity;
446  s->picture_ptr->interlaced_frame = 1;
447  s->picture_ptr->top_field_first = !s->interlace_polarity;
448  height *= 2;
449  }
450 
451  ret = ff_set_dimensions(s->avctx, width, height);
452  if (ret < 0)
453  return ret;
454 
455  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
456  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
457  s->orig_height < height)
458  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
459 
460  s->first_picture = 0;
461  } else {
462  size_change = 0;
463  }
464 
465  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
466  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
467  if (s->avctx->height <= 0)
468  return AVERROR_INVALIDDATA;
469  }
470 
471  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
472  if (s->progressive) {
473  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
474  return AVERROR_INVALIDDATA;
475  }
476  } else {
477  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
478  s->rgb = 1;
479  else if (!s->lossless)
480  s->rgb = 0;
481  /* XXX: not complete test ! */
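 /* Pack the sampling factors of up to four components into one 32-bit id,
  * 4 bits per field (h0 v0 h1 v1 h2 v2 h3 v3 from the top); the switch
  * below maps this id, after normalizing uniformly scaled factors, to an
  * output pixel format. */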
482  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
483  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
484  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
485  (s->h_count[3] << 4) | s->v_count[3];
486  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
487  /* NOTE we do not allocate pictures large enough for the possible
488  * padding of h/v_count being 4 */
489  if (!(pix_fmt_id & 0xD0D0D0D0))
490  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
491  if (!(pix_fmt_id & 0x0D0D0D0D))
492  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
493 
494  for (i = 0; i < 8; i++) {
495  int j = 6 + (i&1) - (i&6);
496  int is = (pix_fmt_id >> (4*i)) & 0xF;
497  int js = (pix_fmt_id >> (4*j)) & 0xF;
498 
499  if (is == 1 && js != 2 && (i < 2 || i > 5))
500  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
501  if (is == 1 && js != 2 && (i < 2 || i > 5))
502  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
503 
504  if (is == 1 && js == 2) {
505  if (i & 1) s->upscale_h[j/2] = 1;
506  else s->upscale_v[j/2] = 1;
507  }
508  }
509 
510  if (s->bayer) {
511  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
512  goto unk_pixfmt;
513  }
514 
515  switch (pix_fmt_id) {
516  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
517  if (!s->bayer)
518  goto unk_pixfmt;
519  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
520  break;
521  case 0x11111100:
522  if (s->rgb)
523  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
524  else {
525  if ( s->adobe_transform == 0
526  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
527  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
528  } else {
529  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
530  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
531  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
532  }
533  }
534  av_assert0(s->nb_components == 3);
535  break;
536  case 0x11111111:
537  if (s->rgb)
538  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
539  else {
540  if (s->adobe_transform == 0 && s->bits <= 8) {
541  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
542  } else {
543  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
544  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
545  }
546  }
547  av_assert0(s->nb_components == 4);
548  break;
549  case 0x22111122:
550  case 0x22111111:
551  if (s->adobe_transform == 0 && s->bits <= 8) {
552  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
553  s->upscale_v[1] = s->upscale_v[2] = 1;
554  s->upscale_h[1] = s->upscale_h[2] = 1;
555  } else if (s->adobe_transform == 2 && s->bits <= 8) {
556  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
557  s->upscale_v[1] = s->upscale_v[2] = 1;
558  s->upscale_h[1] = s->upscale_h[2] = 1;
559  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
560  } else {
561  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
562  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
563  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
564  }
565  av_assert0(s->nb_components == 4);
566  break;
567  case 0x12121100:
568  case 0x22122100:
569  case 0x21211100:
570  case 0x21112100:
571  case 0x22211200:
572  case 0x22221100:
573  case 0x22112200:
574  case 0x11222200:
575  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
576  else
577  goto unk_pixfmt;
578  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
579  break;
580  case 0x11000000:
581  case 0x13000000:
582  case 0x14000000:
583  case 0x31000000:
584  case 0x33000000:
585  case 0x34000000:
586  case 0x41000000:
587  case 0x43000000:
588  case 0x44000000:
589  if(s->bits <= 8)
590  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
591  else
592  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
593  break;
594  case 0x12111100:
595  case 0x14121200:
596  case 0x14111100:
597  case 0x22211100:
598  case 0x22112100:
599  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
600  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
601  else
602  goto unk_pixfmt;
603  s->upscale_v[0] = s->upscale_v[1] = 1;
604  } else {
605  if (pix_fmt_id == 0x14111100)
606  s->upscale_v[1] = s->upscale_v[2] = 1;
607  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
608  else
609  goto unk_pixfmt;
610  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
611  }
612  break;
613  case 0x21111100:
614  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
615  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
616  else
617  goto unk_pixfmt;
618  s->upscale_h[0] = s->upscale_h[1] = 1;
619  } else {
620  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
621  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
622  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
623  }
624  break;
625  case 0x31111100:
626  if (s->bits > 8)
627  goto unk_pixfmt;
628  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
629  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
630  s->upscale_h[1] = s->upscale_h[2] = 2;
631  break;
632  case 0x22121100:
633  case 0x22111200:
634  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
635  else
636  goto unk_pixfmt;
637  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
638  break;
639  case 0x22111100:
640  case 0x23111100:
641  case 0x42111100:
642  case 0x24111100:
643  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
644  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
645  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
646  if (pix_fmt_id == 0x42111100) {
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  s->upscale_h[1] = s->upscale_h[2] = 1;
650  } else if (pix_fmt_id == 0x24111100) {
651  if (s->bits > 8)
652  goto unk_pixfmt;
653  s->upscale_v[1] = s->upscale_v[2] = 1;
654  } else if (pix_fmt_id == 0x23111100) {
655  if (s->bits > 8)
656  goto unk_pixfmt;
657  s->upscale_v[1] = s->upscale_v[2] = 2;
658  }
659  break;
660  case 0x41111100:
661  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
662  else
663  goto unk_pixfmt;
664  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
665  break;
666  default:
667  unk_pixfmt:
668  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
669  memset(s->upscale_h, 0, sizeof(s->upscale_h));
670  memset(s->upscale_v, 0, sizeof(s->upscale_v));
671  return AVERROR_PATCHWELCOME;
672  }
673  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
674  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
678  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
679  return AVERROR_PATCHWELCOME;
680  }
681  if (s->ls) {
682  memset(s->upscale_h, 0, sizeof(s->upscale_h));
683  memset(s->upscale_v, 0, sizeof(s->upscale_v));
684  if (s->nb_components == 3) {
685  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
686  } else if (s->nb_components != 1) {
687  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
688  return AVERROR_PATCHWELCOME;
689  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
690  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
691  else if (s->bits <= 8)
692  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
693  else
694  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
695  }
696 
697  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
698  if (!s->pix_desc) {
699  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
700  return AVERROR_BUG;
701  }
702 
703  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
704  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
705  } else {
706  enum AVPixelFormat pix_fmts[] = {
707 #if CONFIG_MJPEG_NVDEC_HWACCEL
708  AV_PIX_FMT_CUDA,
709 #endif
710 #if CONFIG_MJPEG_VAAPI_HWACCEL
711  AV_PIX_FMT_VAAPI,
712 #endif
713  s->avctx->pix_fmt,
714  AV_PIX_FMT_NONE,
715  };
716  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
717  if (s->hwaccel_pix_fmt < 0)
718  return AVERROR(EINVAL);
719 
720  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
721  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
722  }
723 
724  if (s->avctx->skip_frame == AVDISCARD_ALL) {
725  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
726  s->picture_ptr->key_frame = 1;
727  s->got_picture = 1;
728  return 0;
729  }
730 
731  av_frame_unref(s->picture_ptr);
732  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
733  return -1;
734  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
735  s->picture_ptr->key_frame = 1;
736  s->got_picture = 1;
737 
738  // Let's clear the palette to avoid leaving uninitialized values in it
739  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
740  memset(s->picture_ptr->data[1], 0, 1024);
741 
742  for (i = 0; i < 4; i++)
743  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
744 
745  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
746  s->width, s->height, s->linesize[0], s->linesize[1],
747  s->interlaced, s->avctx->height);
748 
749  }
750 
751  if ((s->rgb && !s->lossless && !s->ls) ||
752  (!s->rgb && s->ls && s->nb_components > 1) ||
753  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
754  ) {
755  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
756  return AVERROR_PATCHWELCOME;
757  }
758 
759  /* totally blank picture as progressive JPEG will only add details to it */
760  if (s->progressive) {
761  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
762  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
763  for (i = 0; i < s->nb_components; i++) {
764  int size = bw * bh * s->h_count[i] * s->v_count[i];
765  av_freep(&s->blocks[i]);
766  av_freep(&s->last_nnz[i]);
767  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
768  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
769  if (!s->blocks[i] || !s->last_nnz[i])
770  return AVERROR(ENOMEM);
771  s->block_stride[i] = bw * s->h_count[i];
772  }
773  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
774  }
775 
776  if (s->avctx->hwaccel) {
777  s->hwaccel_picture_private =
778  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
779  if (!s->hwaccel_picture_private)
780  return AVERROR(ENOMEM);
781 
782  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
783  s->raw_image_buffer_size);
784  if (ret < 0)
785  return ret;
786  }
787 
788  return 0;
789 }
790 
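/* A DC difference is coded as a Huffman symbol giving the number of extra
 * bits followed by those bits; get_xbits() applies the JPEG sign
 * convention.  0xfffff is used as an error sentinel since no valid
 * difference needs 20 bits. */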
791 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
792 {
793  int code;
794  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
795  if (code < 0 || code > 16) {
796  av_log(s->avctx, AV_LOG_WARNING,
797  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
798  0, dc_index, &s->vlcs[0][dc_index]);
799  return 0xfffff;
800  }
801 
802  if (code)
803  return get_xbits(&s->gb, code);
804  else
805  return 0;
806 }
807 
808 /* decode block and dequantize */
809 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
810  int dc_index, int ac_index, uint16_t *quant_matrix)
811 {
812  int code, i, j, level, val;
813 
814  /* DC coef */
815  val = mjpeg_decode_dc(s, dc_index);
816  if (val == 0xfffff) {
817  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
818  return AVERROR_INVALIDDATA;
819  }
820  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
821  val = av_clip_int16(val);
822  s->last_dc[component] = val;
823  block[0] = val;
824  /* AC coefs */
825  i = 0;
826  {OPEN_READER(re, &s->gb);
827  do {
828  UPDATE_CACHE(re, &s->gb);
829  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
830 
831  i += ((unsigned)code) >> 4;
832  code &= 0xf;
833  if (code) {
834  if (code > MIN_CACHE_BITS - 16)
835  UPDATE_CACHE(re, &s->gb);
836 
837  {
838  int cache = GET_CACHE(re, &s->gb);
839  int sign = (~cache) >> 31;
840  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
841  }
842 
843  LAST_SKIP_BITS(re, &s->gb, code);
844 
845  if (i > 63) {
846  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
847  return AVERROR_INVALIDDATA;
848  }
849  j = s->scantable.permutated[i];
850  block[j] = level * quant_matrix[i];
851  }
852  } while (i < 63);
853  CLOSE_READER(re, &s->gb);}
854 
855  return 0;
856 }
857 
858 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
859  int component, int dc_index,
860  uint16_t *quant_matrix, int Al)
861 {
862  unsigned val;
863  s->bdsp.clear_block(block);
864  val = mjpeg_decode_dc(s, dc_index);
865  if (val == 0xfffff) {
866  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
867  return AVERROR_INVALIDDATA;
868  }
869  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
870  s->last_dc[component] = val;
871  block[0] = val;
872  return 0;
873 }
874 
875 /* decode block and dequantize - progressive JPEG version */
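/* First AC pass (Ah == 0) of a progressive scan: only coefficients in the
 * spectral band [ss, se] are coded, scaled by 2^Al.  An end-of-band run
 * (EOBRUN) counts following blocks with no coefficients in this band. */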
876 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
877  uint8_t *last_nnz, int ac_index,
878  uint16_t *quant_matrix,
879  int ss, int se, int Al, int *EOBRUN)
880 {
881  int code, i, j, val, run;
882  unsigned level;
883 
884  if (*EOBRUN) {
885  (*EOBRUN)--;
886  return 0;
887  }
888 
889  {
890  OPEN_READER(re, &s->gb);
891  for (i = ss; ; i++) {
892  UPDATE_CACHE(re, &s->gb);
893  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
894 
895  run = ((unsigned) code) >> 4;
896  code &= 0xF;
897  if (code) {
898  i += run;
899  if (code > MIN_CACHE_BITS - 16)
900  UPDATE_CACHE(re, &s->gb);
901 
902  {
903  int cache = GET_CACHE(re, &s->gb);
904  int sign = (~cache) >> 31;
905  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
906  }
907 
908  LAST_SKIP_BITS(re, &s->gb, code);
909 
910  if (i >= se) {
911  if (i == se) {
912  j = s->scantable.permutated[se];
913  block[j] = level * (quant_matrix[se] << Al);
914  break;
915  }
916  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
917  return AVERROR_INVALIDDATA;
918  }
919  j = s->scantable.permutated[i];
920  block[j] = level * (quant_matrix[i] << Al);
921  } else {
922  if (run == 0xF) {// ZRL - skip 15 coefficients
923  i += 15;
924  if (i >= se) {
925  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
926  return AVERROR_INVALIDDATA;
927  }
928  } else {
929  val = (1 << run);
930  if (run) {
931  UPDATE_CACHE(re, &s->gb);
932  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
933  LAST_SKIP_BITS(re, &s->gb, run);
934  }
935  *EOBRUN = val - 1;
936  break;
937  }
938  }
939  }
940  CLOSE_READER(re, &s->gb);
941  }
942 
943  if (i > *last_nnz)
944  *last_nnz = i;
945 
946  return 0;
947 }
948 
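/* Refinement pass (Ah != 0) helpers: REFINE_BIT adds one correction bit to
 * an already non-zero coefficient, ZERO_RUN skips zero coefficients
 * (refining non-zero ones on the way) until the coded run is used up. */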
949 #define REFINE_BIT(j) { \
950  UPDATE_CACHE(re, &s->gb); \
951  sign = block[j] >> 15; \
952  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
953  ((quant_matrix[i] ^ sign) - sign) << Al; \
954  LAST_SKIP_BITS(re, &s->gb, 1); \
955 }
956 
957 #define ZERO_RUN \
958 for (; ; i++) { \
959  if (i > last) { \
960  i += run; \
961  if (i > se) { \
962  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
963  return -1; \
964  } \
965  break; \
966  } \
967  j = s->scantable.permutated[i]; \
968  if (block[j]) \
969  REFINE_BIT(j) \
970  else if (run-- == 0) \
971  break; \
972 }
973 
974 /* decode block and dequantize - progressive JPEG refinement pass */
975 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
976  uint8_t *last_nnz,
977  int ac_index, uint16_t *quant_matrix,
978  int ss, int se, int Al, int *EOBRUN)
979 {
980  int code, i = ss, j, sign, val, run;
981  int last = FFMIN(se, *last_nnz);
982 
983  OPEN_READER(re, &s->gb);
984  if (*EOBRUN) {
985  (*EOBRUN)--;
986  } else {
987  for (; ; i++) {
988  UPDATE_CACHE(re, &s->gb);
989  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
990 
991  if (code & 0xF) {
992  run = ((unsigned) code) >> 4;
993  UPDATE_CACHE(re, &s->gb);
994  val = SHOW_UBITS(re, &s->gb, 1);
995  LAST_SKIP_BITS(re, &s->gb, 1);
996  ZERO_RUN;
997  j = s->scantable.permutated[i];
998  val--;
999  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1000  if (i == se) {
1001  if (i > *last_nnz)
1002  *last_nnz = i;
1003  CLOSE_READER(re, &s->gb);
1004  return 0;
1005  }
1006  } else {
1007  run = ((unsigned) code) >> 4;
1008  if (run == 0xF) {
1009  ZERO_RUN;
1010  } else {
1011  val = run;
1012  run = (1 << run);
1013  if (val) {
1014  UPDATE_CACHE(re, &s->gb);
1015  run += SHOW_UBITS(re, &s->gb, val);
1016  LAST_SKIP_BITS(re, &s->gb, val);
1017  }
1018  *EOBRUN = run - 1;
1019  break;
1020  }
1021  }
1022  }
1023 
1024  if (i > *last_nnz)
1025  *last_nnz = i;
1026  }
1027 
1028  for (; i <= last; i++) {
1029  j = s->scantable.permutated[i];
1030  if (block[j])
1031  REFINE_BIT(j)
1032  }
1033  CLOSE_READER(re, &s->gb);
1034 
1035  return 0;
1036 }
1037 #undef REFINE_BIT
1038 #undef ZERO_RUN
1039 
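/* Check for an RSTn marker at the end of a restart interval: reset the DC
 * predictors, tell the caller to clear its EOB run, and rewind if the
 * expected marker is not actually present. */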
1040 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1041 {
1042  int i;
1043  int reset = 0;
1044 
1045  if (s->restart_interval) {
1046  s->restart_count--;
1047  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1048  align_get_bits(&s->gb);
1049  for (i = 0; i < nb_components; i++) /* reset dc */
1050  s->last_dc[i] = (4 << s->bits);
1051  }
1052 
1053  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1054  /* skip RSTn */
1055  if (s->restart_count == 0) {
1056  if( show_bits(&s->gb, i) == (1 << i) - 1
1057  || show_bits(&s->gb, i) == 0xFF) {
1058  int pos = get_bits_count(&s->gb);
1059  align_get_bits(&s->gb);
1060  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1061  skip_bits(&s->gb, 8);
1062  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1063  for (i = 0; i < nb_components; i++) /* reset dc */
1064  s->last_dc[i] = (4 << s->bits);
1065  reset = 1;
1066  } else
1067  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1068  }
1069  }
1070  }
1071  return reset;
1072 }
1073 
1074 /* Handles 1 to 4 components */
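/* Lossless scan for RGB/Bayer data: each sample is predicted from its
 * left/top/top-left neighbours (predictor forced to 1 on the first row and
 * after a resync), the decoded difference is shifted by the point
 * transform, and an optional (Pegasus) reversible colour transform
 * reconstructs RGB from the decorrelated channels. */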
1075 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1076 {
1077  int i, mb_x, mb_y;
1078  unsigned width;
1079  uint16_t (*buffer)[4];
1080  int left[4], top[4], topleft[4];
1081  const int linesize = s->linesize[0];
1082  const int mask = ((1 << s->bits) - 1) << point_transform;
1083  int resync_mb_y = 0;
1084  int resync_mb_x = 0;
1085  int vpred[6];
1086 
1087  if (!s->bayer && s->nb_components < 3)
1088  return AVERROR_INVALIDDATA;
1089  if (s->bayer && s->nb_components > 2)
1090  return AVERROR_INVALIDDATA;
1091  if (s->nb_components <= 0 || s->nb_components > 4)
1092  return AVERROR_INVALIDDATA;
1093  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1094  return AVERROR_INVALIDDATA;
1095 
1096 
1097  s->restart_count = s->restart_interval;
1098 
1099  if (s->restart_interval == 0)
1100  s->restart_interval = INT_MAX;
1101 
1102  if (s->bayer)
1103  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1104  else
1105  width = s->mb_width;
1106 
1107  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1108  if (!s->ljpeg_buffer)
1109  return AVERROR(ENOMEM);
1110 
1111  buffer = s->ljpeg_buffer;
1112 
1113  for (i = 0; i < 4; i++)
1114  buffer[0][i] = 1 << (s->bits - 1);
1115 
1116  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1117  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1118 
1119  if (s->interlaced && s->bottom_field)
1120  ptr += linesize >> 1;
1121 
1122  for (i = 0; i < 4; i++)
1123  top[i] = left[i] = topleft[i] = buffer[0][i];
1124 
1125  if ((mb_y * s->width) % s->restart_interval == 0) {
1126  for (i = 0; i < 6; i++)
1127  vpred[i] = 1 << (s->bits-1);
1128  }
1129 
1130  for (mb_x = 0; mb_x < width; mb_x++) {
1131  int modified_predictor = predictor;
1132 
1133  if (get_bits_left(&s->gb) < 1) {
1134  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1135  return AVERROR_INVALIDDATA;
1136  }
1137 
1138  if (s->restart_interval && !s->restart_count){
1139  s->restart_count = s->restart_interval;
1140  resync_mb_x = mb_x;
1141  resync_mb_y = mb_y;
1142  for(i=0; i<4; i++)
1143  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1144  }
1145  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1146  modified_predictor = 1;
1147 
1148  for (i=0;i<nb_components;i++) {
1149  int pred, dc;
1150 
1151  topleft[i] = top[i];
1152  top[i] = buffer[mb_x][i];
1153 
1154  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1155  if(dc == 0xFFFFF)
1156  return -1;
1157 
1158  if (!s->bayer || mb_x) {
1159  pred = left[i];
1160  } else { /* This path runs only for the first line in bayer images */
1161  vpred[i] += dc;
1162  pred = vpred[i] - dc;
1163  }
1164 
1165  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1166 
1167  left[i] = buffer[mb_x][i] =
1168  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1169  }
1170 
1171  if (s->restart_interval && !--s->restart_count) {
1172  align_get_bits(&s->gb);
1173  skip_bits(&s->gb, 16); /* skip RSTn */
1174  }
1175  }
1176  if (s->rct && s->nb_components == 4) {
1177  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1178  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1179  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1180  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1181  ptr[4*mb_x + 0] = buffer[mb_x][3];
1182  }
1183  } else if (s->nb_components == 4) {
1184  for(i=0; i<nb_components; i++) {
1185  int c= s->comp_index[i];
1186  if (s->bits <= 8) {
1187  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1188  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1189  }
1190  } else if(s->bits == 9) {
1191  return AVERROR_PATCHWELCOME;
1192  } else {
1193  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1194  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1195  }
1196  }
1197  }
1198  } else if (s->rct) {
1199  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1200  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1201  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1202  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1203  }
1204  } else if (s->pegasus_rct) {
1205  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1206  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1207  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1208  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1209  }
1210  } else if (s->bayer) {
1211  if (nb_components == 1) {
1212  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1213  for (mb_x = 0; mb_x < width; mb_x++)
1214  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1215  } else if (nb_components == 2) {
1216  for (mb_x = 0; mb_x < width; mb_x++) {
1217  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1218  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1219  }
1220  }
1221  } else {
1222  for(i=0; i<nb_components; i++) {
1223  int c= s->comp_index[i];
1224  if (s->bits <= 8) {
1225  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1226  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1227  }
1228  } else if(s->bits == 9) {
1229  return AVERROR_PATCHWELCOME;
1230  } else {
1231  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1232  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1233  }
1234  }
1235  }
1236  }
1237  }
1238  return 0;
1239 }
1240 
1241 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1242  int point_transform, int nb_components)
1243 {
1244  int i, mb_x, mb_y, mask;
1245  int bits= (s->bits+7)&~7;
1246  int resync_mb_y = 0;
1247  int resync_mb_x = 0;
1248 
1249  point_transform += bits - s->bits;
1250  mask = ((1 << s->bits) - 1) << point_transform;
1251 
1252  av_assert0(nb_components>=1 && nb_components<=4);
1253 
1254  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1255  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1256  if (get_bits_left(&s->gb) < 1) {
1257  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1258  return AVERROR_INVALIDDATA;
1259  }
1260  if (s->restart_interval && !s->restart_count){
1261  s->restart_count = s->restart_interval;
1262  resync_mb_x = mb_x;
1263  resync_mb_y = mb_y;
1264  }
1265 
1266  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1267  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1268  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1269  for (i = 0; i < nb_components; i++) {
1270  uint8_t *ptr;
1271  uint16_t *ptr16;
1272  int n, h, v, x, y, c, j, linesize;
1273  n = s->nb_blocks[i];
1274  c = s->comp_index[i];
1275  h = s->h_scount[i];
1276  v = s->v_scount[i];
1277  x = 0;
1278  y = 0;
1279  linesize= s->linesize[c];
1280 
1281  if(bits>8) linesize /= 2;
1282 
1283  for(j=0; j<n; j++) {
1284  int pred, dc;
1285 
1286  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1287  if(dc == 0xFFFFF)
1288  return -1;
1289  if ( h * mb_x + x >= s->width
1290  || v * mb_y + y >= s->height) {
1291  // Nothing to do
1292  } else if (bits<=8) {
1293  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1294  if(y==0 && toprow){
1295  if(x==0 && leftcol){
1296  pred= 1 << (bits - 1);
1297  }else{
1298  pred= ptr[-1];
1299  }
1300  }else{
1301  if(x==0 && leftcol){
1302  pred= ptr[-linesize];
1303  }else{
1304  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1305  }
1306  }
1307 
1308  if (s->interlaced && s->bottom_field)
1309  ptr += linesize >> 1;
1310  pred &= mask;
1311  *ptr= pred + ((unsigned)dc << point_transform);
1312  }else{
1313  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1314  if(y==0 && toprow){
1315  if(x==0 && leftcol){
1316  pred= 1 << (bits - 1);
1317  }else{
1318  pred= ptr16[-1];
1319  }
1320  }else{
1321  if(x==0 && leftcol){
1322  pred= ptr16[-linesize];
1323  }else{
1324  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1325  }
1326  }
1327 
1328  if (s->interlaced && s->bottom_field)
1329  ptr16 += linesize >> 1;
1330  pred &= mask;
1331  *ptr16= pred + ((unsigned)dc << point_transform);
1332  }
1333  if (++x == h) {
1334  x = 0;
1335  y++;
1336  }
1337  }
1338  }
1339  } else {
1340  for (i = 0; i < nb_components; i++) {
1341  uint8_t *ptr;
1342  uint16_t *ptr16;
1343  int n, h, v, x, y, c, j, linesize, dc;
1344  n = s->nb_blocks[i];
1345  c = s->comp_index[i];
1346  h = s->h_scount[i];
1347  v = s->v_scount[i];
1348  x = 0;
1349  y = 0;
1350  linesize = s->linesize[c];
1351 
1352  if(bits>8) linesize /= 2;
1353 
1354  for (j = 0; j < n; j++) {
1355  int pred;
1356 
1357  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1358  if(dc == 0xFFFFF)
1359  return -1;
1360  if ( h * mb_x + x >= s->width
1361  || v * mb_y + y >= s->height) {
1362  // Nothing to do
1363  } else if (bits<=8) {
1364  ptr = s->picture_ptr->data[c] +
1365  (linesize * (v * mb_y + y)) +
1366  (h * mb_x + x); //FIXME optimize this crap
1367  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1368 
1369  pred &= mask;
1370  *ptr = pred + ((unsigned)dc << point_transform);
1371  }else{
1372  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1373  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1374 
1375  pred &= mask;
1376  *ptr16= pred + ((unsigned)dc << point_transform);
1377  }
1378 
1379  if (++x == h) {
1380  x = 0;
1381  y++;
1382  }
1383  }
1384  }
1385  }
1386  if (s->restart_interval && !--s->restart_count) {
1387  align_get_bits(&s->gb);
1388  skip_bits(&s->gb, 16); /* skip RSTn */
1389  }
1390  }
1391  }
1392  return 0;
1393 }
1394 
1395 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1396  uint8_t *dst, const uint8_t *src,
1397  int linesize, int lowres)
1398 {
1399  switch (lowres) {
1400  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1401  break;
1402  case 1: copy_block4(dst, src, linesize, linesize, 4);
1403  break;
1404  case 2: copy_block2(dst, src, linesize, linesize, 2);
1405  break;
1406  case 3: *dst = *src;
1407  break;
1408  }
1409 }
1410 
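/* For bit depths that are not a multiple of 8, shift the decoded samples up
 * so they fill the full 8- or 16-bit range of the output pixel format. */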
1411 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1412 {
1413  int block_x, block_y;
1414  int size = 8 >> s->avctx->lowres;
1415  if (s->bits > 8) {
1416  for (block_y=0; block_y<size; block_y++)
1417  for (block_x=0; block_x<size; block_x++)
1418  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1419  } else {
1420  for (block_y=0; block_y<size; block_y++)
1421  for (block_x=0; block_x<size; block_x++)
1422  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1423  }
1424 }
1425 
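/* Sequential scan: iterate over MCUs and decode each component's blocks
 * with decode_block() + IDCT, or only the DC coefficients when the frame
 * is progressive.  mb_bitmask marks MCUs to copy unchanged from a
 * reference frame (used for MxPEG-style delta frames). */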
1426 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1427  int Al, const uint8_t *mb_bitmask,
1428  int mb_bitmask_size,
1429  const AVFrame *reference)
1430 {
1431  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1432  uint8_t *data[MAX_COMPONENTS];
1433  const uint8_t *reference_data[MAX_COMPONENTS];
1434  int linesize[MAX_COMPONENTS];
1435  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1436  int bytes_per_pixel = 1 + (s->bits > 8);
1437 
1438  if (mb_bitmask) {
1439  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1440  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1441  return AVERROR_INVALIDDATA;
1442  }
1443  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1444  }
1445 
1446  s->restart_count = 0;
1447 
1448  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1449  &chroma_v_shift);
1450  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1451  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1452 
1453  for (i = 0; i < nb_components; i++) {
1454  int c = s->comp_index[i];
1455  data[c] = s->picture_ptr->data[c];
1456  reference_data[c] = reference ? reference->data[c] : NULL;
1457  linesize[c] = s->linesize[c];
1458  s->coefs_finished[c] |= 1;
1459  }
1460 
1461  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1462  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1463  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1464 
1465  if (s->restart_interval && !s->restart_count)
1466  s->restart_count = s->restart_interval;
1467 
1468  if (get_bits_left(&s->gb) < 0) {
1469  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1470  -get_bits_left(&s->gb));
1471  return AVERROR_INVALIDDATA;
1472  }
1473  for (i = 0; i < nb_components; i++) {
1474  uint8_t *ptr;
1475  int n, h, v, x, y, c, j;
1476  int block_offset;
1477  n = s->nb_blocks[i];
1478  c = s->comp_index[i];
1479  h = s->h_scount[i];
1480  v = s->v_scount[i];
1481  x = 0;
1482  y = 0;
1483  for (j = 0; j < n; j++) {
1484  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1485  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1486 
1487  if (s->interlaced && s->bottom_field)
1488  block_offset += linesize[c] >> 1;
1489  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1490  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1491  ptr = data[c] + block_offset;
1492  } else
1493  ptr = NULL;
1494  if (!s->progressive) {
1495  if (copy_mb) {
1496  if (ptr)
1497  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1498  linesize[c], s->avctx->lowres);
1499 
1500  } else {
1501  s->bdsp.clear_block(s->block);
1502  if (decode_block(s, s->block, i,
1503  s->dc_index[i], s->ac_index[i],
1504  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1505  av_log(s->avctx, AV_LOG_ERROR,
1506  "error y=%d x=%d\n", mb_y, mb_x);
1507  return AVERROR_INVALIDDATA;
1508  }
1509  if (ptr) {
1510  s->idsp.idct_put(ptr, linesize[c], s->block);
1511  if (s->bits & 7)
1512  shift_output(s, ptr, linesize[c]);
1513  }
1514  }
1515  } else {
1516  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1517  (h * mb_x + x);
1518  int16_t *block = s->blocks[c][block_idx];
1519  if (Ah)
1520  block[0] += get_bits1(&s->gb) *
1521  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1522  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1523  s->quant_matrixes[s->quant_sindex[i]],
1524  Al) < 0) {
1525  av_log(s->avctx, AV_LOG_ERROR,
1526  "error y=%d x=%d\n", mb_y, mb_x);
1527  return AVERROR_INVALIDDATA;
1528  }
1529  }
1530  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1531  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1532  mb_x, mb_y, x, y, c, s->bottom_field,
1533  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1534  if (++x == h) {
1535  x = 0;
1536  y++;
1537  }
1538  }
1539  }
1540 
1541  handle_rstn(s, nb_components);
1542  }
1543  }
1544  return 0;
1545 }
1546 
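/* Progressive AC scan for one component: every block gets either a first
 * pass or a refinement pass over the [ss, se] band, with the EOB run
 * carried across blocks and cleared at restart markers. */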
1547 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1548  int se, int Ah, int Al)
1549 {
1550  int mb_x, mb_y;
1551  int EOBRUN = 0;
1552  int c = s->comp_index[0];
1553  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1554 
1555  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1556  if (se < ss || se > 63) {
1557  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1558  return AVERROR_INVALIDDATA;
1559  }
1560 
1561  // s->coefs_finished is a bitmask for coefficients coded
1562  // ss and se are parameters telling start and end coefficients
1563  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1564 
1565  s->restart_count = 0;
1566 
1567  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1568  int block_idx = mb_y * s->block_stride[c];
1569  int16_t (*block)[64] = &s->blocks[c][block_idx];
1570  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1571  if (get_bits_left(&s->gb) <= 0) {
1572  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1573  return AVERROR_INVALIDDATA;
1574  }
1575  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1576  int ret;
1577  if (s->restart_interval && !s->restart_count)
1578  s->restart_count = s->restart_interval;
1579 
1580  if (Ah)
1581  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1582  quant_matrix, ss, se, Al, &EOBRUN);
1583  else
1584  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1585  quant_matrix, ss, se, Al, &EOBRUN);
1586 
1587  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1588  ret = AVERROR_INVALIDDATA;
1589  if (ret < 0) {
1590  av_log(s->avctx, AV_LOG_ERROR,
1591  "error y=%d x=%d\n", mb_y, mb_x);
1592  return AVERROR_INVALIDDATA;
1593  }
1594 
1595  if (handle_rstn(s, 0))
1596  EOBRUN = 0;
1597  }
1598  }
1599  return 0;
1600 }
1601 
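/* Once all progressive scans have been parsed into s->blocks, run the IDCT
 * over every block and write the final samples into the picture. */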
1602 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1603 {
1604  int mb_x, mb_y;
1605  int c;
1606  const int bytes_per_pixel = 1 + (s->bits > 8);
1607  const int block_size = s->lossless ? 1 : 8;
1608 
1609  for (c = 0; c < s->nb_components; c++) {
1610  uint8_t *data = s->picture_ptr->data[c];
1611  int linesize = s->linesize[c];
1612  int h = s->h_max / s->h_count[c];
1613  int v = s->v_max / s->v_count[c];
1614  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1615  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1616 
1617  if (~s->coefs_finished[c])
1618  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1619 
1620  if (s->interlaced && s->bottom_field)
1621  data += linesize >> 1;
1622 
1623  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1624  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1625  int block_idx = mb_y * s->block_stride[c];
1626  int16_t (*block)[64] = &s->blocks[c][block_idx];
1627  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1628  s->idsp.idct_put(ptr, linesize, *block);
1629  if (s->bits & 7)
1630  shift_output(s, ptr, linesize);
1631  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1632  }
1633  }
1634  }
1635 }
1636 
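/* SOS: map the scan's components back to frame components and their DC/AC
 * table indices, read the spectral-selection / predictor and
 * successive-approximation fields, then dispatch to the hwaccel, lossless,
 * progressive or sequential scan decoder. */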
1637 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1638  int mb_bitmask_size, const AVFrame *reference)
1639 {
1640  int len, nb_components, i, h, v, predictor, point_transform;
1641  int index, id, ret;
1642  const int block_size = s->lossless ? 1 : 8;
1643  int ilv, prev_shift;
1644 
1645  if (!s->got_picture) {
1646  av_log(s->avctx, AV_LOG_WARNING,
1647  "Can not process SOS before SOF, skipping\n");
1648  return -1;
1649  }
1650 
1651  if (reference) {
1652  if (reference->width != s->picture_ptr->width ||
1653  reference->height != s->picture_ptr->height ||
1654  reference->format != s->picture_ptr->format) {
1655  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1656  return AVERROR_INVALIDDATA;
1657  }
1658  }
1659 
1660  /* XXX: verify len field validity */
1661  len = get_bits(&s->gb, 16);
1662  nb_components = get_bits(&s->gb, 8);
1663  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1665  "decode_sos: nb_components (%d)",
1666  nb_components);
1667  return AVERROR_PATCHWELCOME;
1668  }
1669  if (len != 6 + 2 * nb_components) {
1670  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1671  return AVERROR_INVALIDDATA;
1672  }
1673  for (i = 0; i < nb_components; i++) {
1674  id = get_bits(&s->gb, 8) - 1;
1675  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1676  /* find component index */
1677  for (index = 0; index < s->nb_components; index++)
1678  if (id == s->component_id[index])
1679  break;
1680  if (index == s->nb_components) {
1681  av_log(s->avctx, AV_LOG_ERROR,
1682  "decode_sos: index(%d) out of components\n", index);
1683  return AVERROR_INVALIDDATA;
1684  }
1685  /* Metasoft MJPEG codec has Cb and Cr swapped */
1686  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1687  && nb_components == 3 && s->nb_components == 3 && i)
1688  index = 3 - i;
1689 
1690  s->quant_sindex[i] = s->quant_index[index];
1691  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1692  s->h_scount[i] = s->h_count[index];
1693  s->v_scount[i] = s->v_count[index];
1694 
1695  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1696  index = (index+2)%3;
1697 
1698  s->comp_index[i] = index;
1699 
1700  s->dc_index[i] = get_bits(&s->gb, 4);
1701  s->ac_index[i] = get_bits(&s->gb, 4);
1702 
1703  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1704  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1705  goto out_of_range;
1706  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1707  goto out_of_range;
1708  }
1709 
1710  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1711  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1712  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1713  prev_shift = get_bits(&s->gb, 4); /* Ah */
1714  point_transform = get_bits(&s->gb, 4); /* Al */
1715  }else
1716  prev_shift = point_transform = 0;
1717 
1718  if (nb_components > 1) {
1719  /* interleaved stream */
1720  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1721  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1722  } else if (!s->ls) { /* skip this for JPEG-LS */
1723  h = s->h_max / s->h_scount[0];
1724  v = s->v_max / s->v_scount[0];
1725  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1726  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1727  s->nb_blocks[0] = 1;
1728  s->h_scount[0] = 1;
1729  s->v_scount[0] = 1;
1730  }
1731 
1732  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1733  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1734  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1735  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1736  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1737 
1738 
1739  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1740  for (i = s->mjpb_skiptosod; i > 0; i--)
1741  skip_bits(&s->gb, 8);
1742 
1743 next_field:
1744  for (i = 0; i < nb_components; i++)
1745  s->last_dc[i] = (4 << s->bits);
1746 
1747  if (s->avctx->hwaccel) {
1748  int bytes_to_start = get_bits_count(&s->gb) / 8;
1749  av_assert0(bytes_to_start >= 0 &&
1750  s->raw_scan_buffer_size >= bytes_to_start);
1751 
1752  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1753  s->raw_scan_buffer + bytes_to_start,
1754  s->raw_scan_buffer_size - bytes_to_start);
1755  if (ret < 0)
1756  return ret;
1757 
1758  } else if (s->lossless) {
1759  av_assert0(s->picture_ptr == s->picture);
1760  if (CONFIG_JPEGLS_DECODER && s->ls) {
1761 // for () {
1762 // reset_ls_coding_parameters(s, 0);
1763 
1764  if ((ret = ff_jpegls_decode_picture(s, predictor,
1765  point_transform, ilv)) < 0)
1766  return ret;
1767  } else {
1768  if (s->rgb || s->bayer) {
1769  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1770  return ret;
1771  } else {
1772  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1773  point_transform,
1774  nb_components)) < 0)
1775  return ret;
1776  }
1777  }
1778  } else {
1779  if (s->progressive && predictor) {
1780  av_assert0(s->picture_ptr == s->picture);
1781  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1782  ilv, prev_shift,
1783  point_transform)) < 0)
1784  return ret;
1785  } else {
1786  if ((ret = mjpeg_decode_scan(s, nb_components,
1787  prev_shift, point_transform,
1788  mb_bitmask, mb_bitmask_size, reference)) < 0)
1789  return ret;
1790  }
1791  }
1792 
1793  if (s->interlaced &&
1794  get_bits_left(&s->gb) > 32 &&
1795  show_bits(&s->gb, 8) == 0xFF) {
1796  GetBitContext bak = s->gb;
1797  align_get_bits(&bak);
1798  if (show_bits(&bak, 16) == 0xFFD1) {
1799  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1800  s->gb = bak;
1801  skip_bits(&s->gb, 16);
1802  s->bottom_field ^= 1;
1803 
1804  goto next_field;
1805  }
1806  }
1807 
1808  emms_c();
1809  return 0;
1810  out_of_range:
1811  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1812  return AVERROR_INVALIDDATA;
1813 }
1814 
1815 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1816 {
1817  if (get_bits(&s->gb, 16) != 4)
1818  return AVERROR_INVALIDDATA;
1819  s->restart_interval = get_bits(&s->gb, 16);
1820  s->restart_count = 0;
1821  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1822  s->restart_interval);
1823 
1824  return 0;
1825 }
1826 
1827 static int mjpeg_decode_app(MJpegDecodeContext *s)
1828 {
1829  int len, id, i;
1830 
1831  len = get_bits(&s->gb, 16);
1832  if (len < 6) {
1833  if (s->bayer) {
1834  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1835  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1836  skip_bits(&s->gb, len);
1837  return 0;
1838  } else
1839  return AVERROR_INVALIDDATA;
1840  }
1841  if (8 * len > get_bits_left(&s->gb))
1842  return AVERROR_INVALIDDATA;
1843 
1844  id = get_bits_long(&s->gb, 32);
1845  len -= 6;
1846 
1847  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1848  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1849  av_fourcc2str(av_bswap32(id)), id, len);
1850 
1851  /* Buggy AVID, it puts EOI only at every 10th frame. */
1852  /* Also, this fourcc is used by non-avid files too, it holds some
1853  information, but it's always present in AVID-created files. */
1854  if (id == AV_RB32("AVI1")) {
1855  /* structure:
1856  4bytes AVI1
1857  1bytes polarity
1858  1bytes always zero
1859  4bytes field_size
1860  4bytes field_size_less_padding
1861  */
1862  s->buggy_avid = 1;
1863  i = get_bits(&s->gb, 8); len--;
1864  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1865  goto out;
1866  }
1867 
1868  if (id == AV_RB32("JFIF")) {
1869  int t_w, t_h, v1, v2;
1870  if (len < 8)
1871  goto out;
1872  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1873  v1 = get_bits(&s->gb, 8);
1874  v2 = get_bits(&s->gb, 8);
1875  skip_bits(&s->gb, 8);
1876 
1877  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1878  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1879  if ( s->avctx->sample_aspect_ratio.num <= 0
1880  || s->avctx->sample_aspect_ratio.den <= 0) {
1881  s->avctx->sample_aspect_ratio.num = 0;
1882  s->avctx->sample_aspect_ratio.den = 1;
1883  }
1884 
1885  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1886  av_log(s->avctx, AV_LOG_INFO,
1887  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1888  v1, v2,
1889  s->avctx->sample_aspect_ratio.num,
1890  s->avctx->sample_aspect_ratio.den);
1891 
1892  len -= 8;
1893  if (len >= 2) {
1894  t_w = get_bits(&s->gb, 8);
1895  t_h = get_bits(&s->gb, 8);
1896  if (t_w && t_h) {
1897  /* skip thumbnail */
1898  if (len - 10 - (t_w * t_h * 3) > 0)
1899  len -= t_w * t_h * 3;
1900  }
1901  len -= 2;
1902  }
1903  goto out;
1904  }
1905 
1906  if ( id == AV_RB32("Adob")
1907  && len >= 7
1908  && show_bits(&s->gb, 8) == 'e'
1909  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1910  skip_bits(&s->gb, 8); /* 'e' */
1911  skip_bits(&s->gb, 16); /* version */
1912  skip_bits(&s->gb, 16); /* flags0 */
1913  skip_bits(&s->gb, 16); /* flags1 */
1914  s->adobe_transform = get_bits(&s->gb, 8);
1915  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1916  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1917  len -= 7;
1918  goto out;
1919  }
1920 
1921  if (id == AV_RB32("LJIF")) {
1922  int rgb = s->rgb;
1923  int pegasus_rct = s->pegasus_rct;
1924  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1925  av_log(s->avctx, AV_LOG_INFO,
1926  "Pegasus lossless jpeg header found\n");
1927  skip_bits(&s->gb, 16); /* version ? */
1928  skip_bits(&s->gb, 16); /* unknown always 0? */
1929  skip_bits(&s->gb, 16); /* unknown always 0? */
1930  skip_bits(&s->gb, 16); /* unknown always 0? */
1931  switch (i=get_bits(&s->gb, 8)) {
1932  case 1:
1933  rgb = 1;
1934  pegasus_rct = 0;
1935  break;
1936  case 2:
1937  rgb = 1;
1938  pegasus_rct = 1;
1939  break;
1940  default:
1941  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1942  }
1943 
1944  len -= 9;
1945  if (s->got_picture)
1946  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1947  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1948  goto out;
1949  }
1950 
1951  s->rgb = rgb;
1952  s->pegasus_rct = pegasus_rct;
1953 
1954  goto out;
1955  }
1956  if (id == AV_RL32("colr") && len > 0) {
1957  s->colr = get_bits(&s->gb, 8);
1958  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1959  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1960  len --;
1961  goto out;
1962  }
1963  if (id == AV_RL32("xfrm") && len > 0) {
1964  s->xfrm = get_bits(&s->gb, 8);
1965  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1966  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1967  len --;
1968  goto out;
1969  }
1970 
1971  /* JPS extension by VRex */
1972  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1973  int flags, layout, type;
1974  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1975  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1976 
1977  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1978  skip_bits(&s->gb, 16); len -= 2; /* block length */
1979  skip_bits(&s->gb, 8); /* reserved */
1980  flags = get_bits(&s->gb, 8);
1981  layout = get_bits(&s->gb, 8);
1982  type = get_bits(&s->gb, 8);
1983  len -= 4;
1984 
1985  av_freep(&s->stereo3d);
1986  s->stereo3d = av_stereo3d_alloc();
1987  if (!s->stereo3d) {
1988  goto out;
1989  }
1990  if (type == 0) {
1991  s->stereo3d->type = AV_STEREO3D_2D;
1992  } else if (type == 1) {
1993  switch (layout) {
1994  case 0x01:
1995  s->stereo3d->type = AV_STEREO3D_LINES;
1996  break;
1997  case 0x02:
1998  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1999  break;
2000  case 0x03:
2001  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2002  break;
2003  }
2004  if (!(flags & 0x04)) {
2005  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2006  }
2007  }
2008  goto out;
2009  }
2010 
2011  /* EXIF metadata */
2012  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2013  GetByteContext gbytes;
2014  int ret, le, ifd_offset, bytes_read;
2015  const uint8_t *aligned;
2016 
2017  skip_bits(&s->gb, 16); // skip padding
2018  len -= 2;
2019 
2020  // init byte wise reading
2021  aligned = align_get_bits(&s->gb);
2022  bytestream2_init(&gbytes, aligned, len);
2023 
2024  // read TIFF header
2025  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2026  if (ret) {
2027  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2028  } else {
2029  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2030 
2031  // read 0th IFD and store the metadata
2032  // (return values > 0 indicate the presence of subimage metadata)
2033  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2034  if (ret < 0) {
2035  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2036  }
2037  }
2038 
2039  bytes_read = bytestream2_tell(&gbytes);
2040  skip_bits(&s->gb, bytes_read << 3);
2041  len -= bytes_read;
2042 
2043  goto out;
2044  }
2045 
2046  /* Apple MJPEG-A */
2047  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2048  id = get_bits_long(&s->gb, 32);
2049  len -= 4;
2050  /* Apple MJPEG-A */
2051  if (id == AV_RB32("mjpg")) {
2052  /* structure:
2053  4bytes field size
2054  4bytes pad field size
2055  4bytes next off
2056  4bytes quant off
2057  4bytes huff off
2058  4bytes image off
2059  4bytes scan off
2060  4bytes data off
2061  */
2062  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2063  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2064  }
2065  }
2066 
2067  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2068  int id2;
2069  unsigned seqno;
2070  unsigned nummarkers;
2071 
2072  id = get_bits_long(&s->gb, 32);
2073  id2 = get_bits(&s->gb, 24);
2074  len -= 7;
2075  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2076  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2077  goto out;
2078  }
2079 
2080  skip_bits(&s->gb, 8);
2081  seqno = get_bits(&s->gb, 8);
2082  len -= 2;
2083  if (seqno == 0) {
2084  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2085  goto out;
2086  }
2087 
2088  nummarkers = get_bits(&s->gb, 8);
2089  len -= 1;
2090  if (nummarkers == 0) {
2091  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2092  goto out;
2093  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2094  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2095  goto out;
2096  } else if (seqno > nummarkers) {
2097  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2098  goto out;
2099  }
2100 
2101  /* Allocate if this is the first APP2 we've seen. */
2102  if (s->iccnum == 0) {
2103  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2104  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2105  return AVERROR(ENOMEM);
2106  }
2107  s->iccnum = nummarkers;
2108  }
2109 
2110  if (s->iccentries[seqno - 1].data) {
2111  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2112  goto out;
2113  }
2114 
2115  s->iccentries[seqno - 1].length = len;
2116  s->iccentries[seqno - 1].data = av_malloc(len);
2117  if (!s->iccentries[seqno - 1].data) {
2118  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2119  return AVERROR(ENOMEM);
2120  }
2121 
2122  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2123  skip_bits(&s->gb, len << 3);
2124  len = 0;
2125  s->iccread++;
2126 
2127  if (s->iccread > s->iccnum)
2128  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2129  }
2130 
2131 out:
2132  /* slow but needed for extreme adobe jpegs */
2133  if (len < 0)
2134  av_log(s->avctx, AV_LOG_ERROR,
2135  "mjpeg: error, decode_app parser read over the end\n");
2136  while (--len > 0)
2137  skip_bits(&s->gb, 8);
2138 
2139  return 0;
2140 }
2141 
2142 static int mjpeg_decode_com(MJpegDecodeContext *s)
2143 {
2144  int len = get_bits(&s->gb, 16);
2145  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2146  int i;
2147  char *cbuf = av_malloc(len - 1);
2148  if (!cbuf)
2149  return AVERROR(ENOMEM);
2150 
2151  for (i = 0; i < len - 2; i++)
2152  cbuf[i] = get_bits(&s->gb, 8);
2153  if (i > 0 && cbuf[i - 1] == '\n')
2154  cbuf[i - 1] = 0;
2155  else
2156  cbuf[i] = 0;
2157 
2158  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2159  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2160 
2161  /* buggy avid, it puts EOI only at every 10th frame */
2162  if (!strncmp(cbuf, "AVID", 4)) {
2163  parse_avid(s, cbuf, len);
2164  } else if (!strcmp(cbuf, "CS=ITU601"))
2165  s->cs_itu601 = 1;
2166  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2167  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2168  s->flipped = 1;
2169  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2170  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2171  s->multiscope = 2;
2172  }
2173 
2174  av_free(cbuf);
2175  }
2176 
2177  return 0;
2178 }
2179 
2180 /* return the 8 bit start code value and update the search
2181  state. Return -1 if no start code found */
2182 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2183 {
2184  const uint8_t *buf_ptr;
2185  unsigned int v, v2;
2186  int val;
2187  int skipped = 0;
2188 
2189  buf_ptr = *pbuf_ptr;
2190  while (buf_end - buf_ptr > 1) {
2191  v = *buf_ptr++;
2192  v2 = *buf_ptr;
2193  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2194  val = *buf_ptr++;
2195  goto found;
2196  }
2197  skipped++;
2198  }
2199  buf_ptr = buf_end;
2200  val = -1;
2201 found:
2202  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2203  *pbuf_ptr = buf_ptr;
2204  return val;
2205 }
2206 
2207 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2208  const uint8_t **buf_ptr, const uint8_t *buf_end,
2209  const uint8_t **unescaped_buf_ptr,
2210  int *unescaped_buf_size)
2211 {
2212  int start_code;
2213  start_code = find_marker(buf_ptr, buf_end);
2214 
2215  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2216  if (!s->buffer)
2217  return AVERROR(ENOMEM);
2218 
2219  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2220  if (start_code == SOS && !s->ls) {
2221  const uint8_t *src = *buf_ptr;
2222  const uint8_t *ptr = src;
2223  uint8_t *dst = s->buffer;
2224 
2225  #define copy_data_segment(skip) do { \
2226  ptrdiff_t length = (ptr - src) - (skip); \
2227  if (length > 0) { \
2228  memcpy(dst, src, length); \
2229  dst += length; \
2230  src = ptr; \
2231  } \
2232  } while (0)
2233 
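 /* baseline JPEG stuffs a 0x00 after every 0xFF inside entropy-coded data; strip the
  * stuffing (and runs of 0xFF fill bytes), keep RST0..RST7 markers in place and stop
  * at any other marker */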
2234  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2235  ptr = buf_end;
2236  copy_data_segment(0);
2237  } else {
2238  while (ptr < buf_end) {
2239  uint8_t x = *(ptr++);
2240 
2241  if (x == 0xff) {
2242  ptrdiff_t skip = 0;
2243  while (ptr < buf_end && x == 0xff) {
2244  x = *(ptr++);
2245  skip++;
2246  }
2247 
2248  /* 0xFF, 0xFF, ... */
2249  if (skip > 1) {
2250  copy_data_segment(skip);
2251 
2252  /* decrement src as it is equal to ptr after the
2253  * copy_data_segment macro and we might want to
2254  * copy the current value of x later on */
2255  src--;
2256  }
2257 
2258  if (x < RST0 || x > RST7) {
2259  copy_data_segment(1);
2260  if (x)
2261  break;
2262  }
2263  }
2264  }
2265  if (src < ptr)
2266  copy_data_segment(0);
2267  }
2268  #undef copy_data_segment
2269 
2270  *unescaped_buf_ptr = s->buffer;
2271  *unescaped_buf_size = dst - s->buffer;
2272  memset(s->buffer + *unescaped_buf_size, 0,
2273  AV_INPUT_BUFFER_PADDING_SIZE);
2274 
2275  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2276  (buf_end - *buf_ptr) - (dst - s->buffer));
2277  } else if (start_code == SOS && s->ls) {
2278  const uint8_t *src = *buf_ptr;
2279  uint8_t *dst = s->buffer;
2280  int bit_count = 0;
2281  int t = 0, b = 0;
2282  PutBitContext pb;
2283 
2284  /* find marker */
2285  while (src + t < buf_end) {
2286  uint8_t x = src[t++];
2287  if (x == 0xff) {
2288  while ((src + t < buf_end) && x == 0xff)
2289  x = src[t++];
2290  if (x & 0x80) {
2291  t -= FFMIN(2, t);
2292  break;
2293  }
2294  }
2295  }
2296  bit_count = t * 8;
2297  init_put_bits(&pb, dst, t);
2298 
2299  /* unescape bitstream */
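 /* in JPEG-LS scan data a 0xFF is followed by a byte whose top bit is 0 and which
  * carries only 7 payload bits; re-pack those bytes into a plain bitstream */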
2300  while (b < t) {
2301  uint8_t x = src[b++];
2302  put_bits(&pb, 8, x);
2303  if (x == 0xFF && b < t) {
2304  x = src[b++];
2305  if (x & 0x80) {
2306  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2307  x &= 0x7f;
2308  }
2309  put_bits(&pb, 7, x);
2310  bit_count--;
2311  }
2312  }
2313  flush_put_bits(&pb);
2314 
2315  *unescaped_buf_ptr = dst;
2316  *unescaped_buf_size = (bit_count + 7) >> 3;
2317  memset(s->buffer + *unescaped_buf_size, 0,
2318  AV_INPUT_BUFFER_PADDING_SIZE);
2319  } else {
2320  *unescaped_buf_ptr = *buf_ptr;
2321  *unescaped_buf_size = buf_end - *buf_ptr;
2322  }
2323 
2324  return start_code;
2325 }
2326 
2327 static void reset_icc_profile(MJpegDecodeContext *s)
2328 {
2329  int i;
2330 
2331  if (s->iccentries) {
2332  for (i = 0; i < s->iccnum; i++)
2333  av_freep(&s->iccentries[i].data);
2334  av_freep(&s->iccentries);
2335  }
2336 
2337  s->iccread = 0;
2338  s->iccnum = 0;
2339 }
2340 
2341 // SMV JPEG just stacks several output frames into one JPEG picture
2342 // we handle that by setting up the cropping parameters appropriately
2343 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2344 {
2345  MJpegDecodeContext *s = avctx->priv_data;
2346  int ret;
2347 
2348  if (s->smv_next_frame > 0) {
2349  av_assert0(s->smv_frame->buf[0]);
2350  av_frame_unref(frame);
2351  ret = av_frame_ref(frame, s->smv_frame);
2352  if (ret < 0)
2353  return ret;
2354  } else {
2355  av_assert0(frame->buf[0]);
2356  av_frame_unref(s->smv_frame);
2357  ret = av_frame_ref(s->smv_frame, frame);
2358  if (ret < 0)
2359  return ret;
2360  }
2361 
2362  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2363 
2364  frame->width = avctx->coded_width;
2365  frame->height = avctx->coded_height;
2366  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2367  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2368 
2369  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2370 
2371  if (s->smv_next_frame == 0)
2372  av_frame_unref(s->smv_frame);
2373 
2374  return 0;
2375 }
2376 
2377 static int mjpeg_get_packet(AVCodecContext *avctx)
2378 {
2379  MJpegDecodeContext *s = avctx->priv_data;
2380  int ret;
2381 
2382  av_packet_unref(s->pkt);
2383  ret = ff_decode_get_packet(avctx, s->pkt);
2384  if (ret < 0)
2385  return ret;
2386 
2387 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2388  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2389  avctx->codec_id == AV_CODEC_ID_AMV) {
2390  ret = ff_sp5x_process_packet(avctx, s->pkt);
2391  if (ret < 0)
2392  return ret;
2393  }
2394 #endif
2395 
2396  s->buf_size = s->pkt->size;
2397 
2398  return 0;
2399 }
2400 
2401 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2402 {
2403  MJpegDecodeContext *s = avctx->priv_data;
2404  const uint8_t *buf_end, *buf_ptr;
2405  const uint8_t *unescaped_buf_ptr;
2406  int hshift, vshift;
2407  int unescaped_buf_size;
2408  int start_code;
2409  int i, index;
2410  int ret = 0;
2411  int is16bit;
2412  AVDictionaryEntry *e = NULL;
2413 
2414  s->force_pal8 = 0;
2415 
2416  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2417  return smv_process_frame(avctx, frame);
2418 
2419  av_dict_free(&s->exif_metadata);
2420  av_freep(&s->stereo3d);
2421  s->adobe_transform = -1;
2422 
2423  if (s->iccnum != 0)
2424  reset_icc_profile(s);
2425 
2426  ret = mjpeg_get_packet(avctx);
2427  if (ret < 0)
2428  return ret;
2429 redo_for_pal8:
2430  buf_ptr = s->pkt->data;
2431  buf_end = s->pkt->data + s->pkt->size;
2432  while (buf_ptr < buf_end) {
2433  /* find start next marker */
2434  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2435  &unescaped_buf_ptr,
2436  &unescaped_buf_size);
2437  /* EOF */
2438  if (start_code < 0) {
2439  break;
2440  } else if (unescaped_buf_size > INT_MAX / 8) {
2441  av_log(avctx, AV_LOG_ERROR,
2442  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2443  start_code, unescaped_buf_size, s->pkt->size);
2444  return AVERROR_INVALIDDATA;
2445  }
2446  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2447  start_code, buf_end - buf_ptr);
2448 
2449  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2450 
2451  if (ret < 0) {
2452  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2453  goto fail;
2454  }
2455 
2456  s->start_code = start_code;
2457  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2458  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2459 
2460  /* process markers */
2461  if (start_code >= RST0 && start_code <= RST7) {
2462  av_log(avctx, AV_LOG_DEBUG,
2463  "restart marker: %d\n", start_code & 0x0f);
2464  /* APP fields */
2465  } else if (start_code >= APP0 && start_code <= APP15) {
2466  if ((ret = mjpeg_decode_app(s)) < 0)
2467  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2468  av_err2str(ret));
2469  /* Comment */
2470  } else if (start_code == COM) {
2471  ret = mjpeg_decode_com(s);
2472  if (ret < 0)
2473  return ret;
2474  } else if (start_code == DQT) {
2475  ret = ff_mjpeg_decode_dqt(s);
2476  if (ret < 0)
2477  return ret;
2478  }
2479 
2480  ret = -1;
2481 
2482  if (!CONFIG_JPEGLS_DECODER &&
2483  (start_code == SOF48 || start_code == LSE)) {
2484  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2485  return AVERROR(ENOSYS);
2486  }
2487 
2488  if (avctx->skip_frame == AVDISCARD_ALL) {
2489  switch(start_code) {
2490  case SOF0:
2491  case SOF1:
2492  case SOF2:
2493  case SOF3:
2494  case SOF48:
2495  case SOI:
2496  case SOS:
2497  case EOI:
2498  break;
2499  default:
2500  goto skip;
2501  }
2502  }
2503 
2504  switch (start_code) {
2505  case SOI:
2506  s->restart_interval = 0;
2507  s->restart_count = 0;
2508  s->raw_image_buffer = buf_ptr;
2509  s->raw_image_buffer_size = buf_end - buf_ptr;
2510  /* nothing to do on SOI */
2511  break;
2512  case DHT:
2513  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2514  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2515  goto fail;
2516  }
2517  break;
2518  case SOF0:
2519  case SOF1:
2520  if (start_code == SOF0)
2521  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2522  else
2523  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2524  s->lossless = 0;
2525  s->ls = 0;
2526  s->progressive = 0;
2527  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2528  goto fail;
2529  break;
2530  case SOF2:
2531  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2532  s->lossless = 0;
2533  s->ls = 0;
2534  s->progressive = 1;
2535  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2536  goto fail;
2537  break;
2538  case SOF3:
2539  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2540  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2541  s->lossless = 1;
2542  s->ls = 0;
2543  s->progressive = 0;
2544  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2545  goto fail;
2546  break;
2547  case SOF48:
2548  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2549  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2550  s->lossless = 1;
2551  s->ls = 1;
2552  s->progressive = 0;
2553  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2554  goto fail;
2555  break;
2556  case LSE:
2557  if (!CONFIG_JPEGLS_DECODER ||
2558  (ret = ff_jpegls_decode_lse(s)) < 0)
2559  goto fail;
2560  if (ret == 1)
2561  goto redo_for_pal8;
2562  break;
2563  case EOI:
2564 eoi_parser:
2565  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2566  s->progressive && s->cur_scan && s->got_picture)
2567  mjpeg_idct_scan_progressive_ac(s);
2568  s->cur_scan = 0;
2569  if (!s->got_picture) {
2570  av_log(avctx, AV_LOG_WARNING,
2571  "Found EOI before any SOF, ignoring\n");
2572  break;
2573  }
2574  if (s->interlaced) {
2575  s->bottom_field ^= 1;
2576  /* if not bottom field, do not output image yet */
2577  if (s->bottom_field == !s->interlace_polarity)
2578  break;
2579  }
2580  if (avctx->skip_frame == AVDISCARD_ALL) {
2581  s->got_picture = 0;
2582  ret = AVERROR(EAGAIN);
2583  goto the_end_no_picture;
2584  }
2585  if (s->avctx->hwaccel) {
2586  ret = s->avctx->hwaccel->end_frame(s->avctx);
2587  if (ret < 0)
2588  return ret;
2589 
2590  av_freep(&s->hwaccel_picture_private);
2591  }
2592  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2593  return ret;
2594  s->got_picture = 0;
2595 
2596  frame->pkt_dts = s->pkt->dts;
2597 
2598  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2599  int qp = FFMAX3(s->qscale[0],
2600  s->qscale[1],
2601  s->qscale[2]);
2602 
2603  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2604  }
2605 
2606  goto the_end;
2607  case SOS:
2608  s->raw_scan_buffer = buf_ptr;
2609  s->raw_scan_buffer_size = buf_end - buf_ptr;
2610 
2611  s->cur_scan++;
2612  if (avctx->skip_frame == AVDISCARD_ALL) {
2613  skip_bits(&s->gb, get_bits_left(&s->gb));
2614  break;
2615  }
2616 
2617  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2618  (avctx->err_recognition & AV_EF_EXPLODE))
2619  goto fail;
2620  break;
2621  case DRI:
2622  if ((ret = mjpeg_decode_dri(s)) < 0)
2623  return ret;
2624  break;
2625  case SOF5:
2626  case SOF6:
2627  case SOF7:
2628  case SOF9:
2629  case SOF10:
2630  case SOF11:
2631  case SOF13:
2632  case SOF14:
2633  case SOF15:
2634  case JPG:
2635  av_log(avctx, AV_LOG_ERROR,
2636  "mjpeg: unsupported coding type (%x)\n", start_code);
2637  break;
2638  }
2639 
2640 skip:
2641  /* eof process start code */
2642  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2643  av_log(avctx, AV_LOG_DEBUG,
2644  "marker parser used %d bytes (%d bits)\n",
2645  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2646  }
2647  if (s->got_picture && s->cur_scan) {
2648  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2649  goto eoi_parser;
2650  }
2651  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2652  return AVERROR_INVALIDDATA;
2653 fail:
2654  s->got_picture = 0;
2655  return ret;
2656 the_end:
2657 
2658  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2659 
2660  if (AV_RB32(s->upscale_h)) {
2661  int p;
2662  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2663  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2664  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2665  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2666  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2667  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2668  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2669  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2670  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2671  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2672  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2673  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2674  );
2675  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2676  if (ret)
2677  return ret;
2678 
2679  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
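 /* stretch each flagged plane horizontally in place (factor 2 or 3), working right
  * to left with simple linear interpolation */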
2680  for (p = 0; p<s->nb_components; p++) {
2681  uint8_t *line = s->picture_ptr->data[p];
2682  int w = s->width;
2683  int h = s->height;
2684  if (!s->upscale_h[p])
2685  continue;
2686  if (p==1 || p==2) {
2687  w = AV_CEIL_RSHIFT(w, hshift);
2688  h = AV_CEIL_RSHIFT(h, vshift);
2689  }
2690  if (s->upscale_v[p] == 1)
2691  h = (h+1)>>1;
2692  av_assert0(w > 0);
2693  for (i = 0; i < h; i++) {
2694  if (s->upscale_h[p] == 1) {
2695  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2696  else line[w - 1] = line[(w - 1) / 2];
2697  for (index = w - 2; index > 0; index--) {
2698  if (is16bit)
2699  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2700  else
2701  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2702  }
2703  } else if (s->upscale_h[p] == 2) {
2704  if (is16bit) {
2705  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2706  if (w > 1)
2707  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2708  } else {
2709  line[w - 1] = line[(w - 1) / 3];
2710  if (w > 1)
2711  line[w - 2] = line[w - 1];
2712  }
2713  for (index = w - 3; index > 0; index--) {
2714  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2715  }
2716  }
2717  line += s->linesize[p];
2718  }
2719  }
2720  }
2721  if (AV_RB32(s->upscale_v)) {
2722  int p;
2723  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2727  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2728  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2729  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2730  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2731  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2732  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2733  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2734  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2735  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2736  );
2737  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2738  if (ret)
2739  return ret;
2740 
2741  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
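 /* stretch each flagged plane vertically in place, working bottom-up and averaging
  * (or copying) the nearest decoded rows */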
2742  for (p = 0; p < s->nb_components; p++) {
2743  uint8_t *dst;
2744  int w = s->width;
2745  int h = s->height;
2746  if (!s->upscale_v[p])
2747  continue;
2748  if (p==1 || p==2) {
2749  w = AV_CEIL_RSHIFT(w, hshift);
2750  h = AV_CEIL_RSHIFT(h, vshift);
2751  }
2752  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2753  for (i = h - 1; i; i--) {
2754  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2755  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2756  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2757  memcpy(dst, src1, w);
2758  } else {
2759  for (index = 0; index < w; index++)
2760  dst[index] = (src1[index] + src2[index]) >> 1;
2761  }
2762  dst -= s->linesize[p];
2763  }
2764  }
2765  }
2766  if (s->flipped && !s->rgb) {
2767  int j;
2768  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2769  if (ret)
2770  return ret;
2771 
2772  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2773  for (index=0; index<s->nb_components; index++) {
2774  uint8_t *dst = s->picture_ptr->data[index];
2775  int w = s->picture_ptr->width;
2776  int h = s->picture_ptr->height;
2777  if(index && index<3){
2778  w = AV_CEIL_RSHIFT(w, hshift);
2779  h = AV_CEIL_RSHIFT(h, vshift);
2780  }
2781  if(dst){
2782  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2783  for (i=0; i<h/2; i++) {
2784  for (j=0; j<w; j++)
2785  FFSWAP(int, dst[j], dst2[j]);
2786  dst += s->picture_ptr->linesize[index];
2787  dst2 -= s->picture_ptr->linesize[index];
2788  }
2789  }
2790  }
2791  }
2792  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2793  int w = s->picture_ptr->width;
2794  int h = s->picture_ptr->height;
2795  av_assert0(s->nb_components == 4);
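 /* Adobe transform 0 with 4 components: scale each colour sample by the K value kept
  * in the alpha plane (x * 257 >> 16 approximates x / 255), reorder the planes to GBR
  * and make alpha opaque */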
2796  for (i=0; i<h; i++) {
2797  int j;
2798  uint8_t *dst[4];
2799  for (index=0; index<4; index++) {
2800  dst[index] = s->picture_ptr->data[index]
2801  + s->picture_ptr->linesize[index]*i;
2802  }
2803  for (j=0; j<w; j++) {
2804  int k = dst[3][j];
2805  int r = dst[0][j] * k;
2806  int g = dst[1][j] * k;
2807  int b = dst[2][j] * k;
2808  dst[0][j] = g*257 >> 16;
2809  dst[1][j] = b*257 >> 16;
2810  dst[2][j] = r*257 >> 16;
2811  dst[3][j] = 255;
2812  }
2813  }
2814  }
2815  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2816  int w = s->picture_ptr->width;
2817  int h = s->picture_ptr->height;
2818  av_assert0(s->nb_components == 4);
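 /* Adobe transform 2 with 4 components (YCCK): invert luma against 255 and chroma
  * against 128, scale by the K plane, re-centre the chroma and make alpha opaque */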
2819  for (i=0; i<h; i++) {
2820  int j;
2821  uint8_t *dst[4];
2822  for (index=0; index<4; index++) {
2823  dst[index] = s->picture_ptr->data[index]
2824  + s->picture_ptr->linesize[index]*i;
2825  }
2826  for (j=0; j<w; j++) {
2827  int k = dst[3][j];
2828  int r = (255 - dst[0][j]) * k;
2829  int g = (128 - dst[1][j]) * k;
2830  int b = (128 - dst[2][j]) * k;
2831  dst[0][j] = r*257 >> 16;
2832  dst[1][j] = (g*257 >> 16) + 128;
2833  dst[2][j] = (b*257 >> 16) + 128;
2834  dst[3][j] = 255;
2835  }
2836  }
2837  }
2838 
2839  if (s->stereo3d) {
2840  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2841  if (stereo) {
2842  stereo->type = s->stereo3d->type;
2843  stereo->flags = s->stereo3d->flags;
2844  }
2845  av_freep(&s->stereo3d);
2846  }
2847 
2848  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2849  AVFrameSideData *sd;
2850  size_t offset = 0;
2851  int total_size = 0;
2852  int i;
2853 
2854  /* Sum size of all parts. */
2855  for (i = 0; i < s->iccnum; i++)
2856  total_size += s->iccentries[i].length;
2857 
2858  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2859  if (!sd) {
2860  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2861  return AVERROR(ENOMEM);
2862  }
2863 
2864  /* Reassemble the parts, which are now in-order. */
2865  for (i = 0; i < s->iccnum; i++) {
2866  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2867  offset += s->iccentries[i].length;
2868  }
2869  }
2870 
2871  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2872  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2873  int orientation = strtol(value, &endptr, 0);
2874 
2875  if (!*endptr) {
2876  AVFrameSideData *sd = NULL;
2877 
2878  if (orientation >= 2 && orientation <= 8) {
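 /* EXIF orientations 2..8 are exported as a display matrix (a rotation plus an
  * optional flip); orientation 1 is the identity and needs no side data */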
2879  int32_t *matrix;
2880 
2881  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2882  if (!sd) {
2883  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2884  return AVERROR(ENOMEM);
2885  }
2886 
2887  matrix = (int32_t *)sd->data;
2888 
2889  switch (orientation) {
2890  case 2:
2891  av_display_rotation_set(matrix, 0.0);
2892  av_display_matrix_flip(matrix, 1, 0);
2893  break;
2894  case 3:
2895  av_display_rotation_set(matrix, 180.0);
2896  break;
2897  case 4:
2898  av_display_rotation_set(matrix, 180.0);
2899  av_display_matrix_flip(matrix, 1, 0);
2900  break;
2901  case 5:
2902  av_display_rotation_set(matrix, 90.0);
2903  av_display_matrix_flip(matrix, 1, 0);
2904  break;
2905  case 6:
2906  av_display_rotation_set(matrix, 90.0);
2907  break;
2908  case 7:
2909  av_display_rotation_set(matrix, -90.0);
2910  av_display_matrix_flip(matrix, 1, 0);
2911  break;
2912  case 8:
2913  av_display_rotation_set(matrix, -90.0);
2914  break;
2915  default:
2916  av_assert0(0);
2917  }
2918  }
2919  }
2920  }
2921 
2922  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2923  av_dict_free(&s->exif_metadata);
2924 
2925  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2926  ret = smv_process_frame(avctx, frame);
2927  if (ret < 0) {
2928  av_frame_unref(frame);
2929  return ret;
2930  }
2931  }
2932  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2933  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2934  avctx->coded_height > s->orig_height) {
2935  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2936  frame->crop_top = frame->height - avctx->height;
2937  }
2938 
2939  ret = 0;
2940 
2941 the_end_no_picture:
2942  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2943  buf_end - buf_ptr);
2944 
2945  return ret;
2946 }
2947 
2948 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2949  * even without having called ff_mjpeg_decode_init(). */
2950 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2951 {
2952  MJpegDecodeContext *s = avctx->priv_data;
2953  int i, j;
2954 
2955  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2956  av_log(avctx, AV_LOG_INFO, "Single field\n");
2957  }
2958 
2959  if (s->picture) {
2960  av_frame_free(&s->picture);
2961  s->picture_ptr = NULL;
2962  } else if (s->picture_ptr)
2963  av_frame_unref(s->picture_ptr);
2964 
2965  av_frame_free(&s->smv_frame);
2966 
2967  av_freep(&s->buffer);
2968  av_freep(&s->stereo3d);
2969  av_freep(&s->ljpeg_buffer);
2970  s->ljpeg_buffer_size = 0;
2971 
2972  for (i = 0; i < 3; i++) {
2973  for (j = 0; j < 4; j++)
2974  ff_free_vlc(&s->vlcs[i][j]);
2975  }
2976  for (i = 0; i < MAX_COMPONENTS; i++) {
2977  av_freep(&s->blocks[i]);
2978  av_freep(&s->last_nnz[i]);
2979  }
2980  av_dict_free(&s->exif_metadata);
2981 
2982  reset_icc_profile(s);
2983 
2984  av_freep(&s->hwaccel_picture_private);
2985  av_freep(&s->jls_state);
2986 
2987  return 0;
2988 }
2989 
2990 static void decode_flush(AVCodecContext *avctx)
2991 {
2992  MJpegDecodeContext *s = avctx->priv_data;
2993  s->got_picture = 0;
2994 
2995  s->smv_next_frame = 0;
2996  av_frame_unref(s->smv_frame);
2997 }
2998 
2999 #if CONFIG_MJPEG_DECODER
3000 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
3001 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
3002 static const AVOption options[] = {
3003  { "extern_huff", "Use external huffman table.",
3004  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
3005  { NULL },
3006 };
3007 
3008 static const AVClass mjpegdec_class = {
3009  .class_name = "MJPEG decoder",
3010  .item_name = av_default_item_name,
3011  .option = options,
3012  .version = LIBAVUTIL_VERSION_INT,
3013 };
3014 
3015 const FFCodec ff_mjpeg_decoder = {
3016  .p.name = "mjpeg",
3017  .p.long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
3018  .p.type = AVMEDIA_TYPE_VIDEO,
3019  .p.id = AV_CODEC_ID_MJPEG,
3020  .priv_data_size = sizeof(MJpegDecodeContext),
3021  .init = ff_mjpeg_decode_init,
3022  .close = ff_mjpeg_decode_end,
3023  FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
3024  .flush = decode_flush,
3025  .p.capabilities = AV_CODEC_CAP_DR1,
3026  .p.max_lowres = 3,
3027  .p.priv_class = &mjpegdec_class,
3028  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
3031  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3032 #if CONFIG_MJPEG_NVDEC_HWACCEL
3033  HWACCEL_NVDEC(mjpeg),
3034 #endif
3035 #if CONFIG_MJPEG_VAAPI_HWACCEL
3036  HWACCEL_VAAPI(mjpeg),
3037 #endif
3038  NULL
3039  },
3040 };
3041 #endif
3042 #if CONFIG_THP_DECODER
3043 const FFCodec ff_thp_decoder = {
3044  .p.name = "thp",
3045  .p.long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
3046  .p.type = AVMEDIA_TYPE_VIDEO,
3047  .p.id = AV_CODEC_ID_THP,
3048  .priv_data_size = sizeof(MJpegDecodeContext),
3049  .init = ff_mjpeg_decode_init,
3050  .close = ff_mjpeg_decode_end,
3051  FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
3052  .flush = decode_flush,
3053  .p.capabilities = AV_CODEC_CAP_DR1,
3054  .p.max_lowres = 3,
3057 };
3058 #endif
3059 
3060 #if CONFIG_SMVJPEG_DECODER
3061 const FFCodec ff_smvjpeg_decoder = {
3062  .p.name = "smvjpeg",
3063  .p.long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
3064  .p.type = AVMEDIA_TYPE_VIDEO,
3065  .p.id = AV_CODEC_ID_SMVJPEG,
3066  .priv_data_size = sizeof(MJpegDecodeContext),
3067  .init = ff_mjpeg_decode_init,
3068  .close = ff_mjpeg_decode_end,
3069  FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
3070  .flush = decode_flush,
3071  .p.capabilities = AV_CODEC_CAP_DR1,
3074 };
3075 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1379
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:205
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:206
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:39
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:966
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1100
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1395
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:672
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2990
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2662
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:957
SOF0
@ SOF0
Definition: mjpeg.h:39
matrix
Definition: vc1dsp.c:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1344
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:696
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:113
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:196
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
AVFrame::width
int width
Definition: frame.h:397
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:448
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:599
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1651
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2343
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:995
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:34
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:791
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:143
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:112
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2377
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:150
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:240
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1323
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
APP15
@ APP15
Definition: mjpeg.h:94
init
static int init
Definition: av_tx.c:47
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2702
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:531
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1241
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1411
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:398
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:122
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1695
fail
#define fail()
Definition: checkasm.h:131
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:450
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1647
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2142
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:59
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:469
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:577
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:390
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:35
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
av_bswap32
#define av_bswap32
Definition: bswap.h:33
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
aligned
static int aligned(int val)
Definition: dashdec.c:168
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:858
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:418
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1845
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1649
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1040
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:102
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:419
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1650
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:367
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:354
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:38
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:417
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2327
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2950
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2401
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:425
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:396
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:60
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:110
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:194
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:397
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1602
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:205
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:198
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:424
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
tiff_common.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1426
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:975
lowres
static int lowres
Definition: ffplay.c:335
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1547
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1355
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1455
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1403
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1075
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:324
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:343
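A short sketch, assuming src is a valid decoded AVFrame: av_frame_ref() adds a second reference to its buffers into a clean destination frame, and av_frame_free() later drops that reference.
AVFrame *dst = av_frame_alloc();
if (!dst)
    return AVERROR(ENOMEM);
int ret = av_frame_ref(dst, src);   /* dst must be clean (freshly allocated or unreferenced) */
if (ret < 0) {
    av_frame_free(&dst);
    return ret;
}
/* ... use dst ... */
av_frame_free(&dst);                /* unreferences and frees dst */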
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:50
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:876
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1637
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:57
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:263
AV_RB32
Macro-generated byte-access helper from the AV_RB/AV_RL family; AV_RB32 reads a 32-bit big-endian value.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:51
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:412
AVCodecHWConfigInternal
Definition: hwconfig.h:29
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1234
offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
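An illustrative (non-FFmpeg) sketch of the value mapping this describes: the leading bit of the n-bit code acts as the sign, codes with the MSB set are taken as-is, and codes with the MSB clear are offset by -(2^n - 1).
static int xbits_value(unsigned raw, int n)   /* raw: the n bits just read */
{
    if (raw >> (n - 1))                       /* MSB set -> non-negative   */
        return (int)raw;
    return (int)raw - ((1 << n) - 1);         /* MSB clear -> negative     */
}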
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:171
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2182
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:210
layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:41
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:168
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2042
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:809
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1648
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:30
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1815
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:73
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:48
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1330
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:980
len
int len
Definition: vorbis_enc_data.h:426
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: codec_internal.h:46
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:582
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:949
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:29
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1327
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Definition: snow.txt:386
AV_RL32
Macro-generated byte-access helper from the AV_RB/AV_RL family; AV_RL32 reads a 32-bit little-endian value.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2207
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
OFFSET
AVCodecContext
main external API structure.
Definition: avcodec.h:389
AVFrame::height
int height
Definition: frame.h:397
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:260
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:621
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1322
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:302
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:577
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:90
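A hedged sketch of the usual call site in a decoder, with parsed_width/parsed_height standing in for values read from the bitstream and ret/avctx as in the surrounding code: the helper validates the values, stores them on the codec context, and a negative return is treated as an error.
ret = ff_set_dimensions(avctx, parsed_width, parsed_height);
if (ret < 0)
    return ret;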
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1827
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1037
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
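A small self-contained sketch of a PutBitContext write sequence; flush_put_bits() zero-pads the final partial byte so the output ends on a byte boundary.
uint8_t buf[64];
PutBitContext pb;
init_put_bits(&pb, buf, sizeof(buf));
put_bits(&pb, 4, 0xA);     /* a 4-bit code */
put_bits(&pb, 9, 0x1FF);   /* a 9-bit code */
flush_put_bits(&pb);       /* pad the tail with zero bits */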
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:79
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:414
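A sketch of the packing described above; it matches what the MKTAG() macro produces, with the first character stored in the least significant byte.
unsigned tag = 'A' | ('B' << 8) | ('C' << 16) | ((unsigned)'D' << 24);
char c0 = tag & 0xFF;            /* 'A' */
char c3 = (tag >> 24) & 0xFF;    /* 'D' */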
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:565
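A sketch of the usual reuse pattern, with needed_size standing in for the caller's requirement: the pointer and its remembered size persist across calls, reallocation only happens when needed_size exceeds that size, and the result is checked for NULL after each call.
uint8_t *buf      = NULL;
unsigned buf_size = 0;

av_fast_malloc(&buf, &buf_size, needed_size);
if (!buf)
    return AVERROR(ENOMEM);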
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
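A short sketch, assuming src_dict is an existing AVDictionary: the copy goes into a NULL-initialized destination and is released with av_dict_free().
AVDictionary *dst = NULL;
int ret = av_dict_copy(&dst, src_dict, 0);
if (ret < 0) {
    av_dict_free(&dst);   /* entries copied before the failure may exist */
    return ret;
}
/* ... read entries from dst ... */
av_dict_free(&dst);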
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
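A compact sketch of bounded reading with GetByteContext, assuming data/data_size come from the caller; bytestream2_tell() then reports how many bytes have been consumed.
GetByteContext gb;
bytestream2_init(&gb, data, data_size);
unsigned marker = bytestream2_get_byte(&gb);   /* 1 byte  */
unsigned length = bytestream2_get_be32(&gb);   /* 4 bytes */
int consumed    = bytestream2_tell(&gb);       /* 5 here  */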
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
AVDictionaryEntry::value
char * value
Definition: dict.h:81
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AV_RB24
Macro-generated byte-access helper from the AV_RB/AV_RL family; AV_RB24 reads a 24-bit big-endian value.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:354
re
float re
Definition: fft.c:79