FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/display.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/avassert.h"
36 #include "libavutil/opt.h"
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "copy_block.h"
40 #include "decode.h"
41 #include "hwconfig.h"
42 #include "idctdsp.h"
43 #include "internal.h"
44 #include "jpegtables.h"
45 #include "mjpeg.h"
46 #include "mjpegdec.h"
47 #include "jpeglsdec.h"
48 #include "profiles.h"
49 #include "put_bits.h"
50 #include "tiff.h"
51 #include "exif.h"
52 #include "bytestream.h"
53 
54 
55 static int init_default_huffman_tables(MJpegDecodeContext *s)
56 {
57  static const struct {
58  int class;
59  int index;
60  const uint8_t *bits;
61  const uint8_t *values;
62  int length;
63  } ht[] = {
64  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
65          avpriv_mjpeg_val_dc, 12 },
66  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
67          avpriv_mjpeg_val_dc, 12 },
68  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
69          avpriv_mjpeg_val_ac_luminance, 162 },
70  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
71          avpriv_mjpeg_val_ac_chrominance, 162 },
72  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
73          avpriv_mjpeg_val_ac_luminance, 162 },
74  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
75          avpriv_mjpeg_val_ac_chrominance, 162 },
76  };
77  int i, ret;
78 
79  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
80  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
81  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
82  ht[i].bits, ht[i].values,
83  ht[i].class == 1, s->avctx);
84  if (ret < 0)
85  return ret;
86 
87  if (ht[i].class < 2) {
88  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
89  ht[i].bits + 1, 16);
90  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
91  ht[i].values, ht[i].length);
92  }
93  }
94 
95  return 0;
96 }
97 
98 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
99 {
100  s->buggy_avid = 1;
101  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
102  s->interlace_polarity = 1;
103  if (len > 14 && buf[12] == 2) /* 2 - PAL */
104  s->interlace_polarity = 0;
105  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
106  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
107 }
108 
109 static void init_idct(AVCodecContext *avctx)
110 {
111  MJpegDecodeContext *s = avctx->priv_data;
112 
113  ff_idctdsp_init(&s->idsp, avctx);
114  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
115                    ff_zigzag_direct);
116 }
117 
118 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
119 {
120  MJpegDecodeContext *s = avctx->priv_data;
121  int ret;
122 
123  if (!s->picture_ptr) {
124  s->picture = av_frame_alloc();
125  if (!s->picture)
126  return AVERROR(ENOMEM);
127  s->picture_ptr = s->picture;
128  }
129 
130  s->pkt = av_packet_alloc();
131  if (!s->pkt)
132  return AVERROR(ENOMEM);
133 
134  s->avctx = avctx;
135  ff_blockdsp_init(&s->bdsp, avctx);
136  ff_hpeldsp_init(&s->hdsp, avctx->flags);
137  init_idct(avctx);
138  s->buffer_size = 0;
139  s->buffer = NULL;
140  s->start_code = -1;
141  s->first_picture = 1;
142  s->got_picture = 0;
143  s->orig_height = avctx->coded_height;
144  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
145  avctx->colorspace = AVCOL_SPC_BT470BG;
146  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
147 
148  if ((ret = init_default_huffman_tables(s)) < 0)
149  return ret;
150 
151  if (s->extern_huff) {
152  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
153  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
154  return ret;
155  if (ff_mjpeg_decode_dht(s)) {
156  av_log(avctx, AV_LOG_ERROR,
157  "error using external huffman table, switching back to internal\n");
158  if ((ret = init_default_huffman_tables(s)) < 0)
159  return ret;
160  }
161  }
162  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
163  s->interlace_polarity = 1; /* bottom field first */
164  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
165  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
166  if (avctx->codec_tag == AV_RL32("MJPG"))
167  s->interlace_polarity = 1;
168  }
169 
170  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
171  if (avctx->extradata_size >= 4)
172  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
173 
174  if (s->smv_frames_per_jpeg <= 0) {
175  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
176  return AVERROR_INVALIDDATA;
177  }
178 
179  s->smv_frame = av_frame_alloc();
180  if (!s->smv_frame)
181  return AVERROR(ENOMEM);
182  } else if (avctx->extradata_size > 8
183  && AV_RL32(avctx->extradata) == 0x2C
184  && AV_RL32(avctx->extradata+4) == 0x18) {
185  parse_avid(s, avctx->extradata, avctx->extradata_size);
186  }
187 
188  if (avctx->codec->id == AV_CODEC_ID_AMV)
189  s->flipped = 1;
190 
191  return 0;
192 }
193 
194 
195 /* quantize tables */
196 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
197 {
198  int len, index, i;
199 
200  len = get_bits(&s->gb, 16) - 2;
201 
202  if (8*len > get_bits_left(&s->gb)) {
203  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
204  return AVERROR_INVALIDDATA;
205  }
206 
207  while (len >= 65) {
208  int pr = get_bits(&s->gb, 4);
209  if (pr > 1) {
210  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
211  return AVERROR_INVALIDDATA;
212  }
213  index = get_bits(&s->gb, 4);
214  if (index >= 4)
215  return -1;
216  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
217  /* read quant table */
218  for (i = 0; i < 64; i++) {
219  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
220  if (s->quant_matrixes[index][i] == 0) {
221  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
222  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
223  if (s->avctx->err_recognition & AV_EF_EXPLODE)
224  return AVERROR_INVALIDDATA;
225  }
226  }
227 
228  // XXX FIXME fine-tune, and perhaps add dc too
229  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
230  s->quant_matrixes[index][8]) >> 1;
231  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
232  index, s->qscale[index]);
233  len -= 1 + 64 * (1+pr);
234  }
235  return 0;
236 }
237 
238 /* decode huffman tables and build VLC decoders */
239 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
240 {
241  int len, index, i, class, n, v;
242  uint8_t bits_table[17];
243  uint8_t val_table[256];
244  int ret = 0;
245 
246  len = get_bits(&s->gb, 16) - 2;
247 
248  if (8*len > get_bits_left(&s->gb)) {
249  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
250  return AVERROR_INVALIDDATA;
251  }
252 
253  while (len > 0) {
254  if (len < 17)
255  return AVERROR_INVALIDDATA;
256  class = get_bits(&s->gb, 4);
257  if (class >= 2)
258  return AVERROR_INVALIDDATA;
259  index = get_bits(&s->gb, 4);
260  if (index >= 4)
261  return AVERROR_INVALIDDATA;
262  n = 0;
263  for (i = 1; i <= 16; i++) {
264  bits_table[i] = get_bits(&s->gb, 8);
265  n += bits_table[i];
266  }
267  len -= 17;
268  if (len < n || n > 256)
269  return AVERROR_INVALIDDATA;
270 
271  for (i = 0; i < n; i++) {
272  v = get_bits(&s->gb, 8);
273  val_table[i] = v;
274  }
275  len -= n;
276 
277  /* build VLC and flush previous vlc if present */
278  ff_free_vlc(&s->vlcs[class][index]);
279  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
280  class, index, n);
281  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
282  val_table, class > 0, s->avctx)) < 0)
283  return ret;
284 
285  if (class > 0) {
286  ff_free_vlc(&s->vlcs[2][index]);
287  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
288  val_table, 0, s->avctx)) < 0)
289  return ret;
290  }
291 
292  for (i = 0; i < 16; i++)
293  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
294  for (i = 0; i < 256; i++)
295  s->raw_huffman_values[class][index][i] = val_table[i];
296  }
297  return 0;
298 }
299 
300 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
301 {
302  int len, nb_components, i, width, height, bits, ret, size_change;
303  unsigned pix_fmt_id;
304  int h_count[MAX_COMPONENTS] = { 0 };
305  int v_count[MAX_COMPONENTS] = { 0 };
306 
307  s->cur_scan = 0;
308  memset(s->upscale_h, 0, sizeof(s->upscale_h));
309  memset(s->upscale_v, 0, sizeof(s->upscale_v));
310 
311  len = get_bits(&s->gb, 16);
312  bits = get_bits(&s->gb, 8);
313 
314  if (bits > 16 || bits < 1) {
315  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
316  return AVERROR_INVALIDDATA;
317  }
318 
319  if (s->avctx->bits_per_raw_sample != bits) {
320  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
321  s->avctx->bits_per_raw_sample = bits;
322  init_idct(s->avctx);
323  }
324  if (s->pegasus_rct)
325  bits = 9;
326  if (bits == 9 && !s->pegasus_rct)
327  s->rct = 1; // FIXME ugly
328 
329  if(s->lossless && s->avctx->lowres){
330  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
331  return -1;
332  }
333 
334  height = get_bits(&s->gb, 16);
335  width = get_bits(&s->gb, 16);
336 
337  // HACK for odd_height.mov
338  if (s->interlaced && s->width == width && s->height == height + 1)
339  height= s->height;
340 
341  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
342  if (av_image_check_size(width, height, 0, s->avctx) < 0)
343  return AVERROR_INVALIDDATA;
344  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
345  return AVERROR_INVALIDDATA;
346 
347  nb_components = get_bits(&s->gb, 8);
348  if (nb_components <= 0 ||
349  nb_components > MAX_COMPONENTS)
350  return -1;
351  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
352  if (nb_components != s->nb_components) {
353  av_log(s->avctx, AV_LOG_ERROR,
354  "nb_components changing in interlaced picture\n");
355  return AVERROR_INVALIDDATA;
356  }
357  }
358  if (s->ls && !(bits <= 8 || nb_components == 1)) {
359  avpriv_report_missing_feature(s->avctx,
360                                "JPEG-LS that is not <= 8 "
361  "bits/component or 16-bit gray");
362  return AVERROR_PATCHWELCOME;
363  }
364  if (len != 8 + 3 * nb_components) {
365  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
366  return AVERROR_INVALIDDATA;
367  }
368 
369  s->nb_components = nb_components;
370  s->h_max = 1;
371  s->v_max = 1;
372  for (i = 0; i < nb_components; i++) {
373  /* component id */
374  s->component_id[i] = get_bits(&s->gb, 8) - 1;
375  h_count[i] = get_bits(&s->gb, 4);
376  v_count[i] = get_bits(&s->gb, 4);
377  /* compute hmax and vmax (only used in interleaved case) */
378  if (h_count[i] > s->h_max)
379  s->h_max = h_count[i];
380  if (v_count[i] > s->v_max)
381  s->v_max = v_count[i];
382  s->quant_index[i] = get_bits(&s->gb, 8);
383  if (s->quant_index[i] >= 4) {
384  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
385  return AVERROR_INVALIDDATA;
386  }
387  if (!h_count[i] || !v_count[i]) {
388  av_log(s->avctx, AV_LOG_ERROR,
389  "Invalid sampling factor in component %d %d:%d\n",
390  i, h_count[i], v_count[i]);
391  return AVERROR_INVALIDDATA;
392  }
393 
394  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
395  i, h_count[i], v_count[i],
396  s->component_id[i], s->quant_index[i]);
397  }
398  if ( nb_components == 4
399  && s->component_id[0] == 'C' - 1
400  && s->component_id[1] == 'M' - 1
401  && s->component_id[2] == 'Y' - 1
402  && s->component_id[3] == 'K' - 1)
403  s->adobe_transform = 0;
404 
405  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
406  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
407  return AVERROR_PATCHWELCOME;
408  }
409 
410  if (s->bayer) {
411  if (nb_components == 2) {
412  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
413  width stored in their SOF3 markers is the width of each one. We only output
414  a single component, therefore we need to adjust the output image width. We
415  handle the deinterleaving (but not the debayering) in this file. */
416  width *= 2;
417  }
418  /* They can also contain 1 component, which is double the width and half the height
419  of the final image (rows are interleaved). We don't handle the decoding in this
420  file, but leave that to the TIFF/DNG decoder. */
421  }
422 
423  /* if different size, realloc/alloc picture */
424  if (width != s->width || height != s->height || bits != s->bits ||
425  memcmp(s->h_count, h_count, sizeof(h_count)) ||
426  memcmp(s->v_count, v_count, sizeof(v_count))) {
427  size_change = 1;
428 
429  s->width = width;
430  s->height = height;
431  s->bits = bits;
432  memcpy(s->h_count, h_count, sizeof(h_count));
433  memcpy(s->v_count, v_count, sizeof(v_count));
434  s->interlaced = 0;
435  s->got_picture = 0;
436 
437  /* test interlaced mode */
438  if (s->first_picture &&
439  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
440  s->orig_height != 0 &&
441  s->height < ((s->orig_height * 3) / 4)) {
442  s->interlaced = 1;
443  s->bottom_field = s->interlace_polarity;
444  s->picture_ptr->interlaced_frame = 1;
445  s->picture_ptr->top_field_first = !s->interlace_polarity;
446  height *= 2;
447  }
448 
449  ret = ff_set_dimensions(s->avctx, width, height);
450  if (ret < 0)
451  return ret;
452 
453  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
454  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
455  s->orig_height < height)
456  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
464  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
465  if (s->avctx->height <= 0)
466  return AVERROR_INVALIDDATA;
467  }
468 
469  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
470  if (s->progressive) {
471  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
472  return AVERROR_INVALIDDATA;
473  }
474  } else {
475  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
476  s->rgb = 1;
477  else if (!s->lossless)
478  s->rgb = 0;
479  /* XXX: not complete test ! */
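 /* pix_fmt_id packs the horizontal/vertical sampling factors of the four
  * components into nibbles, MSB first: h0 v0 h1 v1 h2 v2 h3 v3. The switch
  * below matches this value against the known subsampling layouts. */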
480  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
481  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
482  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
483  (s->h_count[3] << 4) | s->v_count[3];
484  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
485  /* NOTE we do not allocate pictures large enough for the possible
486  * padding of h/v_count being 4 */
487  if (!(pix_fmt_id & 0xD0D0D0D0))
488  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
489  if (!(pix_fmt_id & 0x0D0D0D0D))
490  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
491 
492  for (i = 0; i < 8; i++) {
493  int j = 6 + (i&1) - (i&6);
494  int is = (pix_fmt_id >> (4*i)) & 0xF;
495  int js = (pix_fmt_id >> (4*j)) & 0xF;
496 
497  if (is == 1 && js != 2 && (i < 2 || i > 5))
498  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
499  if (is == 1 && js != 2 && (i < 2 || i > 5))
500  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
501 
502  if (is == 1 && js == 2) {
503  if (i & 1) s->upscale_h[j/2] = 1;
504  else s->upscale_v[j/2] = 1;
505  }
506  }
507 
508  if (s->bayer) {
509  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
510  goto unk_pixfmt;
511  }
512 
513  switch (pix_fmt_id) {
514  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
515  if (!s->bayer)
516  goto unk_pixfmt;
517  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
518  break;
519  case 0x11111100:
520  if (s->rgb)
521  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
522  else {
523  if ( s->adobe_transform == 0
524  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
525  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
526  } else {
527  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
528  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
529  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
530  }
531  }
532  av_assert0(s->nb_components == 3);
533  break;
534  case 0x11111111:
535  if (s->rgb)
536  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
537  else {
538  if (s->adobe_transform == 0 && s->bits <= 8) {
539  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
540  } else {
541  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
542  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
543  }
544  }
545  av_assert0(s->nb_components == 4);
546  break;
547  case 0x22111122:
548  case 0x22111111:
549  if (s->adobe_transform == 0 && s->bits <= 8) {
550  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
551  s->upscale_v[1] = s->upscale_v[2] = 1;
552  s->upscale_h[1] = s->upscale_h[2] = 1;
553  } else if (s->adobe_transform == 2 && s->bits <= 8) {
554  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
555  s->upscale_v[1] = s->upscale_v[2] = 1;
556  s->upscale_h[1] = s->upscale_h[2] = 1;
557  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
558  } else {
559  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
560  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
561  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
562  }
563  av_assert0(s->nb_components == 4);
564  break;
565  case 0x12121100:
566  case 0x22122100:
567  case 0x21211100:
568  case 0x21112100:
569  case 0x22211200:
570  case 0x22221100:
571  case 0x22112200:
572  case 0x11222200:
573  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
574  else
575  goto unk_pixfmt;
576  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
577  break;
578  case 0x11000000:
579  case 0x13000000:
580  case 0x14000000:
581  case 0x31000000:
582  case 0x33000000:
583  case 0x34000000:
584  case 0x41000000:
585  case 0x43000000:
586  case 0x44000000:
587  if(s->bits <= 8)
588  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
589  else
590  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
591  break;
592  case 0x12111100:
593  case 0x14121200:
594  case 0x14111100:
595  case 0x22211100:
596  case 0x22112100:
597  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
598  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
599  else
600  goto unk_pixfmt;
601  s->upscale_v[0] = s->upscale_v[1] = 1;
602  } else {
603  if (pix_fmt_id == 0x14111100)
604  s->upscale_v[1] = s->upscale_v[2] = 1;
605  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
606  else
607  goto unk_pixfmt;
608  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
609  }
610  break;
611  case 0x21111100:
612  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
613  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
614  else
615  goto unk_pixfmt;
616  s->upscale_h[0] = s->upscale_h[1] = 1;
617  } else {
618  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
619  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
620  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
621  }
622  break;
623  case 0x31111100:
624  if (s->bits > 8)
625  goto unk_pixfmt;
626  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
627  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
628  s->upscale_h[1] = s->upscale_h[2] = 2;
629  break;
630  case 0x22121100:
631  case 0x22111200:
632  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
633  else
634  goto unk_pixfmt;
635  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
636  break;
637  case 0x22111100:
638  case 0x23111100:
639  case 0x42111100:
640  case 0x24111100:
641  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
642  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
643  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
644  if (pix_fmt_id == 0x42111100) {
645  if (s->bits > 8)
646  goto unk_pixfmt;
647  s->upscale_h[1] = s->upscale_h[2] = 1;
648  } else if (pix_fmt_id == 0x24111100) {
649  if (s->bits > 8)
650  goto unk_pixfmt;
651  s->upscale_v[1] = s->upscale_v[2] = 1;
652  } else if (pix_fmt_id == 0x23111100) {
653  if (s->bits > 8)
654  goto unk_pixfmt;
655  s->upscale_v[1] = s->upscale_v[2] = 2;
656  }
657  break;
658  case 0x41111100:
659  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
660  else
661  goto unk_pixfmt;
662  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
663  break;
664  default:
665  unk_pixfmt:
666  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
667  memset(s->upscale_h, 0, sizeof(s->upscale_h));
668  memset(s->upscale_v, 0, sizeof(s->upscale_v));
669  return AVERROR_PATCHWELCOME;
670  }
671  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
672  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
673  return AVERROR_PATCHWELCOME;
674  }
675  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
676  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
677  return AVERROR_PATCHWELCOME;
678  }
679  if (s->ls) {
680  memset(s->upscale_h, 0, sizeof(s->upscale_h));
681  memset(s->upscale_v, 0, sizeof(s->upscale_v));
682  if (s->nb_components == 3) {
683  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
684  } else if (s->nb_components != 1) {
685  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
686  return AVERROR_PATCHWELCOME;
687  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
688  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
689  else if (s->bits <= 8)
690  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
691  else
692  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
693  }
694 
695  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
696  if (!s->pix_desc) {
697  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
698  return AVERROR_BUG;
699  }
700 
701  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
702  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
703  } else {
704  enum AVPixelFormat pix_fmts[] = {
705 #if CONFIG_MJPEG_NVDEC_HWACCEL
706  AV_PIX_FMT_CUDA,
707 #endif
708 #if CONFIG_MJPEG_VAAPI_HWACCEL
709  AV_PIX_FMT_VAAPI,
710 #endif
711  s->avctx->pix_fmt,
712  AV_PIX_FMT_NONE,
713  };
714  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
715  if (s->hwaccel_pix_fmt < 0)
716  return AVERROR(EINVAL);
717 
718  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
719  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
720  }
721 
722  if (s->avctx->skip_frame == AVDISCARD_ALL) {
723  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
724  s->picture_ptr->key_frame = 1;
725  s->got_picture = 1;
726  return 0;
727  }
728 
729  av_frame_unref(s->picture_ptr);
730  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
731  return -1;
732  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
733  s->picture_ptr->key_frame = 1;
734  s->got_picture = 1;
735 
736  // Let's clear the palette to avoid leaving uninitialized values in it
737  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
738  memset(s->picture_ptr->data[1], 0, 1024);
739 
740  for (i = 0; i < 4; i++)
741  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
742 
743  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
744  s->width, s->height, s->linesize[0], s->linesize[1],
745  s->interlaced, s->avctx->height);
746 
747  }
748 
749  if ((s->rgb && !s->lossless && !s->ls) ||
750  (!s->rgb && s->ls && s->nb_components > 1) ||
751  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
752  ) {
753  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
754  return AVERROR_PATCHWELCOME;
755  }
756 
757  /* totally blank picture as progressive JPEG will only add details to it */
758  if (s->progressive) {
759  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
760  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
761  for (i = 0; i < s->nb_components; i++) {
762  int size = bw * bh * s->h_count[i] * s->v_count[i];
763  av_freep(&s->blocks[i]);
764  av_freep(&s->last_nnz[i]);
765  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
766  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
767  if (!s->blocks[i] || !s->last_nnz[i])
768  return AVERROR(ENOMEM);
769  s->block_stride[i] = bw * s->h_count[i];
770  }
771  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
772  }
773 
774  if (s->avctx->hwaccel) {
775  s->hwaccel_picture_private =
776  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
777  if (!s->hwaccel_picture_private)
778  return AVERROR(ENOMEM);
779 
780  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
781  s->raw_image_buffer_size);
782  if (ret < 0)
783  return ret;
784  }
785 
786  return 0;
787 }
788 
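/* Decode one Huffman-coded DC difference: the VLC symbol gives the number of
 * magnitude bits (0-16) and get_xbits() then reads that many bits as a signed
 * value. 0xfffff is used as an error sentinel since it cannot be a valid result. */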
789 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
790 {
791  int code;
792  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
793  if (code < 0 || code > 16) {
794  av_log(s->avctx, AV_LOG_WARNING,
795  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
796  0, dc_index, &s->vlcs[0][dc_index]);
797  return 0xfffff;
798  }
799 
800  if (code)
801  return get_xbits(&s->gb, code);
802  else
803  return 0;
804 }
805 
806 /* decode block and dequantize */
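/* Each AC VLC symbol packs a zero-run length in its high nibble and the number
 * of magnitude bits in its low nibble; coefficients are written through the
 * scantable permutation and scaled by the quantization matrix. The AC table
 * built by ff_mjpeg_build_vlc() folds the implicit advance to the next
 * coefficient into the run, and maps EOB to a run large enough to push the
 * index past 63 and end the loop. */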
807 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
808  int dc_index, int ac_index, uint16_t *quant_matrix)
809 {
810  int code, i, j, level, val;
811 
812  /* DC coef */
813  val = mjpeg_decode_dc(s, dc_index);
814  if (val == 0xfffff) {
815  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
816  return AVERROR_INVALIDDATA;
817  }
818  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
819  val = av_clip_int16(val);
820  s->last_dc[component] = val;
821  block[0] = val;
822  /* AC coefs */
823  i = 0;
824  {OPEN_READER(re, &s->gb);
825  do {
826  UPDATE_CACHE(re, &s->gb);
827  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
828 
829  i += ((unsigned)code) >> 4;
830  code &= 0xf;
831  if (code) {
832  if (code > MIN_CACHE_BITS - 16)
833  UPDATE_CACHE(re, &s->gb);
834 
835  {
836  int cache = GET_CACHE(re, &s->gb);
837  int sign = (~cache) >> 31;
838  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
839  }
840 
841  LAST_SKIP_BITS(re, &s->gb, code);
842 
843  if (i > 63) {
844  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
845  return AVERROR_INVALIDDATA;
846  }
847  j = s->scantable.permutated[i];
848  block[j] = level * quant_matrix[i];
849  }
850  } while (i < 63);
851  CLOSE_READER(re, &s->gb);}
852 
853  return 0;
854 }
855 
856 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
857                                  int component, int dc_index,
858  uint16_t *quant_matrix, int Al)
859 {
860  unsigned val;
861  s->bdsp.clear_block(block);
862  val = mjpeg_decode_dc(s, dc_index);
863  if (val == 0xfffff) {
864  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
865  return AVERROR_INVALIDDATA;
866  }
867  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
868  s->last_dc[component] = val;
869  block[0] = val;
870  return 0;
871 }
872 
873 /* decode block and dequantize - progressive JPEG version */
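/* A progressive AC scan covers only the spectral band ss..se of one component,
 * with coefficient values scaled by 2^Al (successive approximation). An EOB
 * symbol carries a run length of entirely-zero bands that spans the following
 * blocks, which is why *EOBRUN is kept across calls. */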
874 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
875                                     uint8_t *last_nnz, int ac_index,
876  uint16_t *quant_matrix,
877  int ss, int se, int Al, int *EOBRUN)
878 {
879  int code, i, j, val, run;
880  unsigned level;
881 
882  if (*EOBRUN) {
883  (*EOBRUN)--;
884  return 0;
885  }
886 
887  {
888  OPEN_READER(re, &s->gb);
889  for (i = ss; ; i++) {
890  UPDATE_CACHE(re, &s->gb);
891  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
892 
893  run = ((unsigned) code) >> 4;
894  code &= 0xF;
895  if (code) {
896  i += run;
897  if (code > MIN_CACHE_BITS - 16)
898  UPDATE_CACHE(re, &s->gb);
899 
900  {
901  int cache = GET_CACHE(re, &s->gb);
902  int sign = (~cache) >> 31;
903  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
904  }
905 
906  LAST_SKIP_BITS(re, &s->gb, code);
907 
908  if (i >= se) {
909  if (i == se) {
910  j = s->scantable.permutated[se];
911  block[j] = level * (quant_matrix[se] << Al);
912  break;
913  }
914  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
915  return AVERROR_INVALIDDATA;
916  }
917  j = s->scantable.permutated[i];
918  block[j] = level * (quant_matrix[i] << Al);
919  } else {
920  if (run == 0xF) {// ZRL - skip 15 coefficients
921  i += 15;
922  if (i >= se) {
923  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
924  return AVERROR_INVALIDDATA;
925  }
926  } else {
927  val = (1 << run);
928  if (run) {
929  UPDATE_CACHE(re, &s->gb);
930  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
931  LAST_SKIP_BITS(re, &s->gb, run);
932  }
933  *EOBRUN = val - 1;
934  break;
935  }
936  }
937  }
938  CLOSE_READER(re, &s->gb);
939  }
940 
941  if (i > *last_nnz)
942  *last_nnz = i;
943 
944  return 0;
945 }
946 
947 #define REFINE_BIT(j) { \
948  UPDATE_CACHE(re, &s->gb); \
949  sign = block[j] >> 15; \
950  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
951  ((quant_matrix[i] ^ sign) - sign) << Al; \
952  LAST_SKIP_BITS(re, &s->gb, 1); \
953 }
954 
955 #define ZERO_RUN \
956 for (; ; i++) { \
957  if (i > last) { \
958  i += run; \
959  if (i > se) { \
960  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
961  return -1; \
962  } \
963  break; \
964  } \
965  j = s->scantable.permutated[i]; \
966  if (block[j]) \
967  REFINE_BIT(j) \
968  else if (run-- == 0) \
969  break; \
970 }
971 
972 /* decode block and dequantize - progressive JPEG refinement pass */
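/* Refinement pass (Ah != 0): coefficients that are already non-zero receive one
 * correction bit each (REFINE_BIT), while newly significant coefficients are
 * coded as a run/sign pair; EOBRUN again spans whole blocks. */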
973 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
974                                    uint8_t *last_nnz,
975  int ac_index, uint16_t *quant_matrix,
976  int ss, int se, int Al, int *EOBRUN)
977 {
978  int code, i = ss, j, sign, val, run;
979  int last = FFMIN(se, *last_nnz);
980 
981  OPEN_READER(re, &s->gb);
982  if (*EOBRUN) {
983  (*EOBRUN)--;
984  } else {
985  for (; ; i++) {
986  UPDATE_CACHE(re, &s->gb);
987  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
988 
989  if (code & 0xF) {
990  run = ((unsigned) code) >> 4;
991  UPDATE_CACHE(re, &s->gb);
992  val = SHOW_UBITS(re, &s->gb, 1);
993  LAST_SKIP_BITS(re, &s->gb, 1);
994  ZERO_RUN;
995  j = s->scantable.permutated[i];
996  val--;
997  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
998  if (i == se) {
999  if (i > *last_nnz)
1000  *last_nnz = i;
1001  CLOSE_READER(re, &s->gb);
1002  return 0;
1003  }
1004  } else {
1005  run = ((unsigned) code) >> 4;
1006  if (run == 0xF) {
1007  ZERO_RUN;
1008  } else {
1009  val = run;
1010  run = (1 << run);
1011  if (val) {
1012  UPDATE_CACHE(re, &s->gb);
1013  run += SHOW_UBITS(re, &s->gb, val);
1014  LAST_SKIP_BITS(re, &s->gb, val);
1015  }
1016  *EOBRUN = run - 1;
1017  break;
1018  }
1019  }
1020  }
1021 
1022  if (i > *last_nnz)
1023  *last_nnz = i;
1024  }
1025 
1026  for (; i <= last; i++) {
1027  j = s->scantable.permutated[i];
1028  if (block[j])
1029  REFINE_BIT(j)
1030  }
1031  CLOSE_READER(re, &s->gb);
1032 
1033  return 0;
1034 }
1035 #undef REFINE_BIT
1036 #undef ZERO_RUN
1037 
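/* Check for an RSTn marker at a restart-interval boundary: realign the bit
 * reader to a byte boundary, skip fill bytes and the marker, and reset the DC
 * predictors; returns 1 if a restart actually occurred. For the THP variant
 * the DC predictors are reset at every restart interval even without a marker. */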
1038 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1039 {
1040  int i;
1041  int reset = 0;
1042 
1043  if (s->restart_interval) {
1044  s->restart_count--;
1045  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1046  align_get_bits(&s->gb);
1047  for (i = 0; i < nb_components; i++) /* reset dc */
1048  s->last_dc[i] = (4 << s->bits);
1049  }
1050 
1051  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1052  /* skip RSTn */
1053  if (s->restart_count == 0) {
1054  if( show_bits(&s->gb, i) == (1 << i) - 1
1055  || show_bits(&s->gb, i) == 0xFF) {
1056  int pos = get_bits_count(&s->gb);
1057  align_get_bits(&s->gb);
1058  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1059  skip_bits(&s->gb, 8);
1060  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1061  for (i = 0; i < nb_components; i++) /* reset dc */
1062  s->last_dc[i] = (4 << s->bits);
1063  reset = 1;
1064  } else
1065  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1066  }
1067  }
1068  }
1069  return reset;
1070 }
1071 
1072 /* Handles 1 to 4 components */
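/* Lossless (SOF3) scan for packed RGB/Bayer-style layouts: each sample is the
 * Huffman-coded difference from a prediction chosen by the scan's predictor
 * (left, top, top-left or a combination, see the PREDICT macro), shifted by
 * point_transform. The decoded row is then rearranged into the output pixel
 * order, undoing the RCT/Pegasus-RCT colour transforms when present. */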
1073 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1074 {
1075  int i, mb_x, mb_y;
1076  unsigned width;
1077  uint16_t (*buffer)[4];
1078  int left[4], top[4], topleft[4];
1079  const int linesize = s->linesize[0];
1080  const int mask = ((1 << s->bits) - 1) << point_transform;
1081  int resync_mb_y = 0;
1082  int resync_mb_x = 0;
1083  int vpred[6];
1084 
1085  if (!s->bayer && s->nb_components < 3)
1086  return AVERROR_INVALIDDATA;
1087  if (s->bayer && s->nb_components > 2)
1088  return AVERROR_INVALIDDATA;
1089  if (s->nb_components <= 0 || s->nb_components > 4)
1090  return AVERROR_INVALIDDATA;
1091  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1092  return AVERROR_INVALIDDATA;
1093 
1094 
1095  s->restart_count = s->restart_interval;
1096 
1097  if (s->restart_interval == 0)
1098  s->restart_interval = INT_MAX;
1099 
1100  if (s->bayer)
1101  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1102  else
1103  width = s->mb_width;
1104 
1105  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1106  if (!s->ljpeg_buffer)
1107  return AVERROR(ENOMEM);
1108 
1109  buffer = s->ljpeg_buffer;
1110 
1111  for (i = 0; i < 4; i++)
1112  buffer[0][i] = 1 << (s->bits - 1);
1113 
1114  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1115  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1116 
1117  if (s->interlaced && s->bottom_field)
1118  ptr += linesize >> 1;
1119 
1120  for (i = 0; i < 4; i++)
1121  top[i] = left[i] = topleft[i] = buffer[0][i];
1122 
1123  if ((mb_y * s->width) % s->restart_interval == 0) {
1124  for (i = 0; i < 6; i++)
1125  vpred[i] = 1 << (s->bits-1);
1126  }
1127 
1128  for (mb_x = 0; mb_x < width; mb_x++) {
1129  int modified_predictor = predictor;
1130 
1131  if (get_bits_left(&s->gb) < 1) {
1132  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1133  return AVERROR_INVALIDDATA;
1134  }
1135 
1136  if (s->restart_interval && !s->restart_count){
1137  s->restart_count = s->restart_interval;
1138  resync_mb_x = mb_x;
1139  resync_mb_y = mb_y;
1140  for(i=0; i<4; i++)
1141  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1142  }
1143  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1144  modified_predictor = 1;
1145 
1146  for (i=0;i<nb_components;i++) {
1147  int pred, dc;
1148 
1149  topleft[i] = top[i];
1150  top[i] = buffer[mb_x][i];
1151 
1152  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1153  if(dc == 0xFFFFF)
1154  return -1;
1155 
1156  if (!s->bayer || mb_x) {
1157  pred = left[i];
1158  } else { /* This path runs only for the first line in bayer images */
1159  vpred[i] += dc;
1160  pred = vpred[i] - dc;
1161  }
1162 
1163  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1164 
1165  left[i] = buffer[mb_x][i] =
1166  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1167  }
1168 
1169  if (s->restart_interval && !--s->restart_count) {
1170  align_get_bits(&s->gb);
1171  skip_bits(&s->gb, 16); /* skip RSTn */
1172  }
1173  }
1174  if (s->rct && s->nb_components == 4) {
1175  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1176  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1177  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1178  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1179  ptr[4*mb_x + 0] = buffer[mb_x][3];
1180  }
1181  } else if (s->nb_components == 4) {
1182  for(i=0; i<nb_components; i++) {
1183  int c= s->comp_index[i];
1184  if (s->bits <= 8) {
1185  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1186  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1187  }
1188  } else if(s->bits == 9) {
1189  return AVERROR_PATCHWELCOME;
1190  } else {
1191  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1192  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1193  }
1194  }
1195  }
1196  } else if (s->rct) {
1197  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1198  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1199  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1200  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1201  }
1202  } else if (s->pegasus_rct) {
1203  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1204  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1205  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1206  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1207  }
1208  } else if (s->bayer) {
1209  if (nb_components == 1) {
1210  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1211  for (mb_x = 0; mb_x < width; mb_x++)
1212  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1213  } else if (nb_components == 2) {
1214  for (mb_x = 0; mb_x < width; mb_x++) {
1215  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1216  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1217  }
1218  }
1219  } else {
1220  for(i=0; i<nb_components; i++) {
1221  int c= s->comp_index[i];
1222  if (s->bits <= 8) {
1223  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1224  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1225  }
1226  } else if(s->bits == 9) {
1227  return AVERROR_PATCHWELCOME;
1228  } else {
1229  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1230  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1231  }
1232  }
1233  }
1234  }
1235  }
1236  return 0;
1237 }
1238 
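/* Lossless (SOF3) scan for planar YUV-style layouts: samples are predicted from
 * already-decoded left/top/top-left neighbours (PREDICT) and corrected with a
 * Huffman-coded difference; rows at a resync point and picture edges fall back
 * to the default/edge predictions. */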
1239 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1240                                  int point_transform, int nb_components)
1241 {
1242  int i, mb_x, mb_y, mask;
1243  int bits= (s->bits+7)&~7;
1244  int resync_mb_y = 0;
1245  int resync_mb_x = 0;
1246 
1247  point_transform += bits - s->bits;
1248  mask = ((1 << s->bits) - 1) << point_transform;
1249 
1250  av_assert0(nb_components>=1 && nb_components<=4);
1251 
1252  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1253  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1254  if (get_bits_left(&s->gb) < 1) {
1255  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1256  return AVERROR_INVALIDDATA;
1257  }
1258  if (s->restart_interval && !s->restart_count){
1259  s->restart_count = s->restart_interval;
1260  resync_mb_x = mb_x;
1261  resync_mb_y = mb_y;
1262  }
1263 
1264  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1265  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1266  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1267  for (i = 0; i < nb_components; i++) {
1268  uint8_t *ptr;
1269  uint16_t *ptr16;
1270  int n, h, v, x, y, c, j, linesize;
1271  n = s->nb_blocks[i];
1272  c = s->comp_index[i];
1273  h = s->h_scount[i];
1274  v = s->v_scount[i];
1275  x = 0;
1276  y = 0;
1277  linesize= s->linesize[c];
1278 
1279  if(bits>8) linesize /= 2;
1280 
1281  for(j=0; j<n; j++) {
1282  int pred, dc;
1283 
1284  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1285  if(dc == 0xFFFFF)
1286  return -1;
1287  if ( h * mb_x + x >= s->width
1288  || v * mb_y + y >= s->height) {
1289  // Nothing to do
1290  } else if (bits<=8) {
1291  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1292  if(y==0 && toprow){
1293  if(x==0 && leftcol){
1294  pred= 1 << (bits - 1);
1295  }else{
1296  pred= ptr[-1];
1297  }
1298  }else{
1299  if(x==0 && leftcol){
1300  pred= ptr[-linesize];
1301  }else{
1302  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1303  }
1304  }
1305 
1306  if (s->interlaced && s->bottom_field)
1307  ptr += linesize >> 1;
1308  pred &= mask;
1309  *ptr= pred + ((unsigned)dc << point_transform);
1310  }else{
1311  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1312  if(y==0 && toprow){
1313  if(x==0 && leftcol){
1314  pred= 1 << (bits - 1);
1315  }else{
1316  pred= ptr16[-1];
1317  }
1318  }else{
1319  if(x==0 && leftcol){
1320  pred= ptr16[-linesize];
1321  }else{
1322  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1323  }
1324  }
1325 
1326  if (s->interlaced && s->bottom_field)
1327  ptr16 += linesize >> 1;
1328  pred &= mask;
1329  *ptr16= pred + ((unsigned)dc << point_transform);
1330  }
1331  if (++x == h) {
1332  x = 0;
1333  y++;
1334  }
1335  }
1336  }
1337  } else {
1338  for (i = 0; i < nb_components; i++) {
1339  uint8_t *ptr;
1340  uint16_t *ptr16;
1341  int n, h, v, x, y, c, j, linesize, dc;
1342  n = s->nb_blocks[i];
1343  c = s->comp_index[i];
1344  h = s->h_scount[i];
1345  v = s->v_scount[i];
1346  x = 0;
1347  y = 0;
1348  linesize = s->linesize[c];
1349 
1350  if(bits>8) linesize /= 2;
1351 
1352  for (j = 0; j < n; j++) {
1353  int pred;
1354 
1355  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1356  if(dc == 0xFFFFF)
1357  return -1;
1358  if ( h * mb_x + x >= s->width
1359  || v * mb_y + y >= s->height) {
1360  // Nothing to do
1361  } else if (bits<=8) {
1362  ptr = s->picture_ptr->data[c] +
1363  (linesize * (v * mb_y + y)) +
1364  (h * mb_x + x); //FIXME optimize this crap
1365  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1366 
1367  pred &= mask;
1368  *ptr = pred + ((unsigned)dc << point_transform);
1369  }else{
1370  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1371  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1372 
1373  pred &= mask;
1374  *ptr16= pred + ((unsigned)dc << point_transform);
1375  }
1376 
1377  if (++x == h) {
1378  x = 0;
1379  y++;
1380  }
1381  }
1382  }
1383  }
1384  if (s->restart_interval && !--s->restart_count) {
1385  align_get_bits(&s->gb);
1386  skip_bits(&s->gb, 16); /* skip RSTn */
1387  }
1388  }
1389  }
1390  return 0;
1391 }
1392 
1393 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1394                                               uint8_t *dst, const uint8_t *src,
1395  int linesize, int lowres)
1396 {
1397  switch (lowres) {
1398  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1399  break;
1400  case 1: copy_block4(dst, src, linesize, linesize, 4);
1401  break;
1402  case 2: copy_block2(dst, src, linesize, linesize, 2);
1403  break;
1404  case 3: *dst = *src;
1405  break;
1406  }
1407 }
1408 
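/* For bit depths that are not a multiple of 8 the IDCT output is left-shifted
 * so the samples occupy the full range of the 8- or 16-bit pixel format. */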
1409 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1410 {
1411  int block_x, block_y;
1412  int size = 8 >> s->avctx->lowres;
1413  if (s->bits > 8) {
1414  for (block_y=0; block_y<size; block_y++)
1415  for (block_x=0; block_x<size; block_x++)
1416  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1417  } else {
1418  for (block_y=0; block_y<size; block_y++)
1419  for (block_x=0; block_x<size; block_x++)
1420  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1421  }
1422 }
1423 
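/* Baseline/extended sequential Huffman scan: iterate over MCUs and, for each
 * 8x8 block, either copy it from the reference frame (when mb_bitmask marks the
 * macroblock as unchanged) or entropy-decode, dequantize and IDCT it. For
 * progressive frames only the DC coefficients are accumulated here; the IDCT is
 * deferred until all scans have been parsed. */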
1424 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1425  int Al, const uint8_t *mb_bitmask,
1426  int mb_bitmask_size,
1427  const AVFrame *reference)
1428 {
1429  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1430  uint8_t *data[MAX_COMPONENTS];
1431  const uint8_t *reference_data[MAX_COMPONENTS];
1432  int linesize[MAX_COMPONENTS];
1433  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1434  int bytes_per_pixel = 1 + (s->bits > 8);
1435 
1436  if (mb_bitmask) {
1437  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1438  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1439  return AVERROR_INVALIDDATA;
1440  }
1441  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1442  }
1443 
1444  s->restart_count = 0;
1445 
1446  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1447  &chroma_v_shift);
1448  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1449  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1450 
1451  for (i = 0; i < nb_components; i++) {
1452  int c = s->comp_index[i];
1453  data[c] = s->picture_ptr->data[c];
1454  reference_data[c] = reference ? reference->data[c] : NULL;
1455  linesize[c] = s->linesize[c];
1456  s->coefs_finished[c] |= 1;
1457  }
1458 
1459  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1460  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1461  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1462 
1463  if (s->restart_interval && !s->restart_count)
1464  s->restart_count = s->restart_interval;
1465 
1466  if (get_bits_left(&s->gb) < 0) {
1467  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1468  -get_bits_left(&s->gb));
1469  return AVERROR_INVALIDDATA;
1470  }
1471  for (i = 0; i < nb_components; i++) {
1472  uint8_t *ptr;
1473  int n, h, v, x, y, c, j;
1474  int block_offset;
1475  n = s->nb_blocks[i];
1476  c = s->comp_index[i];
1477  h = s->h_scount[i];
1478  v = s->v_scount[i];
1479  x = 0;
1480  y = 0;
1481  for (j = 0; j < n; j++) {
1482  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1483  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1484 
1485  if (s->interlaced && s->bottom_field)
1486  block_offset += linesize[c] >> 1;
1487  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1488  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1489  ptr = data[c] + block_offset;
1490  } else
1491  ptr = NULL;
1492  if (!s->progressive) {
1493  if (copy_mb) {
1494  if (ptr)
1495  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1496  linesize[c], s->avctx->lowres);
1497 
1498  } else {
1499  s->bdsp.clear_block(s->block);
1500  if (decode_block(s, s->block, i,
1501  s->dc_index[i], s->ac_index[i],
1502  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1503  av_log(s->avctx, AV_LOG_ERROR,
1504  "error y=%d x=%d\n", mb_y, mb_x);
1505  return AVERROR_INVALIDDATA;
1506  }
1507  if (ptr) {
1508  s->idsp.idct_put(ptr, linesize[c], s->block);
1509  if (s->bits & 7)
1510  shift_output(s, ptr, linesize[c]);
1511  }
1512  }
1513  } else {
1514  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1515  (h * mb_x + x);
1516  int16_t *block = s->blocks[c][block_idx];
1517  if (Ah)
1518  block[0] += get_bits1(&s->gb) *
1519  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1520  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1521  s->quant_matrixes[s->quant_sindex[i]],
1522  Al) < 0) {
1523  av_log(s->avctx, AV_LOG_ERROR,
1524  "error y=%d x=%d\n", mb_y, mb_x);
1525  return AVERROR_INVALIDDATA;
1526  }
1527  }
1528  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1529  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1530  mb_x, mb_y, x, y, c, s->bottom_field,
1531  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1532  if (++x == h) {
1533  x = 0;
1534  y++;
1535  }
1536  }
1537  }
1538 
1539  handle_rstn(s, nb_components);
1540  }
1541  }
1542  return 0;
1543 }
1544 
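/* Progressive AC scan for a single component: decodes (or refines, when Ah != 0)
 * the coefficient band ss..se of every block into s->blocks[]; nothing is
 * written to the picture until mjpeg_idct_scan_progressive_ac() runs. */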
1545 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1546                                             int se, int Ah, int Al)
1547 {
1548  int mb_x, mb_y;
1549  int EOBRUN = 0;
1550  int c = s->comp_index[0];
1551  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1552 
1553  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1554  if (se < ss || se > 63) {
1555  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1556  return AVERROR_INVALIDDATA;
1557  }
1558 
1559  // s->coefs_finished is a bitmask for coefficients coded
1560  // ss and se are parameters telling start and end coefficients
1561  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1562 
1563  s->restart_count = 0;
1564 
1565  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1566  int block_idx = mb_y * s->block_stride[c];
1567  int16_t (*block)[64] = &s->blocks[c][block_idx];
1568  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1569  if (get_bits_left(&s->gb) <= 0) {
1570  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1571  return AVERROR_INVALIDDATA;
1572  }
1573  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1574  int ret;
1575  if (s->restart_interval && !s->restart_count)
1576  s->restart_count = s->restart_interval;
1577 
1578  if (Ah)
1579  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1580  quant_matrix, ss, se, Al, &EOBRUN);
1581  else
1582  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1583  quant_matrix, ss, se, Al, &EOBRUN);
1584 
1585  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1586  ret = AVERROR_INVALIDDATA;
1587  if (ret < 0) {
1588  av_log(s->avctx, AV_LOG_ERROR,
1589  "error y=%d x=%d\n", mb_y, mb_x);
1590  return AVERROR_INVALIDDATA;
1591  }
1592 
1593  if (handle_rstn(s, 0))
1594  EOBRUN = 0;
1595  }
1596  }
1597  return 0;
1598 }
1599 
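/* Once every progressive pass has been parsed, run the IDCT over the
 * accumulated coefficient blocks of each component and write the final
 * samples to the picture. */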
1600 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1601 {
1602  int mb_x, mb_y;
1603  int c;
1604  const int bytes_per_pixel = 1 + (s->bits > 8);
1605  const int block_size = s->lossless ? 1 : 8;
1606 
1607  for (c = 0; c < s->nb_components; c++) {
1608  uint8_t *data = s->picture_ptr->data[c];
1609  int linesize = s->linesize[c];
1610  int h = s->h_max / s->h_count[c];
1611  int v = s->v_max / s->v_count[c];
1612  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1613  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1614 
1615  if (~s->coefs_finished[c])
1616  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1617 
1618  if (s->interlaced && s->bottom_field)
1619  data += linesize >> 1;
1620 
1621  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1622  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1623  int block_idx = mb_y * s->block_stride[c];
1624  int16_t (*block)[64] = &s->blocks[c][block_idx];
1625  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1626  s->idsp.idct_put(ptr, linesize, *block);
1627  if (s->bits & 7)
1628  shift_output(s, ptr, linesize);
1629  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1630  }
1631  }
1632  }
1633 }
1634 
1635 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1636  int mb_bitmask_size, const AVFrame *reference)
1637 {
1638  int len, nb_components, i, h, v, predictor, point_transform;
1639  int index, id, ret;
1640  const int block_size = s->lossless ? 1 : 8;
1641  int ilv, prev_shift;
1642 
1643  if (!s->got_picture) {
1644  av_log(s->avctx, AV_LOG_WARNING,
1645  "Can not process SOS before SOF, skipping\n");
1646  return -1;
1647  }
1648 
1649  if (reference) {
1650  if (reference->width != s->picture_ptr->width ||
1651  reference->height != s->picture_ptr->height ||
1652  reference->format != s->picture_ptr->format) {
1653  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1654  return AVERROR_INVALIDDATA;
1655  }
1656  }
1657 
1658  /* XXX: verify len field validity */
1659  len = get_bits(&s->gb, 16);
1660  nb_components = get_bits(&s->gb, 8);
1661  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1662  avpriv_report_missing_feature(s->avctx,
1663                                "decode_sos: nb_components (%d)",
1664  nb_components);
1665  return AVERROR_PATCHWELCOME;
1666  }
1667  if (len != 6 + 2 * nb_components) {
1668  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1669  return AVERROR_INVALIDDATA;
1670  }
1671  for (i = 0; i < nb_components; i++) {
1672  id = get_bits(&s->gb, 8) - 1;
1673  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1674  /* find component index */
1675  for (index = 0; index < s->nb_components; index++)
1676  if (id == s->component_id[index])
1677  break;
1678  if (index == s->nb_components) {
1679  av_log(s->avctx, AV_LOG_ERROR,
1680  "decode_sos: index(%d) out of components\n", index);
1681  return AVERROR_INVALIDDATA;
1682  }
1683  /* Metasoft MJPEG codec has Cb and Cr swapped */
1684  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1685  && nb_components == 3 && s->nb_components == 3 && i)
1686  index = 3 - i;
1687 
1688  s->quant_sindex[i] = s->quant_index[index];
1689  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1690  s->h_scount[i] = s->h_count[index];
1691  s->v_scount[i] = s->v_count[index];
1692 
1693  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1694  index = (index+2)%3;
1695 
1696  s->comp_index[i] = index;
1697 
1698  s->dc_index[i] = get_bits(&s->gb, 4);
1699  s->ac_index[i] = get_bits(&s->gb, 4);
1700 
1701  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1702  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1703  goto out_of_range;
1704  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1705  goto out_of_range;
1706  }
1707 
1708  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1709  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1710  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1711  prev_shift = get_bits(&s->gb, 4); /* Ah */
1712  point_transform = get_bits(&s->gb, 4); /* Al */
1713  }else
1714  prev_shift = point_transform = 0;
1715 
1716  if (nb_components > 1) {
1717  /* interleaved stream */
1718  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1719  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1720  } else if (!s->ls) { /* skip this for JPEG-LS */
1721  h = s->h_max / s->h_scount[0];
1722  v = s->v_max / s->v_scount[0];
1723  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1724  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1725  s->nb_blocks[0] = 1;
1726  s->h_scount[0] = 1;
1727  s->v_scount[0] = 1;
1728  }
1729 
1730  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1731  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1732  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1733  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1734  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1735 
1736 
1737  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1738  for (i = s->mjpb_skiptosod; i > 0; i--)
1739  skip_bits(&s->gb, 8);
1740 
1741 next_field:
1742  for (i = 0; i < nb_components; i++)
1743  s->last_dc[i] = (4 << s->bits);
1744 
1745  if (s->avctx->hwaccel) {
1746  int bytes_to_start = get_bits_count(&s->gb) / 8;
1747  av_assert0(bytes_to_start >= 0 &&
1748  s->raw_scan_buffer_size >= bytes_to_start);
1749 
1750  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1751  s->raw_scan_buffer + bytes_to_start,
1752  s->raw_scan_buffer_size - bytes_to_start);
1753  if (ret < 0)
1754  return ret;
1755 
1756  } else if (s->lossless) {
1757  av_assert0(s->picture_ptr == s->picture);
1758  if (CONFIG_JPEGLS_DECODER && s->ls) {
1759 // for () {
1760 // reset_ls_coding_parameters(s, 0);
1761 
1762  if ((ret = ff_jpegls_decode_picture(s, predictor,
1763                                      point_transform, ilv)) < 0)
1764  return ret;
1765  } else {
1766  if (s->rgb || s->bayer) {
1767  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1768  return ret;
1769  } else {
1770  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1771                                   point_transform,
1772  nb_components)) < 0)
1773  return ret;
1774  }
1775  }
1776  } else {
1777  if (s->progressive && predictor) {
1778  av_assert0(s->picture_ptr == s->picture);
1779  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1780                                              ilv, prev_shift,
1781  point_transform)) < 0)
1782  return ret;
1783  } else {
1784  if ((ret = mjpeg_decode_scan(s, nb_components,
1785  prev_shift, point_transform,
1786  mb_bitmask, mb_bitmask_size, reference)) < 0)
1787  return ret;
1788  }
1789  }
1790 
1791  if (s->interlaced &&
1792  get_bits_left(&s->gb) > 32 &&
1793  show_bits(&s->gb, 8) == 0xFF) {
1794  GetBitContext bak = s->gb;
1795  align_get_bits(&bak);
1796  if (show_bits(&bak, 16) == 0xFFD1) {
1797  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1798  s->gb = bak;
1799  skip_bits(&s->gb, 16);
1800  s->bottom_field ^= 1;
1801 
1802  goto next_field;
1803  }
1804  }
1805 
1806  emms_c();
1807  return 0;
1808  out_of_range:
1809  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1810  return AVERROR_INVALIDDATA;
1811 }
1812 
1814 {
1815  if (get_bits(&s->gb, 16) != 4)
1816  return AVERROR_INVALIDDATA;
1817  s->restart_interval = get_bits(&s->gb, 16);
1818  s->restart_count = 0;
1819  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1820  s->restart_interval);
1821 
1822  return 0;
1823 }
1824 
1825 static int mjpeg_decode_app(MJpegDecodeContext *s)
1826 {
1827  int len, id, i;
1828 
1829  len = get_bits(&s->gb, 16);
1830  if (len < 6) {
1831  if (s->bayer) {
1832  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1833  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1834  skip_bits(&s->gb, len);
1835  return 0;
1836  } else
1837  return AVERROR_INVALIDDATA;
1838  }
1839  if (8 * len > get_bits_left(&s->gb))
1840  return AVERROR_INVALIDDATA;
1841 
1842  id = get_bits_long(&s->gb, 32);
1843  len -= 6;
1844 
1845  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1846  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1847  av_fourcc2str(av_bswap32(id)), id, len);
1848 
1849  /* Buggy AVID, it puts EOI only at every 10th frame. */
1850  /* Also, this FourCC is used by non-AVID files too; it holds some
1851  information, but it is always present in AVID-created files. */
1852  if (id == AV_RB32("AVI1")) {
1853  /* structure:
1854  4bytes AVI1
1855  1bytes polarity
1856  1bytes always zero
1857  4bytes field_size
1858  4bytes field_size_less_padding
1859  */
1860  s->buggy_avid = 1;
1861  i = get_bits(&s->gb, 8); len--;
1862  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1863  goto out;
1864  }
1865 
1866  if (id == AV_RB32("JFIF")) {
1867  int t_w, t_h, v1, v2;
1868  if (len < 8)
1869  goto out;
1870  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1871  v1 = get_bits(&s->gb, 8);
1872  v2 = get_bits(&s->gb, 8);
1873  skip_bits(&s->gb, 8);
1874 
1875  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1876  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1877  if ( s->avctx->sample_aspect_ratio.num <= 0
1878  || s->avctx->sample_aspect_ratio.den <= 0) {
1879  s->avctx->sample_aspect_ratio.num = 0;
1880  s->avctx->sample_aspect_ratio.den = 1;
1881  }
1882 
1883  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1884  av_log(s->avctx, AV_LOG_INFO,
1885  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1886  v1, v2,
1887  s->avctx->sample_aspect_ratio.num,
1888  s->avctx->sample_aspect_ratio.den);
1889 
1890  len -= 8;
1891  if (len >= 2) {
1892  t_w = get_bits(&s->gb, 8);
1893  t_h = get_bits(&s->gb, 8);
1894  if (t_w && t_h) {
1895  /* skip thumbnail */
1896  if (len -10 - (t_w * t_h * 3) > 0)
1897  len -= t_w * t_h * 3;
1898  }
1899  len -= 2;
1900  }
1901  goto out;
1902  }
1903 
1904  if ( id == AV_RB32("Adob")
1905  && len >= 7
1906  && show_bits(&s->gb, 8) == 'e'
1907  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1908  skip_bits(&s->gb, 8); /* 'e' */
1909  skip_bits(&s->gb, 16); /* version */
1910  skip_bits(&s->gb, 16); /* flags0 */
1911  skip_bits(&s->gb, 16); /* flags1 */
1912  s->adobe_transform = get_bits(&s->gb, 8);
1913  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1914  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1915  len -= 7;
1916  goto out;
1917  }
1918 
1919  if (id == AV_RB32("LJIF")) {
1920  int rgb = s->rgb;
1921  int pegasus_rct = s->pegasus_rct;
1922  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1923  av_log(s->avctx, AV_LOG_INFO,
1924  "Pegasus lossless jpeg header found\n");
1925  skip_bits(&s->gb, 16); /* version ? */
1926  skip_bits(&s->gb, 16); /* unknown always 0? */
1927  skip_bits(&s->gb, 16); /* unknown always 0? */
1928  skip_bits(&s->gb, 16); /* unknown always 0? */
1929  switch (i=get_bits(&s->gb, 8)) {
1930  case 1:
1931  rgb = 1;
1932  pegasus_rct = 0;
1933  break;
1934  case 2:
1935  rgb = 1;
1936  pegasus_rct = 1;
1937  break;
1938  default:
1939  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1940  }
1941 
1942  len -= 9;
1943  if (s->got_picture)
1944  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1945  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1946  goto out;
1947  }
1948 
1949  s->rgb = rgb;
1950  s->pegasus_rct = pegasus_rct;
1951 
1952  goto out;
1953  }
1954  if (id == AV_RL32("colr") && len > 0) {
1955  s->colr = get_bits(&s->gb, 8);
1956  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1957  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1958  len --;
1959  goto out;
1960  }
1961  if (id == AV_RL32("xfrm") && len > 0) {
1962  s->xfrm = get_bits(&s->gb, 8);
1963  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1964  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1965  len --;
1966  goto out;
1967  }
1968 
1969  /* JPS extension by VRex */
1970  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1971  int flags, layout, type;
1972  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1973  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1974 
1975  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1976  skip_bits(&s->gb, 16); len -= 2; /* block length */
1977  skip_bits(&s->gb, 8); /* reserved */
1978  flags = get_bits(&s->gb, 8);
1979  layout = get_bits(&s->gb, 8);
1980  type = get_bits(&s->gb, 8);
1981  len -= 4;
1982 
1983  av_freep(&s->stereo3d);
1984  s->stereo3d = av_stereo3d_alloc();
1985  if (!s->stereo3d) {
1986  goto out;
1987  }
1988  if (type == 0) {
1989  s->stereo3d->type = AV_STEREO3D_2D;
1990  } else if (type == 1) {
1991  switch (layout) {
1992  case 0x01:
1993  s->stereo3d->type = AV_STEREO3D_LINES;
1994  break;
1995  case 0x02:
1996  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1997  break;
1998  case 0x03:
1999  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2000  break;
2001  }
2002  if (!(flags & 0x04)) {
2003  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2004  }
2005  }
2006  goto out;
2007  }
2008 
2009  /* EXIF metadata */
2010  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2011  GetByteContext gbytes;
2012  int ret, le, ifd_offset, bytes_read;
2013  const uint8_t *aligned;
2014 
2015  skip_bits(&s->gb, 16); // skip padding
2016  len -= 2;
2017 
2018  // init byte wise reading
2019  aligned = align_get_bits(&s->gb);
2020  bytestream2_init(&gbytes, aligned, len);
2021 
2022  // read TIFF header
2023  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2024  if (ret) {
2025  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2026  } else {
2027  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2028 
2029  // read 0th IFD and store the metadata
2030  // (return values > 0 indicate the presence of subimage metadata)
2031  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2032  if (ret < 0) {
2033  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2034  }
2035  }
2036 
2037  bytes_read = bytestream2_tell(&gbytes);
2038  skip_bits(&s->gb, bytes_read << 3);
2039  len -= bytes_read;
2040 
2041  goto out;
2042  }
2043 
2044  /* Apple MJPEG-A */
2045  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2046  id = get_bits_long(&s->gb, 32);
2047  len -= 4;
2048  /* Apple MJPEG-A */
2049  if (id == AV_RB32("mjpg")) {
2050  /* structure:
2051  4bytes field size
2052  4bytes pad field size
2053  4bytes next off
2054  4bytes quant off
2055  4bytes huff off
2056  4bytes image off
2057  4bytes scan off
2058  4bytes data off
2059  */
2060  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2061  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2062  }
2063  }
2064 
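 /* An ICC profile larger than one APP2 segment is split across several
  * markers, each tagged with a sequence number and the total marker count.
  * The chunks are collected in s->iccentries and reassembled into frame side
  * data once all of them have been read. */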
2065  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2066  int id2;
2067  unsigned seqno;
2068  unsigned nummarkers;
2069 
2070  id = get_bits_long(&s->gb, 32);
2071  id2 = get_bits(&s->gb, 24);
2072  len -= 7;
2073  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2074  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2075  goto out;
2076  }
2077 
2078  skip_bits(&s->gb, 8);
2079  seqno = get_bits(&s->gb, 8);
2080  len -= 2;
2081  if (seqno == 0) {
2082  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2083  goto out;
2084  }
2085 
2086  nummarkers = get_bits(&s->gb, 8);
2087  len -= 1;
2088  if (nummarkers == 0) {
2089  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2090  goto out;
2091  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2092  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2093  goto out;
2094  } else if (seqno > nummarkers) {
2095  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2096  goto out;
2097  }
2098 
2099  /* Allocate if this is the first APP2 we've seen. */
2100  if (s->iccnum == 0) {
2101  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2102  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2103  return AVERROR(ENOMEM);
2104  }
2105  s->iccnum = nummarkers;
2106  }
2107 
2108  if (s->iccentries[seqno - 1].data) {
2109  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2110  goto out;
2111  }
2112 
2113  s->iccentries[seqno - 1].length = len;
2114  s->iccentries[seqno - 1].data = av_malloc(len);
2115  if (!s->iccentries[seqno - 1].data) {
2116  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2117  return AVERROR(ENOMEM);
2118  }
2119 
2120  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2121  skip_bits(&s->gb, len << 3);
2122  len = 0;
2123  s->iccread++;
2124 
2125  if (s->iccread > s->iccnum)
2126  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2127  }
2128 
2129 out:
2130  /* slow but needed for extreme adobe jpegs */
2131  if (len < 0)
2132  av_log(s->avctx, AV_LOG_ERROR,
2133  "mjpeg: error, decode_app parser read over the end\n");
2134  while (--len > 0)
2135  skip_bits(&s->gb, 8);
2136 
2137  return 0;
2138 }
2139 
2140 static int mjpeg_decode_com(MJpegDecodeContext *s)
2141 {
2142  int len = get_bits(&s->gb, 16);
2143  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2144  int i;
2145  char *cbuf = av_malloc(len - 1);
2146  if (!cbuf)
2147  return AVERROR(ENOMEM);
2148 
2149  for (i = 0; i < len - 2; i++)
2150  cbuf[i] = get_bits(&s->gb, 8);
2151  if (i > 0 && cbuf[i - 1] == '\n')
2152  cbuf[i - 1] = 0;
2153  else
2154  cbuf[i] = 0;
2155 
2156  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2157  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2158 
2159  /* buggy avid, it puts EOI only at every 10th frame */
2160  if (!strncmp(cbuf, "AVID", 4)) {
2161  parse_avid(s, cbuf, len);
2162  } else if (!strcmp(cbuf, "CS=ITU601"))
2163  s->cs_itu601 = 1;
2164  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2165  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2166  s->flipped = 1;
2167  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2168  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2169  s->multiscope = 2;
2170  }
2171 
2172  av_free(cbuf);
2173  }
2174 
2175  return 0;
2176 }
2177 
2178 /* return the 8 bit start code value and update the search
2179  state. Return -1 if no start code found */
2180 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2181 {
2182  const uint8_t *buf_ptr;
2183  unsigned int v, v2;
2184  int val;
2185  int skipped = 0;
2186 
2187  buf_ptr = *pbuf_ptr;
2188  while (buf_end - buf_ptr > 1) {
2189  v = *buf_ptr++;
2190  v2 = *buf_ptr;
2191  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2192  val = *buf_ptr++;
2193  goto found;
2194  }
2195  skipped++;
2196  }
2197  buf_ptr = buf_end;
2198  val = -1;
2199 found:
2200  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2201  *pbuf_ptr = buf_ptr;
2202  return val;
2203 }
2204 
2205 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2206  const uint8_t **buf_ptr, const uint8_t *buf_end,
2207  const uint8_t **unescaped_buf_ptr,
2208  int *unescaped_buf_size)
2209 {
2210  int start_code;
2211  start_code = find_marker(buf_ptr, buf_end);
2212 
2213  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2214  if (!s->buffer)
2215  return AVERROR(ENOMEM);
2216 
2217  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2218  if (start_code == SOS && !s->ls) {
2219  const uint8_t *src = *buf_ptr;
2220  const uint8_t *ptr = src;
2221  uint8_t *dst = s->buffer;
2222 
2223  #define copy_data_segment(skip) do { \
2224  ptrdiff_t length = (ptr - src) - (skip); \
2225  if (length > 0) { \
2226  memcpy(dst, src, length); \
2227  dst += length; \
2228  src = ptr; \
2229  } \
2230  } while (0)
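 /* In the entropy-coded segment a literal 0xFF byte is stored as 0xFF 0x00
  * and markers may be preceded by 0xFF fill bytes. The loop below copies the
  * scan data while dropping that stuffing and stops at the first marker that
  * is not a restart marker (RST0..RST7). */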
2231 
2232  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2233  ptr = buf_end;
2234  copy_data_segment(0);
2235  } else {
2236  while (ptr < buf_end) {
2237  uint8_t x = *(ptr++);
2238 
2239  if (x == 0xff) {
2240  ptrdiff_t skip = 0;
2241  while (ptr < buf_end && x == 0xff) {
2242  x = *(ptr++);
2243  skip++;
2244  }
2245 
2246  /* 0xFF, 0xFF, ... */
2247  if (skip > 1) {
2248  copy_data_segment(skip);
2249 
2250  /* decrement src as it is equal to ptr after the
2251  * copy_data_segment macro and we might want to
2252  * copy the current value of x later on */
2253  src--;
2254  }
2255 
2256  if (x < RST0 || x > RST7) {
2257  copy_data_segment(1);
2258  if (x)
2259  break;
2260  }
2261  }
2262  }
2263  if (src < ptr)
2264  copy_data_segment(0);
2265  }
2266  #undef copy_data_segment
2267 
2268  *unescaped_buf_ptr = s->buffer;
2269  *unescaped_buf_size = dst - s->buffer;
2270  memset(s->buffer + *unescaped_buf_size, 0,
2271  AV_INPUT_BUFFER_PADDING_SIZE);
2272 
2273  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2274  (buf_end - *buf_ptr) - (dst - s->buffer));
2275  } else if (start_code == SOS && s->ls) {
2276  const uint8_t *src = *buf_ptr;
2277  uint8_t *dst = s->buffer;
2278  int bit_count = 0;
2279  int t = 0, b = 0;
2280  PutBitContext pb;
2281 
2282  /* find marker */
2283  while (src + t < buf_end) {
2284  uint8_t x = src[t++];
2285  if (x == 0xff) {
2286  while ((src + t < buf_end) && x == 0xff)
2287  x = src[t++];
2288  if (x & 0x80) {
2289  t -= FFMIN(2, t);
2290  break;
2291  }
2292  }
2293  }
2294  bit_count = t * 8;
2295  init_put_bits(&pb, dst, t);
2296 
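 /* JPEG-LS bit stuffing differs from baseline JPEG: a 0xFF byte is followed
  * by a single stuffed 0 bit rather than a full 0x00 byte, so each escaped
  * byte contributes only 7 payload bits and bit_count is reduced below. */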
2297  /* unescape bitstream */
2298  while (b < t) {
2299  uint8_t x = src[b++];
2300  put_bits(&pb, 8, x);
2301  if (x == 0xFF && b < t) {
2302  x = src[b++];
2303  if (x & 0x80) {
2304  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2305  x &= 0x7f;
2306  }
2307  put_bits(&pb, 7, x);
2308  bit_count--;
2309  }
2310  }
2311  flush_put_bits(&pb);
2312 
2313  *unescaped_buf_ptr = dst;
2314  *unescaped_buf_size = (bit_count + 7) >> 3;
2315  memset(s->buffer + *unescaped_buf_size, 0,
2316  AV_INPUT_BUFFER_PADDING_SIZE);
2317  } else {
2318  *unescaped_buf_ptr = *buf_ptr;
2319  *unescaped_buf_size = buf_end - *buf_ptr;
2320  }
2321 
2322  return start_code;
2323 }
2324 
2325 static void reset_icc_profile(MJpegDecodeContext *s)
2326 {
2327  int i;
2328 
2329  if (s->iccentries) {
2330  for (i = 0; i < s->iccnum; i++)
2331  av_freep(&s->iccentries[i].data);
2332  av_freep(&s->iccentries);
2333  }
2334 
2335  s->iccread = 0;
2336  s->iccnum = 0;
2337 }
2338 
2339 // SMV JPEG just stacks several output frames into one JPEG picture
2340 // we handle that by setting up the cropping parameters appropriately
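 // Example: with smv_frames_per_jpeg = 4 and avctx->height = 120 the coded
 // picture is 480 pixels tall; sub-frame N is exposed by setting
 // crop_top = N * 120 and crop_bottom = 480 - (N + 1) * 120.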
2341 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2342 {
2343  MJpegDecodeContext *s = avctx->priv_data;
2344  int ret;
2345 
2346  if (s->smv_next_frame > 0) {
2347  av_assert0(s->smv_frame->buf[0]);
2348  av_frame_unref(frame);
2349  ret = av_frame_ref(frame, s->smv_frame);
2350  if (ret < 0)
2351  return ret;
2352  } else {
2353  av_assert0(frame->buf[0]);
2354  av_frame_unref(s->smv_frame);
2355  ret = av_frame_ref(s->smv_frame, frame);
2356  if (ret < 0)
2357  return ret;
2358  }
2359 
2360  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2361 
2362  frame->width = avctx->coded_width;
2363  frame->height = avctx->coded_height;
2364  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2365  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2366 
2367  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2368 
2369  if (s->smv_next_frame == 0)
2370  av_frame_unref(s->smv_frame);
2371 
2372  return 0;
2373 }
2374 
2375 static int mjpeg_get_packet(AVCodecContext *avctx)
2376 {
2377  MJpegDecodeContext *s = avctx->priv_data;
2378  int ret;
2379 
2380  av_packet_unref(s->pkt);
2381  ret = ff_decode_get_packet(avctx, s->pkt);
2382  if (ret < 0)
2383  return ret;
2384 
2385 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2386  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2387  avctx->codec_id == AV_CODEC_ID_AMV) {
2388  ret = ff_sp5x_process_packet(avctx, s->pkt);
2389  if (ret < 0)
2390  return ret;
2391  }
2392 #endif
2393 
2394  s->buf_size = s->pkt->size;
2395 
2396  return 0;
2397 }
2398 
2399 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2400 {
2401  MJpegDecodeContext *s = avctx->priv_data;
2402  const uint8_t *buf_end, *buf_ptr;
2403  const uint8_t *unescaped_buf_ptr;
2404  int hshift, vshift;
2405  int unescaped_buf_size;
2406  int start_code;
2407  int i, index;
2408  int ret = 0;
2409  int is16bit;
2410  AVDictionaryEntry *e = NULL;
2411 
2412  s->force_pal8 = 0;
2413 
2414  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2415  return smv_process_frame(avctx, frame);
2416 
2417  av_dict_free(&s->exif_metadata);
2418  av_freep(&s->stereo3d);
2419  s->adobe_transform = -1;
2420 
2421  if (s->iccnum != 0)
2422  reset_icc_profile(s);
2423 
2424  ret = mjpeg_get_packet(avctx);
2425  if (ret < 0)
2426  return ret;
2427 redo_for_pal8:
2428  buf_ptr = s->pkt->data;
2429  buf_end = s->pkt->data + s->pkt->size;
2430  while (buf_ptr < buf_end) {
2431  /* find next start marker */
2432  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2433  &unescaped_buf_ptr,
2434  &unescaped_buf_size);
2435  /* EOF */
2436  if (start_code < 0) {
2437  break;
2438  } else if (unescaped_buf_size > INT_MAX / 8) {
2439  av_log(avctx, AV_LOG_ERROR,
2440  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2441  start_code, unescaped_buf_size, s->pkt->size);
2442  return AVERROR_INVALIDDATA;
2443  }
2444  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2445  start_code, buf_end - buf_ptr);
2446 
2447  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2448 
2449  if (ret < 0) {
2450  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2451  goto fail;
2452  }
2453 
2454  s->start_code = start_code;
2455  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2456  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2457 
2458  /* process markers */
2459  if (start_code >= RST0 && start_code <= RST7) {
2460  av_log(avctx, AV_LOG_DEBUG,
2461  "restart marker: %d\n", start_code & 0x0f);
2462  /* APP fields */
2463  } else if (start_code >= APP0 && start_code <= APP15) {
2464  if ((ret = mjpeg_decode_app(s)) < 0)
2465  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2466  av_err2str(ret));
2467  /* Comment */
2468  } else if (start_code == COM) {
2469  ret = mjpeg_decode_com(s);
2470  if (ret < 0)
2471  return ret;
2472  } else if (start_code == DQT) {
2473  ret = ff_mjpeg_decode_dqt(s);
2474  if (ret < 0)
2475  return ret;
2476  }
2477 
2478  ret = -1;
2479 
2480  if (!CONFIG_JPEGLS_DECODER &&
2481  (start_code == SOF48 || start_code == LSE)) {
2482  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2483  return AVERROR(ENOSYS);
2484  }
2485 
2486  if (avctx->skip_frame == AVDISCARD_ALL) {
2487  switch(start_code) {
2488  case SOF0:
2489  case SOF1:
2490  case SOF2:
2491  case SOF3:
2492  case SOF48:
2493  case SOI:
2494  case SOS:
2495  case EOI:
2496  break;
2497  default:
2498  goto skip;
2499  }
2500  }
2501 
2502  switch (start_code) {
2503  case SOI:
2504  s->restart_interval = 0;
2505  s->restart_count = 0;
2506  s->raw_image_buffer = buf_ptr;
2507  s->raw_image_buffer_size = buf_end - buf_ptr;
2508  /* nothing to do on SOI */
2509  break;
2510  case DHT:
2511  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2512  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2513  goto fail;
2514  }
2515  break;
2516  case SOF0:
2517  case SOF1:
2518  if (start_code == SOF0)
2519  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2520  else
2521  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2522  s->lossless = 0;
2523  s->ls = 0;
2524  s->progressive = 0;
2525  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2526  goto fail;
2527  break;
2528  case SOF2:
2529  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2530  s->lossless = 0;
2531  s->ls = 0;
2532  s->progressive = 1;
2533  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2534  goto fail;
2535  break;
2536  case SOF3:
2537  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2538  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2539  s->lossless = 1;
2540  s->ls = 0;
2541  s->progressive = 0;
2542  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2543  goto fail;
2544  break;
2545  case SOF48:
2546  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2547  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2548  s->lossless = 1;
2549  s->ls = 1;
2550  s->progressive = 0;
2551  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2552  goto fail;
2553  break;
2554  case LSE:
2555  if (!CONFIG_JPEGLS_DECODER ||
2556  (ret = ff_jpegls_decode_lse(s)) < 0)
2557  goto fail;
2558  if (ret == 1)
2559  goto redo_for_pal8;
2560  break;
2561  case EOI:
2562 eoi_parser:
2563  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2564  s->progressive && s->cur_scan && s->got_picture)
2565  mjpeg_idct_scan_progressive_ac(s);
2566  s->cur_scan = 0;
2567  if (!s->got_picture) {
2568  av_log(avctx, AV_LOG_WARNING,
2569  "Found EOI before any SOF, ignoring\n");
2570  break;
2571  }
2572  if (s->interlaced) {
2573  s->bottom_field ^= 1;
2574  /* if not bottom field, do not output image yet */
2575  if (s->bottom_field == !s->interlace_polarity)
2576  break;
2577  }
2578  if (avctx->skip_frame == AVDISCARD_ALL) {
2579  s->got_picture = 0;
2580  ret = AVERROR(EAGAIN);
2581  goto the_end_no_picture;
2582  }
2583  if (s->avctx->hwaccel) {
2584  ret = s->avctx->hwaccel->end_frame(s->avctx);
2585  if (ret < 0)
2586  return ret;
2587 
2588  av_freep(&s->hwaccel_picture_private);
2589  }
2590  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2591  return ret;
2592  s->got_picture = 0;
2593 
2594  frame->pkt_dts = s->pkt->dts;
2595 
2596  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2597  int qp = FFMAX3(s->qscale[0],
2598  s->qscale[1],
2599  s->qscale[2]);
2600 
2601  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2602  }
2603 
2604  goto the_end;
2605  case SOS:
2606  s->raw_scan_buffer = buf_ptr;
2607  s->raw_scan_buffer_size = buf_end - buf_ptr;
2608 
2609  s->cur_scan++;
2610  if (avctx->skip_frame == AVDISCARD_ALL) {
2611  skip_bits(&s->gb, get_bits_left(&s->gb));
2612  break;
2613  }
2614 
2615  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2616  (avctx->err_recognition & AV_EF_EXPLODE))
2617  goto fail;
2618  break;
2619  case DRI:
2620  if ((ret = mjpeg_decode_dri(s)) < 0)
2621  return ret;
2622  break;
2623  case SOF5:
2624  case SOF6:
2625  case SOF7:
2626  case SOF9:
2627  case SOF10:
2628  case SOF11:
2629  case SOF13:
2630  case SOF14:
2631  case SOF15:
2632  case JPG:
2633  av_log(avctx, AV_LOG_ERROR,
2634  "mjpeg: unsupported coding type (%x)\n", start_code);
2635  break;
2636  }
2637 
2638 skip:
2639  /* eof process start code */
2640  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2641  av_log(avctx, AV_LOG_DEBUG,
2642  "marker parser used %d bytes (%d bits)\n",
2643  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2644  }
2645  if (s->got_picture && s->cur_scan) {
2646  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2647  goto eoi_parser;
2648  }
2649  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2650  return AVERROR_INVALIDDATA;
2651 fail:
2652  s->got_picture = 0;
2653  return ret;
2654 the_end:
2655 
2656  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2657 
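 /* Post-processing: chroma layouts that have no matching AVPixelFormat are
  * decoded into the closest format and the affected planes, flagged in
  * upscale_h/upscale_v, are interpolated in place here (horizontally in this
  * block, vertically in the next one). */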
2658  if (AV_RB32(s->upscale_h)) {
2659  int p;
2660  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2661  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2662  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2663  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2664  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2665  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2666  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2667  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2668  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2669  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2670  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2671  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2672  );
2673  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2674  if (ret)
2675  return ret;
2676 
2677  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2678  for (p = 0; p<s->nb_components; p++) {
2679  uint8_t *line = s->picture_ptr->data[p];
2680  int w = s->width;
2681  int h = s->height;
2682  if (!s->upscale_h[p])
2683  continue;
2684  if (p==1 || p==2) {
2685  w = AV_CEIL_RSHIFT(w, hshift);
2686  h = AV_CEIL_RSHIFT(h, vshift);
2687  }
2688  if (s->upscale_v[p] == 1)
2689  h = (h+1)>>1;
2690  av_assert0(w > 0);
2691  for (i = 0; i < h; i++) {
2692  if (s->upscale_h[p] == 1) {
2693  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2694  else line[w - 1] = line[(w - 1) / 2];
2695  for (index = w - 2; index > 0; index--) {
2696  if (is16bit)
2697  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2698  else
2699  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2700  }
2701  } else if (s->upscale_h[p] == 2) {
2702  if (is16bit) {
2703  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2704  if (w > 1)
2705  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2706  } else {
2707  line[w - 1] = line[(w - 1) / 3];
2708  if (w > 1)
2709  line[w - 2] = line[w - 1];
2710  }
2711  for (index = w - 3; index > 0; index--) {
2712  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2713  }
2714  }
2715  line += s->linesize[p];
2716  }
2717  }
2718  }
2719  if (AV_RB32(s->upscale_v)) {
2720  int p;
2721  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2727  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2728  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2729  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2730  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2731  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2732  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2733  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2734  );
2735  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2736  if (ret)
2737  return ret;
2738 
2739  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2740  for (p = 0; p < s->nb_components; p++) {
2741  uint8_t *dst;
2742  int w = s->width;
2743  int h = s->height;
2744  if (!s->upscale_v[p])
2745  continue;
2746  if (p==1 || p==2) {
2747  w = AV_CEIL_RSHIFT(w, hshift);
2748  h = AV_CEIL_RSHIFT(h, vshift);
2749  }
2750  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2751  for (i = h - 1; i; i--) {
2752  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2753  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2754  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2755  memcpy(dst, src1, w);
2756  } else {
2757  for (index = 0; index < w; index++)
2758  dst[index] = (src1[index] + src2[index]) >> 1;
2759  }
2760  dst -= s->linesize[p];
2761  }
2762  }
2763  }
2764  if (s->flipped && !s->rgb) {
2765  int j;
2766  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2767  if (ret)
2768  return ret;
2769 
2770  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2771  for (index=0; index<s->nb_components; index++) {
2772  uint8_t *dst = s->picture_ptr->data[index];
2773  int w = s->picture_ptr->width;
2774  int h = s->picture_ptr->height;
2775  if(index && index<3){
2776  w = AV_CEIL_RSHIFT(w, hshift);
2777  h = AV_CEIL_RSHIFT(h, vshift);
2778  }
2779  if(dst){
2780  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2781  for (i=0; i<h/2; i++) {
2782  for (j=0; j<w; j++)
2783  FFSWAP(int, dst[j], dst2[j]);
2784  dst += s->picture_ptr->linesize[index];
2785  dst2 -= s->picture_ptr->linesize[index];
2786  }
2787  }
2788  }
2789  }
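 /* Four-component Adobe streams: the two blocks below fold the fourth (K)
  * plane into the colour planes, rescale by 257/65536 (roughly 1/255) and
  * force the alpha plane to opaque, for transform 0 (GBRAP output) and
  * transform 2 (YUVA444P output) respectively. */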
2790  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2791  int w = s->picture_ptr->width;
2792  int h = s->picture_ptr->height;
2793  av_assert0(s->nb_components == 4);
2794  for (i=0; i<h; i++) {
2795  int j;
2796  uint8_t *dst[4];
2797  for (index=0; index<4; index++) {
2798  dst[index] = s->picture_ptr->data[index]
2799  + s->picture_ptr->linesize[index]*i;
2800  }
2801  for (j=0; j<w; j++) {
2802  int k = dst[3][j];
2803  int r = dst[0][j] * k;
2804  int g = dst[1][j] * k;
2805  int b = dst[2][j] * k;
2806  dst[0][j] = g*257 >> 16;
2807  dst[1][j] = b*257 >> 16;
2808  dst[2][j] = r*257 >> 16;
2809  dst[3][j] = 255;
2810  }
2811  }
2812  }
2813  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2814  int w = s->picture_ptr->width;
2815  int h = s->picture_ptr->height;
2816  av_assert0(s->nb_components == 4);
2817  for (i=0; i<h; i++) {
2818  int j;
2819  uint8_t *dst[4];
2820  for (index=0; index<4; index++) {
2821  dst[index] = s->picture_ptr->data[index]
2822  + s->picture_ptr->linesize[index]*i;
2823  }
2824  for (j=0; j<w; j++) {
2825  int k = dst[3][j];
2826  int r = (255 - dst[0][j]) * k;
2827  int g = (128 - dst[1][j]) * k;
2828  int b = (128 - dst[2][j]) * k;
2829  dst[0][j] = r*257 >> 16;
2830  dst[1][j] = (g*257 >> 16) + 128;
2831  dst[2][j] = (b*257 >> 16) + 128;
2832  dst[3][j] = 255;
2833  }
2834  }
2835  }
2836 
2837  if (s->stereo3d) {
2838  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2839  if (stereo) {
2840  stereo->type = s->stereo3d->type;
2841  stereo->flags = s->stereo3d->flags;
2842  }
2843  av_freep(&s->stereo3d);
2844  }
2845 
2846  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2847  AVFrameSideData *sd;
2848  size_t offset = 0;
2849  int total_size = 0;
2850  int i;
2851 
2852  /* Sum size of all parts. */
2853  for (i = 0; i < s->iccnum; i++)
2854  total_size += s->iccentries[i].length;
2855 
2856  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2857  if (!sd) {
2858  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2859  return AVERROR(ENOMEM);
2860  }
2861 
2862  /* Reassemble the parts, which are now in-order. */
2863  for (i = 0; i < s->iccnum; i++) {
2864  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2865  offset += s->iccentries[i].length;
2866  }
2867  }
2868 
2869  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2870  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2871  int orientation = strtol(value, &endptr, 0);
2872 
2873  if (!*endptr) {
2874  AVFrameSideData *sd = NULL;
2875 
2876  if (orientation >= 2 && orientation <= 8) {
2877  int32_t *matrix;
2878 
2879  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2880  if (!sd) {
2881  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2882  return AVERROR(ENOMEM);
2883  }
2884 
2885  matrix = (int32_t *)sd->data;
2886 
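 /* EXIF orientation 1 is "upright" and needs no side data; values 2..8 map
  * to combinations of 90/180/270 degree rotation and mirroring, exported
  * here as a display matrix. */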
2887  switch (orientation) {
2888  case 2:
2889  av_display_rotation_set(matrix, 0.0);
2890  av_display_matrix_flip(matrix, 1, 0);
2891  break;
2892  case 3:
2893  av_display_rotation_set(matrix, 180.0);
2894  break;
2895  case 4:
2896  av_display_rotation_set(matrix, 180.0);
2897  av_display_matrix_flip(matrix, 1, 0);
2898  break;
2899  case 5:
2900  av_display_rotation_set(matrix, 90.0);
2901  av_display_matrix_flip(matrix, 0, 1);
2902  break;
2903  case 6:
2904  av_display_rotation_set(matrix, 90.0);
2905  break;
2906  case 7:
2907  av_display_rotation_set(matrix, -90.0);
2908  av_display_matrix_flip(matrix, 0, 1);
2909  break;
2910  case 8:
2911  av_display_rotation_set(matrix, -90.0);
2912  break;
2913  default:
2914  av_assert0(0);
2915  }
2916  }
2917  }
2918  }
2919 
2920  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2921  av_dict_free(&s->exif_metadata);
2922 
2923  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2924  ret = smv_process_frame(avctx, frame);
2925  if (ret < 0) {
2926  av_frame_unref(frame);
2927  return ret;
2928  }
2929  }
2930  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2931  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2932  avctx->coded_height > s->orig_height) {
2933  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2934  frame->crop_top = frame->height - avctx->height;
2935  }
2936 
2937  ret = 0;
2938 
2939 the_end_no_picture:
2940  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2941  buf_end - buf_ptr);
2942 
2943  return ret;
2944 }
2945 
2946 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2947  * even without having called ff_mjpeg_decode_init(). */
2948 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2949 {
2950  MJpegDecodeContext *s = avctx->priv_data;
2951  int i, j;
2952 
2953  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2954  av_log(avctx, AV_LOG_INFO, "Single field\n");
2955  }
2956 
2957  if (s->picture) {
2958  av_frame_free(&s->picture);
2959  s->picture_ptr = NULL;
2960  } else if (s->picture_ptr)
2961  av_frame_unref(s->picture_ptr);
2962 
2963  av_packet_free(&s->pkt);
2964 
2965  av_frame_free(&s->smv_frame);
2966 
2967  av_freep(&s->buffer);
2968  av_freep(&s->stereo3d);
2969  av_freep(&s->ljpeg_buffer);
2970  s->ljpeg_buffer_size = 0;
2971 
2972  for (i = 0; i < 3; i++) {
2973  for (j = 0; j < 4; j++)
2974  ff_free_vlc(&s->vlcs[i][j]);
2975  }
2976  for (i = 0; i < MAX_COMPONENTS; i++) {
2977  av_freep(&s->blocks[i]);
2978  av_freep(&s->last_nnz[i]);
2979  }
2980  av_dict_free(&s->exif_metadata);
2981 
2982  reset_icc_profile(s);
2983 
2984  av_freep(&s->hwaccel_picture_private);
2985  av_freep(&s->jls_state);
2986 
2987  return 0;
2988 }
2989 
2990 static void decode_flush(AVCodecContext *avctx)
2991 {
2992  MJpegDecodeContext *s = avctx->priv_data;
2993  s->got_picture = 0;
2994 
2995  s->smv_next_frame = 0;
2996  av_frame_unref(s->smv_frame);
2997 }
2998 
2999 #if CONFIG_MJPEG_DECODER
3000 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
3001 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
3002 static const AVOption options[] = {
3003  { "extern_huff", "Use external huffman table.",
3004  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
3005  { NULL },
3006 };
3007 
3008 static const AVClass mjpegdec_class = {
3009  .class_name = "MJPEG decoder",
3010  .item_name = av_default_item_name,
3011  .option = options,
3012  .version = LIBAVUTIL_VERSION_INT,
3013 };
3014 
3015 const AVCodec ff_mjpeg_decoder = {
3016  .name = "mjpeg",
3017  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
3018  .type = AVMEDIA_TYPE_VIDEO,
3019  .id = AV_CODEC_ID_MJPEG,
3020  .priv_data_size = sizeof(MJpegDecodeContext),
3021  .init = ff_mjpeg_decode_init,
3022  .close = ff_mjpeg_decode_end,
3023  .receive_frame = ff_mjpeg_receive_frame,
3024  .flush = decode_flush,
3025  .capabilities = AV_CODEC_CAP_DR1,
3026  .max_lowres = 3,
3027  .priv_class = &mjpegdec_class,
3031  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3032 #if CONFIG_MJPEG_NVDEC_HWACCEL
3033  HWACCEL_NVDEC(mjpeg),
3034 #endif
3035 #if CONFIG_MJPEG_VAAPI_HWACCEL
3036  HWACCEL_VAAPI(mjpeg),
3037 #endif
3038  NULL
3039  },
3040 };
3041 #endif
3042 #if CONFIG_THP_DECODER
3043 const AVCodec ff_thp_decoder = {
3044  .name = "thp",
3045  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
3046  .type = AVMEDIA_TYPE_VIDEO,
3047  .id = AV_CODEC_ID_THP,
3048  .priv_data_size = sizeof(MJpegDecodeContext),
3049  .init = ff_mjpeg_decode_init,
3050  .close = ff_mjpeg_decode_end,
3051  .receive_frame = ff_mjpeg_receive_frame,
3052  .flush = decode_flush,
3053  .capabilities = AV_CODEC_CAP_DR1,
3054  .max_lowres = 3,
3057 };
3058 #endif
3059 
3060 #if CONFIG_SMVJPEG_DECODER
3061 const AVCodec ff_smvjpeg_decoder = {
3062  .name = "smvjpeg",
3063  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
3064  .type = AVMEDIA_TYPE_VIDEO,
3065  .id = AV_CODEC_ID_SMVJPEG,
3066  .priv_data_size = sizeof(MJpegDecodeContext),
3067  .init = ff_mjpeg_decode_init,
3068  .close = ff_mjpeg_decode_end,
3069  .receive_frame = ff_mjpeg_receive_frame,
3070  .flush = decode_flush,
3071  .capabilities = AV_CODEC_CAP_DR1,
3074 };
3075 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:417
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
AVCodec
AVCodec.
Definition: codec.h:202
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:292
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:224
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:57
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:603
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1087
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1393
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2990
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2564
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:955
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1324
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:707
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:547
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:275
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:109
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:191
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
index
fg index
Definition: ffmpeg_filter.c:168
AVFrame::width
int width
Definition: frame.h:361
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:433
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:579
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1629
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2341
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:989
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:789
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:143
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:798
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2375
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:179
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:150
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1303
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:216
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2604
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:511
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:239
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1239
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1409
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:118
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1673
fail
#define fail()
Definition: checkasm.h:127
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:435
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1625
GetBitContext
Definition: get_bits.h:62
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2140
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:55
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2592
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:571
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:375
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:62
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
aligned
static int aligned(int val)
Definition: dashdec.c:169
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:856
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:403
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1823
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1627
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1038
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:150
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:98
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:404
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1628
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:353
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:402
ff_thp_decoder
const AVCodec ff_thp_decoder
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2325
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2948
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:49
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2399
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:410
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:381
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:60
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:111
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
ff_smvjpeg_decoder
const AVCodec ff_smvjpeg_decoder
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:382
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1600
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:196
SOF13
@ SOF13
Definition: mjpeg.h:52
receive_frame
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:555
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:255
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:200
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1424
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:973
lowres
static int lowres
Definition: ffplay.c:334
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1545
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1335
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:68
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1432
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:508
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1652
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1073
dc
(long tooltip excerpt from snow.txt omitted)
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:327
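Aside: av_frame_ref() is the public refcounting entry point a decoder uses to hand out a decoded picture without copying pixel data. A minimal sketch of the allocate/ref/free cycle, assuming a valid source frame; the helper name share_frame is hypothetical:
  #include <libavutil/error.h>
  #include <libavutil/frame.h>

  /* Sketch: take an extra reference to an existing frame, use it, drop it. */
  static int share_frame(const AVFrame *src)
  {
      AVFrame *dst = av_frame_alloc();
      int ret;

      if (!dst)
          return AVERROR(ENOMEM);
      ret = av_frame_ref(dst, src);      /* new reference, no pixel copy */
      if (ret >= 0) {
          /* ... read from dst ... */
      }
      av_frame_free(&dst);               /* unreferences and frees the struct */
      return ret;
  }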
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:874
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
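Aside: as the note above says, av_err2str() should only appear directly in an argument list, because it expands to a temporary on-stack string. A hedged sketch of typical use; log_error and its arguments are illustrative:
  #include <libavutil/error.h>
  #include <libavutil/log.h>

  /* Sketch: turn an AVERROR code into readable text inside a log call. */
  static void log_error(void *log_ctx, int errnum)
  {
      av_log(log_ctx, AV_LOG_ERROR, "operation failed: %s\n", av_err2str(errnum));
  }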
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1635
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:263
AV_RB32
Read an unsigned 32-bit big-endian value from a byte buffer.
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:376
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:139
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1213
offset
(long tooltip excerpt from writing_filters.txt omitted)
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:322
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
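Aside: av_packet_alloc() only creates the AVPacket structure with default fields; payload data is attached separately (by a demuxer, av_new_packet(), or av_packet_ref()). A small sketch of the lifecycle; the function name packet_roundtrip is purely illustrative:
  #include <libavcodec/packet.h>
  #include <libavutil/error.h>

  /* Sketch: allocate an empty packet and release it again. */
  static int packet_roundtrip(void)
  {
      AVPacket *pkt = av_packet_alloc();
      if (!pkt)
          return AVERROR(ENOMEM);
      /* ... attach data, e.g. via av_new_packet() or a demuxer ... */
      av_packet_free(&pkt);   /* drops any data reference and frees the struct */
      return 0;
  }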
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2180
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
layout
(long tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
src1
#define src1
Definition: h264pred.c:140
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2207
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:807
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
code
(long tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:447
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1626
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1813
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:129
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
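Aside: av_fast_padded_malloc() is the usual way decoders keep a growing scratch buffer across frames; the AV_INPUT_BUFFER_PADDING_SIZE tail protects bitstream readers that peek a few bytes past the end. A sketch assuming buf/buf_size persist between calls (e.g. in a codec context); the helper name stash_input is hypothetical:
  #include <string.h>
  #include <libavutil/error.h>
  #include <libavcodec/avcodec.h>   /* av_fast_padded_malloc(), AV_INPUT_BUFFER_PADDING_SIZE */

  /* Sketch: (re)grow a reusable, padded buffer and copy bitstream data into it. */
  static int stash_input(uint8_t **buf, unsigned int *buf_size,
                         const uint8_t *src, int src_size)
  {
      av_fast_padded_malloc(buf, buf_size, src_size);
      if (!*buf)
          return AVERROR(ENOMEM);
      memcpy(*buf, src, src_size);
      return 0;
  }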
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
(long tooltip excerpt from writing_filters.txt omitted)
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1310
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:437
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:974
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:562
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:947
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
(long tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1307
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
(long tooltip excerpt from snow.txt omitted)
Definition: snow.txt:386
AV_RL32
Read an unsigned 32-bit little-endian value from a byte buffer.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2205
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
OFFSET
(long tooltip excerpt omitted; describes the OFFSET() macro used in AVOption tables)
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVFrame::height
int height
Definition: frame.h:361
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:212
buffer
(long tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:601
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
(long tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:429
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1302
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:300
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:571
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
SOI
@ SOI
Definition: mjpeg.h:70
ff_mjpeg_decoder
const AVCodec ff_mjpeg_decoder
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1825
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1023
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
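Aside: this is the public API a decoder uses when the bitstream signals stereoscopic content: allocate AVStereo3D side data on the output frame, then fill in the packing type and flags. A minimal sketch; the top-bottom/inverted combination is illustrative, not what every stream carries:
  #include <libavutil/error.h>
  #include <libavutil/frame.h>
  #include <libavutil/stereo3d.h>

  /* Sketch: tag a decoded frame as top-bottom packed stereo with swapped views. */
  static int tag_stereo(AVFrame *frame)
  {
      AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
      if (!stereo)
          return AVERROR(ENOMEM);
      stereo->type  = AV_STEREO3D_TOPBOTTOM;    /* views stacked vertically */
      stereo->flags = AV_STEREO3D_FLAG_INVERT;  /* right/bottom holds the left view */
      return 0;
  }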
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:79
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:408
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:560
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
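Aside: av_dict_copy() together with av_dict_free() is the common pattern for moving metadata (EXIF tags, for example) from a temporary dictionary onto its final owner. A self-contained sketch; the "orientation" key/value pair is purely illustrative:
  #include <libavutil/dict.h>

  /* Sketch: build a dictionary, duplicate its entries, then free both. */
  static int copy_metadata(void)
  {
      AVDictionary *src = NULL, *dst = NULL;
      int ret = av_dict_set(&src, "orientation", "6", 0);

      if (ret >= 0)
          ret = av_dict_copy(&dst, src, 0);   /* copies every key/value pair */
      av_dict_free(&src);
      av_dict_free(&dst);
      return ret;
  }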
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
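Aside: GetByteContext and the bytestream2_*() helpers form libavcodec's internal bounds-checked byte reader; bytestream.h is not an installed public header, so this sketch only builds inside the FFmpeg tree. The field layout read here is made up purely for illustration:
  #include "bytestream.h"          /* internal libavcodec header */
  #include "libavutil/error.h"
  #include "libavutil/log.h"

  /* Sketch: read two big-endian fields from a buffer with bounds checking. */
  static int parse_blob(void *logctx, const uint8_t *buf, int size)
  {
      GetByteContext gb;
      unsigned tag, len;

      bytestream2_init(&gb, buf, size);
      if (bytestream2_get_bytes_left(&gb) < 6)
          return AVERROR_INVALIDDATA;
      tag = bytestream2_get_be32(&gb);   /* 4-byte identifier */
      len = bytestream2_get_be16(&gb);   /* length of the payload that follows */
      av_log(logctx, AV_LOG_DEBUG, "tag 0x%08x len %u at offset %d\n",
             tag, len, bytestream2_tell(&gb));
      return 0;
  }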
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
(truncated tooltip excerpt from filter_design.txt omitted)
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
AVDictionaryEntry::value
char * value
Definition: dict.h:81
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
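Aside: av_image_check_size() is the standard guard run on dimensions parsed from a bitstream before any image allocation happens. A small sketch of the public call; check_dimensions is a hypothetical wrapper:
  #include <libavutil/imgutils.h>
  #include <libavutil/log.h>

  /* Sketch: validate that every byte of a w x h image is addressable. */
  static int check_dimensions(void *log_ctx, unsigned w, unsigned h)
  {
      int ret = av_image_check_size(w, h, 0, log_ctx);
      if (ret < 0)
          av_log(log_ctx, AV_LOG_ERROR, "invalid image size %ux%u\n", w, h);
      return ret;
  }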
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AV_RB24
Read an unsigned 24-bit big-endian value from a byte buffer.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:78