mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "decode.h"
40 #include "hwconfig.h"
41 #include "idctdsp.h"
42 #include "internal.h"
43 #include "jpegtables.h"
44 #include "mjpeg.h"
45 #include "mjpegdec.h"
46 #include "jpeglsdec.h"
47 #include "profiles.h"
48 #include "put_bits.h"
49 #include "tiff.h"
50 #include "exif.h"
51 #include "bytestream.h"
52 
53 
54 static int init_default_huffman_tables(MJpegDecodeContext *s)
55 {
56  static const struct {
57  int class;
58  int index;
59  const uint8_t *bits;
60  const uint8_t *values;
61  int length;
62  } ht[] = {
63  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
64  avpriv_mjpeg_val_dc, 12 },
65  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
66  avpriv_mjpeg_val_dc, 12 },
67  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
68  avpriv_mjpeg_val_ac_luminance, 162 },
69  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
70  avpriv_mjpeg_val_ac_chrominance, 162 },
71  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
72  avpriv_mjpeg_val_ac_luminance, 162 },
73  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
74  avpriv_mjpeg_val_ac_chrominance, 162 },
75  };
76  int i, ret;
77 
78  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
79  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
80  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
81  ht[i].bits, ht[i].values,
82  ht[i].class == 1, s->avctx);
83  if (ret < 0)
84  return ret;
85 
86  if (ht[i].class < 2) {
87  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
88  ht[i].bits + 1, 16);
89  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
90  ht[i].values, ht[i].length);
91  }
92  }
93 
94  return 0;
95 }
96 
97 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
98 {
99  s->buggy_avid = 1;
100  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
101  s->interlace_polarity = 1;
102  if (len > 14 && buf[12] == 2) /* 2 - PAL */
103  s->interlace_polarity = 0;
104  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
105  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
106 }
107 
108 static void init_idct(AVCodecContext *avctx)
109 {
110  MJpegDecodeContext *s = avctx->priv_data;
111 
112  ff_idctdsp_init(&s->idsp, avctx);
113  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
114  ff_zigzag_direct);
115 }
116 
117 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
118 {
119  MJpegDecodeContext *s = avctx->priv_data;
120  int ret;
121 
122  if (!s->picture_ptr) {
123  s->picture = av_frame_alloc();
124  if (!s->picture)
125  return AVERROR(ENOMEM);
126  s->picture_ptr = s->picture;
127  }
128 
129  s->pkt = av_packet_alloc();
130  if (!s->pkt)
131  return AVERROR(ENOMEM);
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp, avctx);
135  ff_hpeldsp_init(&s->hdsp, avctx->flags);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
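/* Annotation: SMV JPEG files stack several video frames into one tall JPEG
 * image; the first 4 bytes of extradata give the number of frames per JPEG,
 * which decode_sof later uses to derive the per-frame output height. */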
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantize tables */
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
221  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
222  if (s->avctx->err_recognition & AV_EF_EXPLODE)
223  return AVERROR_INVALIDDATA;
224  }
225  }
226 
227  // XXX FIXME fine-tune, and perhaps add dc too
228  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
229  s->quant_matrixes[index][8]) >> 1;
230  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
231  index, s->qscale[index]);
232  len -= 1 + 64 * (1+pr);
233  }
234  return 0;
235 }
236 
237 /* decode huffman tables and build VLC decoders */
238 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
239 {
240  int len, index, i, class, n, v;
241  uint8_t bits_table[17];
242  uint8_t val_table[256];
243  int ret = 0;
244 
245  len = get_bits(&s->gb, 16) - 2;
246 
247  if (8*len > get_bits_left(&s->gb)) {
248  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
249  return AVERROR_INVALIDDATA;
250  }
251 
252  while (len > 0) {
253  if (len < 17)
254  return AVERROR_INVALIDDATA;
255  class = get_bits(&s->gb, 4);
256  if (class >= 2)
257  return AVERROR_INVALIDDATA;
258  index = get_bits(&s->gb, 4);
259  if (index >= 4)
260  return AVERROR_INVALIDDATA;
261  n = 0;
262  for (i = 1; i <= 16; i++) {
263  bits_table[i] = get_bits(&s->gb, 8);
264  n += bits_table[i];
265  }
266  len -= 17;
267  if (len < n || n > 256)
268  return AVERROR_INVALIDDATA;
269 
270  for (i = 0; i < n; i++) {
271  v = get_bits(&s->gb, 8);
272  val_table[i] = v;
273  }
274  len -= n;
275 
276  /* build VLC and flush previous vlc if present */
277  ff_free_vlc(&s->vlcs[class][index]);
278  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
279  class, index, n);
280  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
281  val_table, class > 0, s->avctx)) < 0)
282  return ret;
283 
284  if (class > 0) {
285  ff_free_vlc(&s->vlcs[2][index]);
286  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
287  val_table, 0, s->avctx)) < 0)
288  return ret;
289  }
290 
291  for (i = 0; i < 16; i++)
292  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
293  for (i = 0; i < 256; i++)
294  s->raw_huffman_values[class][index][i] = val_table[i];
295  }
296  return 0;
297 }
298 
299 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
300 {
301  int len, nb_components, i, width, height, bits, ret, size_change;
302  unsigned pix_fmt_id;
303  int h_count[MAX_COMPONENTS] = { 0 };
304  int v_count[MAX_COMPONENTS] = { 0 };
305 
306  s->cur_scan = 0;
307  memset(s->upscale_h, 0, sizeof(s->upscale_h));
308  memset(s->upscale_v, 0, sizeof(s->upscale_v));
309 
310  len = get_bits(&s->gb, 16);
311  bits = get_bits(&s->gb, 8);
312 
313  if (bits > 16 || bits < 1) {
314  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
315  return AVERROR_INVALIDDATA;
316  }
317 
318  if (s->avctx->bits_per_raw_sample != bits) {
319  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
320  s->avctx->bits_per_raw_sample = bits;
321  init_idct(s->avctx);
322  }
323  if (s->pegasus_rct)
324  bits = 9;
325  if (bits == 9 && !s->pegasus_rct)
326  s->rct = 1; // FIXME ugly
327 
328  if(s->lossless && s->avctx->lowres){
329  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
330  return -1;
331  }
332 
333  height = get_bits(&s->gb, 16);
334  width = get_bits(&s->gb, 16);
335 
336  // HACK for odd_height.mov
337  if (s->interlaced && s->width == width && s->height == height + 1)
338  height= s->height;
339 
340  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
341  if (av_image_check_size(width, height, 0, s->avctx) < 0)
342  return AVERROR_INVALIDDATA;
343  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
344  return AVERROR_INVALIDDATA;
345 
346  nb_components = get_bits(&s->gb, 8);
347  if (nb_components <= 0 ||
348  nb_components > MAX_COMPONENTS)
349  return -1;
350  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
351  if (nb_components != s->nb_components) {
352  av_log(s->avctx, AV_LOG_ERROR,
353  "nb_components changing in interlaced picture\n");
354  return AVERROR_INVALIDDATA;
355  }
356  }
357  if (s->ls && !(bits <= 8 || nb_components == 1)) {
358  avpriv_report_missing_feature(s->avctx,
359  "JPEG-LS that is not <= 8 "
360  "bits/component or 16-bit gray");
361  return AVERROR_PATCHWELCOME;
362  }
363  if (len != 8 + 3 * nb_components) {
364  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
365  return AVERROR_INVALIDDATA;
366  }
367 
368  s->nb_components = nb_components;
369  s->h_max = 1;
370  s->v_max = 1;
371  for (i = 0; i < nb_components; i++) {
372  /* component id */
373  s->component_id[i] = get_bits(&s->gb, 8) - 1;
374  h_count[i] = get_bits(&s->gb, 4);
375  v_count[i] = get_bits(&s->gb, 4);
376  /* compute hmax and vmax (only used in interleaved case) */
377  if (h_count[i] > s->h_max)
378  s->h_max = h_count[i];
379  if (v_count[i] > s->v_max)
380  s->v_max = v_count[i];
381  s->quant_index[i] = get_bits(&s->gb, 8);
382  if (s->quant_index[i] >= 4) {
383  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
384  return AVERROR_INVALIDDATA;
385  }
386  if (!h_count[i] || !v_count[i]) {
387  av_log(s->avctx, AV_LOG_ERROR,
388  "Invalid sampling factor in component %d %d:%d\n",
389  i, h_count[i], v_count[i]);
390  return AVERROR_INVALIDDATA;
391  }
392 
393  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
394  i, h_count[i], v_count[i],
395  s->component_id[i], s->quant_index[i]);
396  }
397  if ( nb_components == 4
398  && s->component_id[0] == 'C' - 1
399  && s->component_id[1] == 'M' - 1
400  && s->component_id[2] == 'Y' - 1
401  && s->component_id[3] == 'K' - 1)
402  s->adobe_transform = 0;
403 
404  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
405  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
406  return AVERROR_PATCHWELCOME;
407  }
408 
409  if (s->bayer) {
410  if (nb_components == 2) {
411  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
412  width stored in their SOF3 markers is the width of each one. We only output
413  a single component, therefore we need to adjust the output image width. We
414  handle the deinterleaving (but not the debayering) in this file. */
415  width *= 2;
416  }
417  /* They can also contain 1 component, which is double the width and half the height
418  of the final image (rows are interleaved). We don't handle the decoding in this
419  file, but leave that to the TIFF/DNG decoder. */
420  }
421 
422  /* if different size, realloc/alloc picture */
423  if (width != s->width || height != s->height || bits != s->bits ||
424  memcmp(s->h_count, h_count, sizeof(h_count)) ||
425  memcmp(s->v_count, v_count, sizeof(v_count))) {
426  size_change = 1;
427 
428  s->width = width;
429  s->height = height;
430  s->bits = bits;
431  memcpy(s->h_count, h_count, sizeof(h_count));
432  memcpy(s->v_count, v_count, sizeof(v_count));
433  s->interlaced = 0;
434  s->got_picture = 0;
435 
436  /* test interlaced mode */
437  if (s->first_picture &&
438  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
439  s->orig_height != 0 &&
440  s->height < ((s->orig_height * 3) / 4)) {
441  s->interlaced = 1;
442  s->bottom_field = s->interlace_polarity;
443  s->picture_ptr->interlaced_frame = 1;
444  s->picture_ptr->top_field_first = !s->interlace_polarity;
445  height *= 2;
446  }
447 
448  ret = ff_set_dimensions(s->avctx, width, height);
449  if (ret < 0)
450  return ret;
451 
452  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
453  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
454  s->orig_height < height)
455  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
456 
457  s->first_picture = 0;
458  } else {
459  size_change = 0;
460  }
461 
462  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
463  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
464  if (s->avctx->height <= 0)
465  return AVERROR_INVALIDDATA;
466  }
467 
468  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
469  if (s->progressive) {
470  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
471  return AVERROR_INVALIDDATA;
472  }
473  } else {
474  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
475  s->rgb = 1;
476  else if (!s->lossless)
477  s->rgb = 0;
478  /* XXX: not complete test ! */
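 /* Annotation: pix_fmt_id packs the horizontal and vertical sampling factors
  * of up to four components into one 32-bit value, one nibble per factor, so
  * the subsampling layout can be matched with a single switch below. */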
479  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
480  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
481  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
482  (s->h_count[3] << 4) | s->v_count[3];
483  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
484  /* NOTE we do not allocate pictures large enough for the possible
485  * padding of h/v_count being 4 */
486  if (!(pix_fmt_id & 0xD0D0D0D0))
487  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
488  if (!(pix_fmt_id & 0x0D0D0D0D))
489  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
490 
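 /* Annotation: the loop below compares each component's sampling factors with
  * those of the first component and flags planes that are subsampled relative
  * to it, so they can be upscaled later to a supported pixel format. */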
491  for (i = 0; i < 8; i++) {
492  int j = 6 + (i&1) - (i&6);
493  int is = (pix_fmt_id >> (4*i)) & 0xF;
494  int js = (pix_fmt_id >> (4*j)) & 0xF;
495 
496  if (is == 1 && js != 2 && (i < 2 || i > 5))
497  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
498  if (is == 1 && js != 2 && (i < 2 || i > 5))
499  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
500 
501  if (is == 1 && js == 2) {
502  if (i & 1) s->upscale_h[j/2] = 1;
503  else s->upscale_v[j/2] = 1;
504  }
505  }
506 
507  if (s->bayer) {
508  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
509  goto unk_pixfmt;
510  }
511 
512  switch (pix_fmt_id) {
513  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
514  if (!s->bayer)
515  goto unk_pixfmt;
516  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
517  break;
518  case 0x11111100:
519  if (s->rgb)
520  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
521  else {
522  if ( s->adobe_transform == 0
523  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
524  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
525  } else {
526  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
527  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
528  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
529  }
530  }
531  av_assert0(s->nb_components == 3);
532  break;
533  case 0x11111111:
534  if (s->rgb)
535  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
536  else {
537  if (s->adobe_transform == 0 && s->bits <= 8) {
538  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
539  } else {
540  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
541  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
542  }
543  }
544  av_assert0(s->nb_components == 4);
545  break;
546  case 0x22111122:
547  case 0x22111111:
548  if (s->adobe_transform == 0 && s->bits <= 8) {
549  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
550  s->upscale_v[1] = s->upscale_v[2] = 1;
551  s->upscale_h[1] = s->upscale_h[2] = 1;
552  } else if (s->adobe_transform == 2 && s->bits <= 8) {
553  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
554  s->upscale_v[1] = s->upscale_v[2] = 1;
555  s->upscale_h[1] = s->upscale_h[2] = 1;
556  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
557  } else {
558  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
559  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
560  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
561  }
562  av_assert0(s->nb_components == 4);
563  break;
564  case 0x12121100:
565  case 0x22122100:
566  case 0x21211100:
567  case 0x21112100:
568  case 0x22211200:
569  case 0x22221100:
570  case 0x22112200:
571  case 0x11222200:
572  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
573  else
574  goto unk_pixfmt;
575  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
576  break;
577  case 0x11000000:
578  case 0x13000000:
579  case 0x14000000:
580  case 0x31000000:
581  case 0x33000000:
582  case 0x34000000:
583  case 0x41000000:
584  case 0x43000000:
585  case 0x44000000:
586  if(s->bits <= 8)
587  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
588  else
589  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
590  break;
591  case 0x12111100:
592  case 0x14121200:
593  case 0x14111100:
594  case 0x22211100:
595  case 0x22112100:
596  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
597  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
598  else
599  goto unk_pixfmt;
600  s->upscale_v[0] = s->upscale_v[1] = 1;
601  } else {
602  if (pix_fmt_id == 0x14111100)
603  s->upscale_v[1] = s->upscale_v[2] = 1;
604  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
605  else
606  goto unk_pixfmt;
607  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
608  }
609  break;
610  case 0x21111100:
611  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
612  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
613  else
614  goto unk_pixfmt;
615  s->upscale_h[0] = s->upscale_h[1] = 1;
616  } else {
617  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
618  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
619  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
620  }
621  break;
622  case 0x31111100:
623  if (s->bits > 8)
624  goto unk_pixfmt;
625  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
626  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
627  s->upscale_h[1] = s->upscale_h[2] = 2;
628  break;
629  case 0x22121100:
630  case 0x22111200:
631  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
632  else
633  goto unk_pixfmt;
634  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
635  break;
636  case 0x22111100:
637  case 0x23111100:
638  case 0x42111100:
639  case 0x24111100:
640  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
641  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
642  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
643  if (pix_fmt_id == 0x42111100) {
644  if (s->bits > 8)
645  goto unk_pixfmt;
646  s->upscale_h[1] = s->upscale_h[2] = 1;
647  } else if (pix_fmt_id == 0x24111100) {
648  if (s->bits > 8)
649  goto unk_pixfmt;
650  s->upscale_v[1] = s->upscale_v[2] = 1;
651  } else if (pix_fmt_id == 0x23111100) {
652  if (s->bits > 8)
653  goto unk_pixfmt;
654  s->upscale_v[1] = s->upscale_v[2] = 2;
655  }
656  break;
657  case 0x41111100:
658  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
659  else
660  goto unk_pixfmt;
661  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
662  break;
663  default:
664  unk_pixfmt:
665  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
666  memset(s->upscale_h, 0, sizeof(s->upscale_h));
667  memset(s->upscale_v, 0, sizeof(s->upscale_v));
668  return AVERROR_PATCHWELCOME;
669  }
670  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
671  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
672  return AVERROR_PATCHWELCOME;
673  }
674  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
675  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
676  return AVERROR_PATCHWELCOME;
677  }
678  if (s->ls) {
679  memset(s->upscale_h, 0, sizeof(s->upscale_h));
680  memset(s->upscale_v, 0, sizeof(s->upscale_v));
681  if (s->nb_components == 3) {
682  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
683  } else if (s->nb_components != 1) {
684  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
685  return AVERROR_PATCHWELCOME;
686  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
687  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
688  else if (s->bits <= 8)
689  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
690  else
691  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
692  }
693 
694  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
695  if (!s->pix_desc) {
696  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
697  return AVERROR_BUG;
698  }
699 
700  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
701  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
702  } else {
703  enum AVPixelFormat pix_fmts[] = {
704 #if CONFIG_MJPEG_NVDEC_HWACCEL
705  AV_PIX_FMT_CUDA,
706 #endif
707 #if CONFIG_MJPEG_VAAPI_HWACCEL
708  AV_PIX_FMT_VAAPI,
709 #endif
710  s->avctx->pix_fmt,
711  AV_PIX_FMT_NONE,
712  };
713  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
714  if (s->hwaccel_pix_fmt < 0)
715  return AVERROR(EINVAL);
716 
717  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
718  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
719  }
720 
721  if (s->avctx->skip_frame == AVDISCARD_ALL) {
722  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
723  s->picture_ptr->key_frame = 1;
724  s->got_picture = 1;
725  return 0;
726  }
727 
728  av_frame_unref(s->picture_ptr);
729  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
730  return -1;
731  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
732  s->picture_ptr->key_frame = 1;
733  s->got_picture = 1;
734 
735  // Let's clear the palette to avoid leaving uninitialized values in it
736  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
737  memset(s->picture_ptr->data[1], 0, 1024);
738 
739  for (i = 0; i < 4; i++)
740  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
741 
742  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
743  s->width, s->height, s->linesize[0], s->linesize[1],
744  s->interlaced, s->avctx->height);
745 
746  }
747 
748  if ((s->rgb && !s->lossless && !s->ls) ||
749  (!s->rgb && s->ls && s->nb_components > 1) ||
750  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
751  ) {
752  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
753  return AVERROR_PATCHWELCOME;
754  }
755 
756  /* totally blank picture as progressive JPEG will only add details to it */
757  if (s->progressive) {
758  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
759  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
760  for (i = 0; i < s->nb_components; i++) {
761  int size = bw * bh * s->h_count[i] * s->v_count[i];
762  av_freep(&s->blocks[i]);
763  av_freep(&s->last_nnz[i]);
764  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
765  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
766  if (!s->blocks[i] || !s->last_nnz[i])
767  return AVERROR(ENOMEM);
768  s->block_stride[i] = bw * s->h_count[i];
769  }
770  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
771  }
772 
773  if (s->avctx->hwaccel) {
774  s->hwaccel_picture_private =
775  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
776  if (!s->hwaccel_picture_private)
777  return AVERROR(ENOMEM);
778 
779  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
780  s->raw_image_buffer_size);
781  if (ret < 0)
782  return ret;
783  }
784 
785  return 0;
786 }
787 
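/* Annotation: the DC VLC symbol is the JPEG "magnitude category", i.e. the
 * number of extra bits that follow; get_xbits() then reads those bits as a
 * signed difference. 0xfffff is an out-of-band error sentinel, since no valid
 * category exceeds 16. */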
788 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
789 {
790  int code;
791  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
792  if (code < 0 || code > 16) {
793  av_log(s->avctx, AV_LOG_WARNING,
794  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
795  0, dc_index, &s->vlcs[0][dc_index]);
796  return 0xfffff;
797  }
798 
799  if (code)
800  return get_xbits(&s->gb, code);
801  else
802  return 0;
803 }
804 
805 /* decode block and dequantize */
806 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
807  int dc_index, int ac_index, uint16_t *quant_matrix)
808 {
809  int code, i, j, level, val;
810 
811  /* DC coef */
812  val = mjpeg_decode_dc(s, dc_index);
813  if (val == 0xfffff) {
814  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
815  return AVERROR_INVALIDDATA;
816  }
817  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
818  val = av_clip_int16(val);
819  s->last_dc[component] = val;
820  block[0] = val;
821  /* AC coefs */
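 /* Annotation: each AC VLC symbol packs a zero-run length in its high bits and
  * the coefficient magnitude size (in bits) in its low nibble; the extra bits
  * holding the signed value are then pulled straight from the bit-reader cache. */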
822  i = 0;
823  {OPEN_READER(re, &s->gb);
824  do {
825  UPDATE_CACHE(re, &s->gb);
826  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
827 
828  i += ((unsigned)code) >> 4;
829  code &= 0xf;
830  if (code) {
831  if (code > MIN_CACHE_BITS - 16)
832  UPDATE_CACHE(re, &s->gb);
833 
834  {
835  int cache = GET_CACHE(re, &s->gb);
836  int sign = (~cache) >> 31;
837  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
838  }
839 
840  LAST_SKIP_BITS(re, &s->gb, code);
841 
842  if (i > 63) {
843  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
844  return AVERROR_INVALIDDATA;
845  }
846  j = s->scantable.permutated[i];
847  block[j] = level * quant_matrix[i];
848  }
849  } while (i < 63);
850  CLOSE_READER(re, &s->gb);}
851 
852  return 0;
853 }
854 
855 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
856  int component, int dc_index,
857  uint16_t *quant_matrix, int Al)
858 {
859  unsigned val;
860  s->bdsp.clear_block(block);
861  val = mjpeg_decode_dc(s, dc_index);
862  if (val == 0xfffff) {
863  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
864  return AVERROR_INVALIDDATA;
865  }
866  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
867  s->last_dc[component] = val;
868  block[0] = val;
869  return 0;
870 }
871 
872 /* decode block and dequantize - progressive JPEG version */
873 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
874  uint8_t *last_nnz, int ac_index,
875  uint16_t *quant_matrix,
876  int ss, int se, int Al, int *EOBRUN)
877 {
878  int code, i, j, val, run;
879  unsigned level;
880 
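 /* Annotation: EOBRUN counts how many further blocks have no non-zero AC
  * coefficients left in the current [ss, se] spectral band; while it is
  * non-zero, each block is consumed without reading any bits. */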
881  if (*EOBRUN) {
882  (*EOBRUN)--;
883  return 0;
884  }
885 
886  {
887  OPEN_READER(re, &s->gb);
888  for (i = ss; ; i++) {
889  UPDATE_CACHE(re, &s->gb);
890  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
891 
892  run = ((unsigned) code) >> 4;
893  code &= 0xF;
894  if (code) {
895  i += run;
896  if (code > MIN_CACHE_BITS - 16)
897  UPDATE_CACHE(re, &s->gb);
898 
899  {
900  int cache = GET_CACHE(re, &s->gb);
901  int sign = (~cache) >> 31;
902  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
903  }
904 
905  LAST_SKIP_BITS(re, &s->gb, code);
906 
907  if (i >= se) {
908  if (i == se) {
909  j = s->scantable.permutated[se];
910  block[j] = level * (quant_matrix[se] << Al);
911  break;
912  }
913  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
914  return AVERROR_INVALIDDATA;
915  }
916  j = s->scantable.permutated[i];
917  block[j] = level * (quant_matrix[i] << Al);
918  } else {
919  if (run == 0xF) {// ZRL - skip 15 coefficients
920  i += 15;
921  if (i >= se) {
922  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
923  return AVERROR_INVALIDDATA;
924  }
925  } else {
926  val = (1 << run);
927  if (run) {
928  UPDATE_CACHE(re, &s->gb);
929  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
930  LAST_SKIP_BITS(re, &s->gb, run);
931  }
932  *EOBRUN = val - 1;
933  break;
934  }
935  }
936  }
937  CLOSE_READER(re, &s->gb);
938  }
939 
940  if (i > *last_nnz)
941  *last_nnz = i;
942 
943  return 0;
944 }
945 
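/* Annotation: REFINE_BIT reads one correction bit for a coefficient that is
 * already non-zero and, if the bit is set, nudges its magnitude by
 * quant_matrix[i] << Al in the direction of its sign (JPEG
 * successive-approximation refinement). */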
946 #define REFINE_BIT(j) { \
947  UPDATE_CACHE(re, &s->gb); \
948  sign = block[j] >> 15; \
949  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
950  ((quant_matrix[i] ^ sign) - sign) << Al; \
951  LAST_SKIP_BITS(re, &s->gb, 1); \
952 }
953 
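/* Annotation: ZERO_RUN advances through the scan order, refining already
 * non-zero coefficients it passes and counting down `run` positions that are
 * still zero; past the last known non-zero index it can jump ahead directly. */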
954 #define ZERO_RUN \
955 for (; ; i++) { \
956  if (i > last) { \
957  i += run; \
958  if (i > se) { \
959  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
960  return -1; \
961  } \
962  break; \
963  } \
964  j = s->scantable.permutated[i]; \
965  if (block[j]) \
966  REFINE_BIT(j) \
967  else if (run-- == 0) \
968  break; \
969 }
970 
971 /* decode block and dequantize - progressive JPEG refinement pass */
972 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
973  uint8_t *last_nnz,
974  int ac_index, uint16_t *quant_matrix,
975  int ss, int se, int Al, int *EOBRUN)
976 {
977  int code, i = ss, j, sign, val, run;
978  int last = FFMIN(se, *last_nnz);
979 
980  OPEN_READER(re, &s->gb);
981  if (*EOBRUN) {
982  (*EOBRUN)--;
983  } else {
984  for (; ; i++) {
985  UPDATE_CACHE(re, &s->gb);
986  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
987 
988  if (code & 0xF) {
989  run = ((unsigned) code) >> 4;
990  UPDATE_CACHE(re, &s->gb);
991  val = SHOW_UBITS(re, &s->gb, 1);
992  LAST_SKIP_BITS(re, &s->gb, 1);
993  ZERO_RUN;
994  j = s->scantable.permutated[i];
995  val--;
996  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
997  if (i == se) {
998  if (i > *last_nnz)
999  *last_nnz = i;
1000  CLOSE_READER(re, &s->gb);
1001  return 0;
1002  }
1003  } else {
1004  run = ((unsigned) code) >> 4;
1005  if (run == 0xF) {
1006  ZERO_RUN;
1007  } else {
1008  val = run;
1009  run = (1 << run);
1010  if (val) {
1011  UPDATE_CACHE(re, &s->gb);
1012  run += SHOW_UBITS(re, &s->gb, val);
1013  LAST_SKIP_BITS(re, &s->gb, val);
1014  }
1015  *EOBRUN = run - 1;
1016  break;
1017  }
1018  }
1019  }
1020 
1021  if (i > *last_nnz)
1022  *last_nnz = i;
1023  }
1024 
1025  for (; i <= last; i++) {
1026  j = s->scantable.permutated[i];
1027  if (block[j])
1028  REFINE_BIT(j)
1029  }
1030  CLOSE_READER(re, &s->gb);
1031 
1032  return 0;
1033 }
1034 #undef REFINE_BIT
1035 #undef ZERO_RUN
1036 
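/* Annotation: handle_rstn() is called once per MCU; it counts down the restart
 * interval and, when the counter reaches zero and an RSTn marker (0xD0-0xD7)
 * actually follows in the bitstream, byte-aligns the reader, skips fill bytes
 * and the marker, and resets the DC predictors. */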
1037 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1038 {
1039  int i;
1040  int reset = 0;
1041 
1042  if (s->restart_interval) {
1043  s->restart_count--;
1044  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1045  align_get_bits(&s->gb);
1046  for (i = 0; i < nb_components; i++) /* reset dc */
1047  s->last_dc[i] = (4 << s->bits);
1048  }
1049 
1050  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1051  /* skip RSTn */
1052  if (s->restart_count == 0) {
1053  if( show_bits(&s->gb, i) == (1 << i) - 1
1054  || show_bits(&s->gb, i) == 0xFF) {
1055  int pos = get_bits_count(&s->gb);
1056  align_get_bits(&s->gb);
1057  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1058  skip_bits(&s->gb, 8);
1059  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1060  for (i = 0; i < nb_components; i++) /* reset dc */
1061  s->last_dc[i] = (4 << s->bits);
1062  reset = 1;
1063  } else
1064  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1065  }
1066  }
1067  }
1068  return reset;
1069 }
1070 
1071 /* Handles 1 to 4 components */
1072 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1073 {
1074  int i, mb_x, mb_y;
1075  unsigned width;
1076  uint16_t (*buffer)[4];
1077  int left[4], top[4], topleft[4];
1078  const int linesize = s->linesize[0];
1079  const int mask = ((1 << s->bits) - 1) << point_transform;
1080  int resync_mb_y = 0;
1081  int resync_mb_x = 0;
1082  int vpred[6];
1083 
1084  if (!s->bayer && s->nb_components < 3)
1085  return AVERROR_INVALIDDATA;
1086  if (s->bayer && s->nb_components > 2)
1087  return AVERROR_INVALIDDATA;
1088  if (s->nb_components <= 0 || s->nb_components > 4)
1089  return AVERROR_INVALIDDATA;
1090  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1091  return AVERROR_INVALIDDATA;
1092 
1093 
1094  s->restart_count = s->restart_interval;
1095 
1096  if (s->restart_interval == 0)
1097  s->restart_interval = INT_MAX;
1098 
1099  if (s->bayer)
1100  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1101  else
1102  width = s->mb_width;
1103 
1104  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1105  if (!s->ljpeg_buffer)
1106  return AVERROR(ENOMEM);
1107 
1108  buffer = s->ljpeg_buffer;
1109 
1110  for (i = 0; i < 4; i++)
1111  buffer[0][i] = 1 << (s->bits - 1);
1112 
1113  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1114  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1115 
1116  if (s->interlaced && s->bottom_field)
1117  ptr += linesize >> 1;
1118 
1119  for (i = 0; i < 4; i++)
1120  top[i] = left[i] = topleft[i] = buffer[0][i];
1121 
1122  if ((mb_y * s->width) % s->restart_interval == 0) {
1123  for (i = 0; i < 6; i++)
1124  vpred[i] = 1 << (s->bits-1);
1125  }
1126 
1127  for (mb_x = 0; mb_x < width; mb_x++) {
1128  int modified_predictor = predictor;
1129 
1130  if (get_bits_left(&s->gb) < 1) {
1131  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1132  return AVERROR_INVALIDDATA;
1133  }
1134 
1135  if (s->restart_interval && !s->restart_count){
1136  s->restart_count = s->restart_interval;
1137  resync_mb_x = mb_x;
1138  resync_mb_y = mb_y;
1139  for(i=0; i<4; i++)
1140  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1141  }
1142  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1143  modified_predictor = 1;
1144 
1145  for (i=0;i<nb_components;i++) {
1146  int pred, dc;
1147 
1148  topleft[i] = top[i];
1149  top[i] = buffer[mb_x][i];
1150 
1151  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1152  if(dc == 0xFFFFF)
1153  return -1;
1154 
1155  if (!s->bayer || mb_x) {
1156  pred = left[i];
1157  } else { /* This path runs only for the first line in bayer images */
1158  vpred[i] += dc;
1159  pred = vpred[i] - dc;
1160  }
1161 
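 /* Annotation: PREDICT() selects among the standard lossless-JPEG predictors
  * (left, top, top-left and their combinations) according to the predictor id;
  * predictor 1 (left neighbour only) is forced at the start of each row and
  * around restart points, where the other neighbours are not yet valid. */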
1162  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1163 
1164  left[i] = buffer[mb_x][i] =
1165  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1166  }
1167 
1168  if (s->restart_interval && !--s->restart_count) {
1169  align_get_bits(&s->gb);
1170  skip_bits(&s->gb, 16); /* skip RSTn */
1171  }
1172  }
1173  if (s->rct && s->nb_components == 4) {
1174  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1175  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1176  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1177  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1178  ptr[4*mb_x + 0] = buffer[mb_x][3];
1179  }
1180  } else if (s->nb_components == 4) {
1181  for(i=0; i<nb_components; i++) {
1182  int c= s->comp_index[i];
1183  if (s->bits <= 8) {
1184  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1185  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1186  }
1187  } else if(s->bits == 9) {
1188  return AVERROR_PATCHWELCOME;
1189  } else {
1190  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1191  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1192  }
1193  }
1194  }
1195  } else if (s->rct) {
1196  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1197  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1198  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1199  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1200  }
1201  } else if (s->pegasus_rct) {
1202  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1203  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1204  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1205  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1206  }
1207  } else if (s->bayer) {
1208  if (nb_components == 1) {
1209  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1210  for (mb_x = 0; mb_x < width; mb_x++)
1211  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1212  } else if (nb_components == 2) {
1213  for (mb_x = 0; mb_x < width; mb_x++) {
1214  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1215  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1216  }
1217  }
1218  } else {
1219  for(i=0; i<nb_components; i++) {
1220  int c= s->comp_index[i];
1221  if (s->bits <= 8) {
1222  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1223  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1224  }
1225  } else if(s->bits == 9) {
1226  return AVERROR_PATCHWELCOME;
1227  } else {
1228  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1229  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1230  }
1231  }
1232  }
1233  }
1234  }
1235  return 0;
1236 }
1237 
1238 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1239  int point_transform, int nb_components)
1240 {
1241  int i, mb_x, mb_y, mask;
1242  int bits= (s->bits+7)&~7;
1243  int resync_mb_y = 0;
1244  int resync_mb_x = 0;
1245 
1246  point_transform += bits - s->bits;
1247  mask = ((1 << s->bits) - 1) << point_transform;
1248 
1249  av_assert0(nb_components>=1 && nb_components<=4);
1250 
1251  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1252  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1253  if (get_bits_left(&s->gb) < 1) {
1254  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1255  return AVERROR_INVALIDDATA;
1256  }
1257  if (s->restart_interval && !s->restart_count){
1258  s->restart_count = s->restart_interval;
1259  resync_mb_x = mb_x;
1260  resync_mb_y = mb_y;
1261  }
1262 
1263  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1264  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1265  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1266  for (i = 0; i < nb_components; i++) {
1267  uint8_t *ptr;
1268  uint16_t *ptr16;
1269  int n, h, v, x, y, c, j, linesize;
1270  n = s->nb_blocks[i];
1271  c = s->comp_index[i];
1272  h = s->h_scount[i];
1273  v = s->v_scount[i];
1274  x = 0;
1275  y = 0;
1276  linesize= s->linesize[c];
1277 
1278  if(bits>8) linesize /= 2;
1279 
1280  for(j=0; j<n; j++) {
1281  int pred, dc;
1282 
1283  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1284  if(dc == 0xFFFFF)
1285  return -1;
1286  if ( h * mb_x + x >= s->width
1287  || v * mb_y + y >= s->height) {
1288  // Nothing to do
1289  } else if (bits<=8) {
1290  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1291  if(y==0 && toprow){
1292  if(x==0 && leftcol){
1293  pred= 1 << (bits - 1);
1294  }else{
1295  pred= ptr[-1];
1296  }
1297  }else{
1298  if(x==0 && leftcol){
1299  pred= ptr[-linesize];
1300  }else{
1301  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1302  }
1303  }
1304 
1305  if (s->interlaced && s->bottom_field)
1306  ptr += linesize >> 1;
1307  pred &= mask;
1308  *ptr= pred + ((unsigned)dc << point_transform);
1309  }else{
1310  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1311  if(y==0 && toprow){
1312  if(x==0 && leftcol){
1313  pred= 1 << (bits - 1);
1314  }else{
1315  pred= ptr16[-1];
1316  }
1317  }else{
1318  if(x==0 && leftcol){
1319  pred= ptr16[-linesize];
1320  }else{
1321  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1322  }
1323  }
1324 
1325  if (s->interlaced && s->bottom_field)
1326  ptr16 += linesize >> 1;
1327  pred &= mask;
1328  *ptr16= pred + ((unsigned)dc << point_transform);
1329  }
1330  if (++x == h) {
1331  x = 0;
1332  y++;
1333  }
1334  }
1335  }
1336  } else {
1337  for (i = 0; i < nb_components; i++) {
1338  uint8_t *ptr;
1339  uint16_t *ptr16;
1340  int n, h, v, x, y, c, j, linesize, dc;
1341  n = s->nb_blocks[i];
1342  c = s->comp_index[i];
1343  h = s->h_scount[i];
1344  v = s->v_scount[i];
1345  x = 0;
1346  y = 0;
1347  linesize = s->linesize[c];
1348 
1349  if(bits>8) linesize /= 2;
1350 
1351  for (j = 0; j < n; j++) {
1352  int pred;
1353 
1354  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1355  if(dc == 0xFFFFF)
1356  return -1;
1357  if ( h * mb_x + x >= s->width
1358  || v * mb_y + y >= s->height) {
1359  // Nothing to do
1360  } else if (bits<=8) {
1361  ptr = s->picture_ptr->data[c] +
1362  (linesize * (v * mb_y + y)) +
1363  (h * mb_x + x); //FIXME optimize this crap
1364  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1365 
1366  pred &= mask;
1367  *ptr = pred + ((unsigned)dc << point_transform);
1368  }else{
1369  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1370  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1371 
1372  pred &= mask;
1373  *ptr16= pred + ((unsigned)dc << point_transform);
1374  }
1375 
1376  if (++x == h) {
1377  x = 0;
1378  y++;
1379  }
1380  }
1381  }
1382  }
1383  if (s->restart_interval && !--s->restart_count) {
1384  align_get_bits(&s->gb);
1385  skip_bits(&s->gb, 16); /* skip RSTn */
1386  }
1387  }
1388  }
1389  return 0;
1390 }
1391 
1392 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1393  uint8_t *dst, const uint8_t *src,
1394  int linesize, int lowres)
1395 {
1396  switch (lowres) {
1397  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1398  break;
1399  case 1: copy_block4(dst, src, linesize, linesize, 4);
1400  break;
1401  case 2: copy_block2(dst, src, linesize, linesize, 2);
1402  break;
1403  case 3: *dst = *src;
1404  break;
1405  }
1406 }
1407 
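/* Annotation: for bit depths that are not a multiple of 8 the decoded samples
 * sit in the low bits of each output word, so shift_output() moves them up to
 * the most significant bits of the 8- or 16-bit storage format. */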
1408 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1409 {
1410  int block_x, block_y;
1411  int size = 8 >> s->avctx->lowres;
1412  if (s->bits > 8) {
1413  for (block_y=0; block_y<size; block_y++)
1414  for (block_x=0; block_x<size; block_x++)
1415  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1416  } else {
1417  for (block_y=0; block_y<size; block_y++)
1418  for (block_x=0; block_x<size; block_x++)
1419  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1420  }
1421 }
1422 
1423 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1424  int Al, const uint8_t *mb_bitmask,
1425  int mb_bitmask_size,
1426  const AVFrame *reference)
1427 {
1428  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1429  uint8_t *data[MAX_COMPONENTS];
1430  const uint8_t *reference_data[MAX_COMPONENTS];
1431  int linesize[MAX_COMPONENTS];
1432  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1433  int bytes_per_pixel = 1 + (s->bits > 8);
1434 
1435  if (mb_bitmask) {
1436  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1437  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1438  return AVERROR_INVALIDDATA;
1439  }
1440  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1441  }
1442 
1443  s->restart_count = 0;
1444 
1445  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1446  &chroma_v_shift);
1447  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1448  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1449 
1450  for (i = 0; i < nb_components; i++) {
1451  int c = s->comp_index[i];
1452  data[c] = s->picture_ptr->data[c];
1453  reference_data[c] = reference ? reference->data[c] : NULL;
1454  linesize[c] = s->linesize[c];
1455  s->coefs_finished[c] |= 1;
1456  }
1457 
1458  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1459  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1460  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1461 
1462  if (s->restart_interval && !s->restart_count)
1463  s->restart_count = s->restart_interval;
1464 
1465  if (get_bits_left(&s->gb) < 0) {
1466  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1467  -get_bits_left(&s->gb));
1468  return AVERROR_INVALIDDATA;
1469  }
1470  for (i = 0; i < nb_components; i++) {
1471  uint8_t *ptr;
1472  int n, h, v, x, y, c, j;
1473  int block_offset;
1474  n = s->nb_blocks[i];
1475  c = s->comp_index[i];
1476  h = s->h_scount[i];
1477  v = s->v_scount[i];
1478  x = 0;
1479  y = 0;
1480  for (j = 0; j < n; j++) {
1481  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1482  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1483 
1484  if (s->interlaced && s->bottom_field)
1485  block_offset += linesize[c] >> 1;
1486  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1487  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1488  ptr = data[c] + block_offset;
1489  } else
1490  ptr = NULL;
1491  if (!s->progressive) {
1492  if (copy_mb) {
1493  if (ptr)
1494  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1495  linesize[c], s->avctx->lowres);
1496 
1497  } else {
1498  s->bdsp.clear_block(s->block);
1499  if (decode_block(s, s->block, i,
1500  s->dc_index[i], s->ac_index[i],
1501  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1502  av_log(s->avctx, AV_LOG_ERROR,
1503  "error y=%d x=%d\n", mb_y, mb_x);
1504  return AVERROR_INVALIDDATA;
1505  }
1506  if (ptr) {
1507  s->idsp.idct_put(ptr, linesize[c], s->block);
1508  if (s->bits & 7)
1509  shift_output(s, ptr, linesize[c]);
1510  }
1511  }
1512  } else {
1513  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1514  (h * mb_x + x);
1515  int16_t *block = s->blocks[c][block_idx];
1516  if (Ah)
1517  block[0] += get_bits1(&s->gb) *
1518  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1519  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1520  s->quant_matrixes[s->quant_sindex[i]],
1521  Al) < 0) {
1522  av_log(s->avctx, AV_LOG_ERROR,
1523  "error y=%d x=%d\n", mb_y, mb_x);
1524  return AVERROR_INVALIDDATA;
1525  }
1526  }
1527  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1528  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1529  mb_x, mb_y, x, y, c, s->bottom_field,
1530  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1531  if (++x == h) {
1532  x = 0;
1533  y++;
1534  }
1535  }
1536  }
1537 
1538  handle_rstn(s, nb_components);
1539  }
1540  }
1541  return 0;
1542 }
1543 
1544 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1545  int se, int Ah, int Al)
1546 {
1547  int mb_x, mb_y;
1548  int EOBRUN = 0;
1549  int c = s->comp_index[0];
1550  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1551 
1552  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1553  if (se < ss || se > 63) {
1554  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1555  return AVERROR_INVALIDDATA;
1556  }
1557 
1558  // s->coefs_finished is a bitmask for coefficients coded
1559  // ss and se are parameters telling start and end coefficients
1560  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1561 
1562  s->restart_count = 0;
1563 
1564  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1565  int block_idx = mb_y * s->block_stride[c];
1566  int16_t (*block)[64] = &s->blocks[c][block_idx];
1567  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1568  if (get_bits_left(&s->gb) <= 0) {
1569  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1570  return AVERROR_INVALIDDATA;
1571  }
1572  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1573  int ret;
1574  if (s->restart_interval && !s->restart_count)
1575  s->restart_count = s->restart_interval;
1576 
1577  if (Ah)
1578  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1579  quant_matrix, ss, se, Al, &EOBRUN);
1580  else
1581  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1582  quant_matrix, ss, se, Al, &EOBRUN);
1583  if (ret < 0) {
1584  av_log(s->avctx, AV_LOG_ERROR,
1585  "error y=%d x=%d\n", mb_y, mb_x);
1586  return AVERROR_INVALIDDATA;
1587  }
1588 
1589  if (handle_rstn(s, 0))
1590  EOBRUN = 0;
1591  }
1592  }
1593  return 0;
1594 }
1595 
1596 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1597 {
1598  int mb_x, mb_y;
1599  int c;
1600  const int bytes_per_pixel = 1 + (s->bits > 8);
1601  const int block_size = s->lossless ? 1 : 8;
1602 
1603  for (c = 0; c < s->nb_components; c++) {
1604  uint8_t *data = s->picture_ptr->data[c];
1605  int linesize = s->linesize[c];
1606  int h = s->h_max / s->h_count[c];
1607  int v = s->v_max / s->v_count[c];
1608  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1609  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1610 
1611  if (~s->coefs_finished[c])
1612  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1613 
1614  if (s->interlaced && s->bottom_field)
1615  data += linesize >> 1;
1616 
1617  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1618  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1619  int block_idx = mb_y * s->block_stride[c];
1620  int16_t (*block)[64] = &s->blocks[c][block_idx];
1621  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1622  s->idsp.idct_put(ptr, linesize, *block);
1623  if (s->bits & 7)
1624  shift_output(s, ptr, linesize);
1625  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1626  }
1627  }
1628  }
1629 }
1630 
1631 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1632  int mb_bitmask_size, const AVFrame *reference)
1633 {
1634  int len, nb_components, i, h, v, predictor, point_transform;
1635  int index, id, ret;
1636  const int block_size = s->lossless ? 1 : 8;
1637  int ilv, prev_shift;
1638 
1639  if (!s->got_picture) {
1640  av_log(s->avctx, AV_LOG_WARNING,
1641  "Can not process SOS before SOF, skipping\n");
1642  return -1;
1643  }
1644 
1645  if (reference) {
1646  if (reference->width != s->picture_ptr->width ||
1647  reference->height != s->picture_ptr->height ||
1648  reference->format != s->picture_ptr->format) {
1649  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1650  return AVERROR_INVALIDDATA;
1651  }
1652  }
1653 
1654  /* XXX: verify len field validity */
1655  len = get_bits(&s->gb, 16);
1656  nb_components = get_bits(&s->gb, 8);
1657  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1658  avpriv_report_missing_feature(s->avctx,
1659  "decode_sos: nb_components (%d)",
1660  nb_components);
1661  return AVERROR_PATCHWELCOME;
1662  }
1663  if (len != 6 + 2 * nb_components) {
1664  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1665  return AVERROR_INVALIDDATA;
1666  }
1667  for (i = 0; i < nb_components; i++) {
1668  id = get_bits(&s->gb, 8) - 1;
1669  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1670  /* find component index */
1671  for (index = 0; index < s->nb_components; index++)
1672  if (id == s->component_id[index])
1673  break;
1674  if (index == s->nb_components) {
1675  av_log(s->avctx, AV_LOG_ERROR,
1676  "decode_sos: index(%d) out of components\n", index);
1677  return AVERROR_INVALIDDATA;
1678  }
1679  /* Metasoft MJPEG codec has Cb and Cr swapped */
1680  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1681  && nb_components == 3 && s->nb_components == 3 && i)
1682  index = 3 - i;
1683 
1684  s->quant_sindex[i] = s->quant_index[index];
1685  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1686  s->h_scount[i] = s->h_count[index];
1687  s->v_scount[i] = s->v_count[index];
1688 
1689  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1690  index = (index+2)%3;
1691 
1692  s->comp_index[i] = index;
1693 
1694  s->dc_index[i] = get_bits(&s->gb, 4);
1695  s->ac_index[i] = get_bits(&s->gb, 4);
1696 
1697  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1698  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1699  goto out_of_range;
1700  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1701  goto out_of_range;
1702  }
1703 
1704  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1705  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1706  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1707  prev_shift = get_bits(&s->gb, 4); /* Ah */
1708  point_transform = get_bits(&s->gb, 4); /* Al */
1709  }else
1710  prev_shift = point_transform = 0;
1711 
1712  if (nb_components > 1) {
1713  /* interleaved stream */
1714  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1715  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1716  } else if (!s->ls) { /* skip this for JPEG-LS */
1717  h = s->h_max / s->h_scount[0];
1718  v = s->v_max / s->v_scount[0];
1719  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1720  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1721  s->nb_blocks[0] = 1;
1722  s->h_scount[0] = 1;
1723  s->v_scount[0] = 1;
1724  }
1725 
1726  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1727  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1728  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1729  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1730  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1731 
1732 
1733  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1734  for (i = s->mjpb_skiptosod; i > 0; i--)
1735  skip_bits(&s->gb, 8);
1736 
1737 next_field:
1738  for (i = 0; i < nb_components; i++)
1739  s->last_dc[i] = (4 << s->bits);
1740 
1741  if (s->avctx->hwaccel) {
1742  int bytes_to_start = get_bits_count(&s->gb) / 8;
1743  av_assert0(bytes_to_start >= 0 &&
1744  s->raw_scan_buffer_size >= bytes_to_start);
1745 
1746  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1747  s->raw_scan_buffer + bytes_to_start,
1748  s->raw_scan_buffer_size - bytes_to_start);
1749  if (ret < 0)
1750  return ret;
1751 
1752  } else if (s->lossless) {
1753  av_assert0(s->picture_ptr == s->picture);
1754  if (CONFIG_JPEGLS_DECODER && s->ls) {
1755 // for () {
1756 // reset_ls_coding_parameters(s, 0);
1757 
1758  if ((ret = ff_jpegls_decode_picture(s, predictor,
1759  point_transform, ilv)) < 0)
1760  return ret;
1761  } else {
1762  if (s->rgb || s->bayer) {
1763  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1764  return ret;
1765  } else {
1766  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1767  point_transform,
1768  nb_components)) < 0)
1769  return ret;
1770  }
1771  }
1772  } else {
1773  if (s->progressive && predictor) {
1774  av_assert0(s->picture_ptr == s->picture);
1775  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1776  ilv, prev_shift,
1777  point_transform)) < 0)
1778  return ret;
1779  } else {
1780  if ((ret = mjpeg_decode_scan(s, nb_components,
1781  prev_shift, point_transform,
1782  mb_bitmask, mb_bitmask_size, reference)) < 0)
1783  return ret;
1784  }
1785  }
1786 
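 /* Annotation (AVRn-style interlacing): some streams store both fields in one
  * packet, separated by a restart marker (0xFFD1); when that marker follows,
  * the bottom-field flag is toggled and the second field is decoded by jumping
  * back to the next_field label above. */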
1787  if (s->interlaced &&
1788  get_bits_left(&s->gb) > 32 &&
1789  show_bits(&s->gb, 8) == 0xFF) {
1790  GetBitContext bak = s->gb;
1791  align_get_bits(&bak);
1792  if (show_bits(&bak, 16) == 0xFFD1) {
1793  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1794  s->gb = bak;
1795  skip_bits(&s->gb, 16);
1796  s->bottom_field ^= 1;
1797 
1798  goto next_field;
1799  }
1800  }
1801 
1802  emms_c();
1803  return 0;
1804  out_of_range:
1805  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1806  return AVERROR_INVALIDDATA;
1807 }
1808 
1809 int ff_mjpeg_decode_dri(MJpegDecodeContext *s)
1810 {
1811  if (get_bits(&s->gb, 16) != 4)
1812  return AVERROR_INVALIDDATA;
1813  s->restart_interval = get_bits(&s->gb, 16);
1814  s->restart_count = 0;
1815  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1816  s->restart_interval);
1817 
1818  return 0;
1819 }
1820 
1821 static int mjpeg_decode_app(MJpegDecodeContext *s)
1822 {
1823  int len, id, i;
1824 
1825  len = get_bits(&s->gb, 16);
1826  if (len < 6) {
1827  if (s->bayer) {
1828  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1829  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1830  skip_bits(&s->gb, len);
1831  return 0;
1832  } else
1833  return AVERROR_INVALIDDATA;
1834  }
1835  if (8 * len > get_bits_left(&s->gb))
1836  return AVERROR_INVALIDDATA;
1837 
1838  id = get_bits_long(&s->gb, 32);
1839  len -= 6;
1840 
1841  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1842  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1843  av_fourcc2str(av_bswap32(id)), id, len);
1844 
1845  /* Buggy AVID, it puts EOI only at every 10th frame. */
1846  /* This fourcc is also used by non-AVID files; it holds some
1847  information, but it is always present in AVID-created files. */
1848  if (id == AV_RB32("AVI1")) {
1849  /* structure:
1850  4bytes AVI1
1851  1bytes polarity
1852  1bytes always zero
1853  4bytes field_size
1854  4bytes field_size_less_padding
1855  */
1856  s->buggy_avid = 1;
1857  i = get_bits(&s->gb, 8); len--;
1858  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1859  goto out;
1860  }
1861 
1862  if (id == AV_RB32("JFIF")) {
1863  int t_w, t_h, v1, v2;
1864  if (len < 8)
1865  goto out;
1866  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1867  v1 = get_bits(&s->gb, 8);
1868  v2 = get_bits(&s->gb, 8);
1869  skip_bits(&s->gb, 8);
1870 
1871  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1872  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1873  if ( s->avctx->sample_aspect_ratio.num <= 0
1874  || s->avctx->sample_aspect_ratio.den <= 0) {
1875  s->avctx->sample_aspect_ratio.num = 0;
1876  s->avctx->sample_aspect_ratio.den = 1;
1877  }
1878 
1879  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1880  av_log(s->avctx, AV_LOG_INFO,
1881  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1882  v1, v2,
1883  s->avctx->sample_aspect_ratio.num,
1884  s->avctx->sample_aspect_ratio.den);
1885 
1886  len -= 8;
1887  if (len >= 2) {
1888  t_w = get_bits(&s->gb, 8);
1889  t_h = get_bits(&s->gb, 8);
1890  if (t_w && t_h) {
1891  /* skip thumbnail */
1892  if (len -10 - (t_w * t_h * 3) > 0)
1893  len -= t_w * t_h * 3;
1894  }
1895  len -= 2;
1896  }
1897  goto out;
1898  }
1899 
1900  if ( id == AV_RB32("Adob")
1901  && len >= 7
1902  && show_bits(&s->gb, 8) == 'e'
1903  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1904  skip_bits(&s->gb, 8); /* 'e' */
1905  skip_bits(&s->gb, 16); /* version */
1906  skip_bits(&s->gb, 16); /* flags0 */
1907  skip_bits(&s->gb, 16); /* flags1 */
1908  s->adobe_transform = get_bits(&s->gb, 8);
1909  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1910  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1911  len -= 7;
1912  goto out;
1913  }
1914 
1915  if (id == AV_RB32("LJIF")) {
1916  int rgb = s->rgb;
1917  int pegasus_rct = s->pegasus_rct;
1918  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1919  av_log(s->avctx, AV_LOG_INFO,
1920  "Pegasus lossless jpeg header found\n");
1921  skip_bits(&s->gb, 16); /* version ? */
1922  skip_bits(&s->gb, 16); /* unknown always 0? */
1923  skip_bits(&s->gb, 16); /* unknown always 0? */
1924  skip_bits(&s->gb, 16); /* unknown always 0? */
1925  switch (i=get_bits(&s->gb, 8)) {
1926  case 1:
1927  rgb = 1;
1928  pegasus_rct = 0;
1929  break;
1930  case 2:
1931  rgb = 1;
1932  pegasus_rct = 1;
1933  break;
1934  default:
1935  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1936  }
1937 
1938  len -= 9;
1939  if (s->got_picture)
1940  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1941  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1942  goto out;
1943  }
1944 
1945  s->rgb = rgb;
1946  s->pegasus_rct = pegasus_rct;
1947 
1948  goto out;
1949  }
1950  if (id == AV_RL32("colr") && len > 0) {
1951  s->colr = get_bits(&s->gb, 8);
1952  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1953  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1954  len --;
1955  goto out;
1956  }
1957  if (id == AV_RL32("xfrm") && len > 0) {
1958  s->xfrm = get_bits(&s->gb, 8);
1959  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1960  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1961  len --;
1962  goto out;
1963  }
1964 
1965  /* JPS extension by VRex */
1966  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1967  int flags, layout, type;
1968  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1969  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1970 
1971  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1972  skip_bits(&s->gb, 16); len -= 2; /* block length */
1973  skip_bits(&s->gb, 8); /* reserved */
1974  flags = get_bits(&s->gb, 8);
1975  layout = get_bits(&s->gb, 8);
1976  type = get_bits(&s->gb, 8);
1977  len -= 4;
1978 
1979  av_freep(&s->stereo3d);
1980  s->stereo3d = av_stereo3d_alloc();
1981  if (!s->stereo3d) {
1982  goto out;
1983  }
1984  if (type == 0) {
1985  s->stereo3d->type = AV_STEREO3D_2D;
1986  } else if (type == 1) {
1987  switch (layout) {
1988  case 0x01:
1989  s->stereo3d->type = AV_STEREO3D_LINES;
1990  break;
1991  case 0x02:
1992  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1993  break;
1994  case 0x03:
1995  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1996  break;
1997  }
1998  if (!(flags & 0x04)) {
1999  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2000  }
2001  }
2002  goto out;
2003  }
2004 
2005  /* EXIF metadata */
2006  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2007  GetByteContext gbytes;
2008  int ret, le, ifd_offset, bytes_read;
2009  const uint8_t *aligned;
2010 
2011  skip_bits(&s->gb, 16); // skip padding
2012  len -= 2;
2013 
2014  // init byte wise reading
2015  aligned = align_get_bits(&s->gb);
2016  bytestream2_init(&gbytes, aligned, len);
2017 
2018  // read TIFF header
2019  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2020  if (ret) {
2021  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2022  } else {
2023  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2024 
2025  // read 0th IFD and store the metadata
2026  // (return values > 0 indicate the presence of subimage metadata)
2027  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2028  if (ret < 0) {
2029  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2030  }
2031  }
2032 
2033  bytes_read = bytestream2_tell(&gbytes);
2034  skip_bits(&s->gb, bytes_read << 3);
2035  len -= bytes_read;
2036 
2037  goto out;
2038  }
2039 
2040  /* Apple MJPEG-A */
2041  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2042  id = get_bits_long(&s->gb, 32);
2043  len -= 4;
2044  /* Apple MJPEG-A */
2045  if (id == AV_RB32("mjpg")) {
2046  /* structure:
2047  4bytes field size
2048  4bytes pad field size
2049  4bytes next off
2050  4bytes quant off
2051  4bytes huff off
2052  4bytes image off
2053  4bytes scan off
2054  4bytes data off
2055  */
2056  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2057  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2058  }
2059  }
2060 
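 /* An ICC profile may be split over several APP2 markers; each part carries a
  * 1-based sequence number and the total part count. Parts are collected in
  * s->iccentries here and reassembled into frame side data once all of them
  * have been read. */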
2061  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2062  int id2;
2063  unsigned seqno;
2064  unsigned nummarkers;
2065 
2066  id = get_bits_long(&s->gb, 32);
2067  id2 = get_bits(&s->gb, 24);
2068  len -= 7;
2069  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2070  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2071  goto out;
2072  }
2073 
2074  skip_bits(&s->gb, 8);
2075  seqno = get_bits(&s->gb, 8);
2076  len -= 2;
2077  if (seqno == 0) {
2078  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2079  goto out;
2080  }
2081 
2082  nummarkers = get_bits(&s->gb, 8);
2083  len -= 1;
2084  if (nummarkers == 0) {
2085  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2086  goto out;
2087  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2088  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2089  goto out;
2090  } else if (seqno > nummarkers) {
2091  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2092  goto out;
2093  }
2094 
2095  /* Allocate if this is the first APP2 we've seen. */
2096  if (s->iccnum == 0) {
2097  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2098  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2099  return AVERROR(ENOMEM);
2100  }
2101  s->iccnum = nummarkers;
2102  }
2103 
2104  if (s->iccentries[seqno - 1].data) {
2105  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2106  goto out;
2107  }
2108 
2109  s->iccentries[seqno - 1].length = len;
2110  s->iccentries[seqno - 1].data = av_malloc(len);
2111  if (!s->iccentries[seqno - 1].data) {
2112  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2113  return AVERROR(ENOMEM);
2114  }
2115 
2116  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2117  skip_bits(&s->gb, len << 3);
2118  len = 0;
2119  s->iccread++;
2120 
2121  if (s->iccread > s->iccnum)
2122  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2123  }
2124 
2125 out:
2126  /* slow but needed for extreme adobe jpegs */
2127  if (len < 0)
2128  av_log(s->avctx, AV_LOG_ERROR,
2129  "mjpeg: error, decode_app parser read over the end\n");
2130  while (--len > 0)
2131  skip_bits(&s->gb, 8);
2132 
2133  return 0;
2134 }
2135 
2136 static int mjpeg_decode_com(MJpegDecodeContext *s)
2137 {
2138  int len = get_bits(&s->gb, 16);
2139  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2140  int i;
2141  char *cbuf = av_malloc(len - 1);
2142  if (!cbuf)
2143  return AVERROR(ENOMEM);
2144 
2145  for (i = 0; i < len - 2; i++)
2146  cbuf[i] = get_bits(&s->gb, 8);
2147  if (i > 0 && cbuf[i - 1] == '\n')
2148  cbuf[i - 1] = 0;
2149  else
2150  cbuf[i] = 0;
2151 
2152  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2153  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2154 
2155  /* buggy avid, it puts EOI only at every 10th frame */
2156  if (!strncmp(cbuf, "AVID", 4)) {
2157  parse_avid(s, cbuf, len);
2158  } else if (!strcmp(cbuf, "CS=ITU601"))
2159  s->cs_itu601 = 1;
2160  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2161  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2162  s->flipped = 1;
2163  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2164  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2165  s->multiscope = 2;
2166  }
2167 
2168  av_free(cbuf);
2169  }
2170 
2171  return 0;
2172 }
2173 
2174 /* return the 8 bit start code value and update the search
2175  state. Return -1 if no start code found */
2176 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2177 {
2178  const uint8_t *buf_ptr;
2179  unsigned int v, v2;
2180  int val;
2181  int skipped = 0;
2182 
2183  buf_ptr = *pbuf_ptr;
2184  while (buf_end - buf_ptr > 1) {
2185  v = *buf_ptr++;
2186  v2 = *buf_ptr;
2187  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2188  val = *buf_ptr++;
2189  goto found;
2190  }
2191  skipped++;
2192  }
2193  buf_ptr = buf_end;
2194  val = -1;
2195 found:
2196  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2197  *pbuf_ptr = buf_ptr;
2198  return val;
2199 }
2200 
2201 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2202  const uint8_t **buf_ptr, const uint8_t *buf_end,
2203  const uint8_t **unescaped_buf_ptr,
2204  int *unescaped_buf_size)
2205 {
2206  int start_code;
2207  start_code = find_marker(buf_ptr, buf_end);
2208 
2209  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2210  if (!s->buffer)
2211  return AVERROR(ENOMEM);
2212 
2213  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2214  if (start_code == SOS && !s->ls) {
2215  const uint8_t *src = *buf_ptr;
2216  const uint8_t *ptr = src;
2217  uint8_t *dst = s->buffer;
2218 
2219  #define copy_data_segment(skip) do { \
2220  ptrdiff_t length = (ptr - src) - (skip); \
2221  if (length > 0) { \
2222  memcpy(dst, src, length); \
2223  dst += length; \
2224  src = ptr; \
2225  } \
2226  } while (0)
2227 
2228  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2229  ptr = buf_end;
2230  copy_data_segment(0);
2231  } else {
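 /* In the entropy-coded segment a literal 0xFF byte is escaped as 0xFF 0x00;
  * the loop below drops the stuffing byte, passes RST0..RST7 restart markers
  * through to the scan decoder, and stops at any other marker, which ends the
  * scan data. */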
2232  while (ptr < buf_end) {
2233  uint8_t x = *(ptr++);
2234 
2235  if (x == 0xff) {
2236  ptrdiff_t skip = 0;
2237  while (ptr < buf_end && x == 0xff) {
2238  x = *(ptr++);
2239  skip++;
2240  }
2241 
2242  /* 0xFF, 0xFF, ... */
2243  if (skip > 1) {
2244  copy_data_segment(skip);
2245 
2246  /* decrement src as it is equal to ptr after the
2247  * copy_data_segment macro and we might want to
2248  * copy the current value of x later on */
2249  src--;
2250  }
2251 
2252  if (x < RST0 || x > RST7) {
2253  copy_data_segment(1);
2254  if (x)
2255  break;
2256  }
2257  }
2258  }
2259  if (src < ptr)
2260  copy_data_segment(0);
2261  }
2262  #undef copy_data_segment
2263 
2264  *unescaped_buf_ptr = s->buffer;
2265  *unescaped_buf_size = dst - s->buffer;
2266  memset(s->buffer + *unescaped_buf_size, 0,
2267  AV_INPUT_BUFFER_PADDING_SIZE);
2268 
2269  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2270  (buf_end - *buf_ptr) - (dst - s->buffer));
2271  } else if (start_code == SOS && s->ls) {
2272  const uint8_t *src = *buf_ptr;
2273  uint8_t *dst = s->buffer;
2274  int bit_count = 0;
2275  int t = 0, b = 0;
2276  PutBitContext pb;
2277 
2278  /* find marker */
2279  while (src + t < buf_end) {
2280  uint8_t x = src[t++];
2281  if (x == 0xff) {
2282  while ((src + t < buf_end) && x == 0xff)
2283  x = src[t++];
2284  if (x & 0x80) {
2285  t -= FFMIN(2, t);
2286  break;
2287  }
2288  }
2289  }
2290  bit_count = t * 8;
2291  init_put_bits(&pb, dst, t);
2292 
2293  /* unescape bitstream */
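 /* JPEG-LS uses bit stuffing instead of byte stuffing: the byte following an
  * 0xFF carries only 7 payload bits (its MSB is coded as 0), so it is
  * re-packed here with put_bits(&pb, 7, x). */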
2294  while (b < t) {
2295  uint8_t x = src[b++];
2296  put_bits(&pb, 8, x);
2297  if (x == 0xFF && b < t) {
2298  x = src[b++];
2299  if (x & 0x80) {
2300  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2301  x &= 0x7f;
2302  }
2303  put_bits(&pb, 7, x);
2304  bit_count--;
2305  }
2306  }
2307  flush_put_bits(&pb);
2308 
2309  *unescaped_buf_ptr = dst;
2310  *unescaped_buf_size = (bit_count + 7) >> 3;
2311  memset(s->buffer + *unescaped_buf_size, 0,
2312  AV_INPUT_BUFFER_PADDING_SIZE);
2313  } else {
2314  *unescaped_buf_ptr = *buf_ptr;
2315  *unescaped_buf_size = buf_end - *buf_ptr;
2316  }
2317 
2318  return start_code;
2319 }
2320 
2321 static void reset_icc_profile(MJpegDecodeContext *s)
2322 {
2323  int i;
2324 
2325  if (s->iccentries) {
2326  for (i = 0; i < s->iccnum; i++)
2327  av_freep(&s->iccentries[i].data);
2328  av_freep(&s->iccentries);
2329  }
2330 
2331  s->iccread = 0;
2332  s->iccnum = 0;
2333 }
2334 
2335 // SMV JPEG just stacks several output frames into one JPEG picture
2336 // we handle that by setting up the cropping parameters appropriately
2337 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2338 {
2339  MJpegDecodeContext *s = avctx->priv_data;
2340  int ret;
2341 
2342  if (s->smv_next_frame > 0) {
2343  av_assert0(s->smv_frame->buf[0]);
2344  av_frame_unref(frame);
2345  ret = av_frame_ref(frame, s->smv_frame);
2346  if (ret < 0)
2347  return ret;
2348  } else {
2349  av_assert0(frame->buf[0]);
2350  av_frame_unref(s->smv_frame);
2351  ret = av_frame_ref(s->smv_frame, frame);
2352  if (ret < 0)
2353  return ret;
2354  }
2355 
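 /* Expose only the current sub-frame: the stacked picture is coded_height
  * tall and sub-frame k = smv_next_frame occupies rows [k * height,
  * (k + 1) * height). For example, with avctx->height == 240 and four frames
  * per JPEG, sub-frame 2 gets crop_top == 480 and crop_bottom == 240. */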
2356  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2357 
2358  frame->width = avctx->coded_width;
2359  frame->height = avctx->coded_height;
2360  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2361  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2362 
2363  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2364 
2365  if (s->smv_next_frame == 0)
2366  av_frame_unref(s->smv_frame);
2367 
2368  return 0;
2369 }
2370 
2371 static int mjpeg_get_packet(AVCodecContext *avctx)
2372 {
2373  MJpegDecodeContext *s = avctx->priv_data;
2374  int ret;
2375 
2376  av_packet_unref(s->pkt);
2377  ret = ff_decode_get_packet(avctx, s->pkt);
2378  if (ret < 0)
2379  return ret;
2380 
2381 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2382  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2383  avctx->codec_id == AV_CODEC_ID_AMV) {
2384  ret = ff_sp5x_process_packet(avctx, s->pkt);
2385  if (ret < 0)
2386  return ret;
2387  }
2388 #endif
2389 
2390  s->buf_size = s->pkt->size;
2391 
2392  return 0;
2393 }
2394 
2395 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2396 {
2397  MJpegDecodeContext *s = avctx->priv_data;
2398  const uint8_t *buf_end, *buf_ptr;
2399  const uint8_t *unescaped_buf_ptr;
2400  int hshift, vshift;
2401  int unescaped_buf_size;
2402  int start_code;
2403  int i, index;
2404  int ret = 0;
2405  int is16bit;
2406 
2407  s->force_pal8 = 0;
2408 
2409  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2410  return smv_process_frame(avctx, frame);
2411 
2412  av_dict_free(&s->exif_metadata);
2413  av_freep(&s->stereo3d);
2414  s->adobe_transform = -1;
2415 
2416  if (s->iccnum != 0)
2417  reset_icc_profile(s);
2418 
2419  ret = mjpeg_get_packet(avctx);
2420  if (ret < 0)
2421  return ret;
2422 redo_for_pal8:
2423  buf_ptr = s->pkt->data;
2424  buf_end = s->pkt->data + s->pkt->size;
2425  while (buf_ptr < buf_end) {
2426  /* find start of next marker */
2427  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2428  &unescaped_buf_ptr,
2429  &unescaped_buf_size);
2430  /* EOF */
2431  if (start_code < 0) {
2432  break;
2433  } else if (unescaped_buf_size > INT_MAX / 8) {
2434  av_log(avctx, AV_LOG_ERROR,
2435  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2436  start_code, unescaped_buf_size, s->pkt->size);
2437  return AVERROR_INVALIDDATA;
2438  }
2439  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2440  start_code, buf_end - buf_ptr);
2441 
2442  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2443 
2444  if (ret < 0) {
2445  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2446  goto fail;
2447  }
2448 
2449  s->start_code = start_code;
2450  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2451  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2452 
2453  /* process markers */
2454  if (start_code >= RST0 && start_code <= RST7) {
2455  av_log(avctx, AV_LOG_DEBUG,
2456  "restart marker: %d\n", start_code & 0x0f);
2457  /* APP fields */
2458  } else if (start_code >= APP0 && start_code <= APP15) {
2459  if ((ret = mjpeg_decode_app(s)) < 0)
2460  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2461  av_err2str(ret));
2462  /* Comment */
2463  } else if (start_code == COM) {
2464  ret = mjpeg_decode_com(s);
2465  if (ret < 0)
2466  return ret;
2467  } else if (start_code == DQT) {
2468  ret = ff_mjpeg_decode_dqt(s);
2469  if (ret < 0)
2470  return ret;
2471  }
2472 
2473  ret = -1;
2474 
2475  if (!CONFIG_JPEGLS_DECODER &&
2476  (start_code == SOF48 || start_code == LSE)) {
2477  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2478  return AVERROR(ENOSYS);
2479  }
2480 
2481  if (avctx->skip_frame == AVDISCARD_ALL) {
2482  switch(start_code) {
2483  case SOF0:
2484  case SOF1:
2485  case SOF2:
2486  case SOF3:
2487  case SOF48:
2488  case SOI:
2489  case SOS:
2490  case EOI:
2491  break;
2492  default:
2493  goto skip;
2494  }
2495  }
2496 
2497  switch (start_code) {
2498  case SOI:
2499  s->restart_interval = 0;
2500  s->restart_count = 0;
2501  s->raw_image_buffer = buf_ptr;
2502  s->raw_image_buffer_size = buf_end - buf_ptr;
2503  /* nothing to do on SOI */
2504  break;
2505  case DHT:
2506  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2507  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2508  goto fail;
2509  }
2510  break;
2511  case SOF0:
2512  case SOF1:
2513  if (start_code == SOF0)
2514  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2515  else
2516  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2517  s->lossless = 0;
2518  s->ls = 0;
2519  s->progressive = 0;
2520  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2521  goto fail;
2522  break;
2523  case SOF2:
2524  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2525  s->lossless = 0;
2526  s->ls = 0;
2527  s->progressive = 1;
2528  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2529  goto fail;
2530  break;
2531  case SOF3:
2532  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2533  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2534  s->lossless = 1;
2535  s->ls = 0;
2536  s->progressive = 0;
2537  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2538  goto fail;
2539  break;
2540  case SOF48:
2541  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2542  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2543  s->lossless = 1;
2544  s->ls = 1;
2545  s->progressive = 0;
2546  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2547  goto fail;
2548  break;
2549  case LSE:
2550  if (!CONFIG_JPEGLS_DECODER ||
2551  (ret = ff_jpegls_decode_lse(s)) < 0)
2552  goto fail;
2553  if (ret == 1)
2554  goto redo_for_pal8;
2555  break;
2556  case EOI:
2557 eoi_parser:
2558  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2559  s->progressive && s->cur_scan && s->got_picture)
2560  mjpeg_idct_scan_progressive_ac(s);
2561  s->cur_scan = 0;
2562  if (!s->got_picture) {
2563  av_log(avctx, AV_LOG_WARNING,
2564  "Found EOI before any SOF, ignoring\n");
2565  break;
2566  }
2567  if (s->interlaced) {
2568  s->bottom_field ^= 1;
2569  /* if not bottom field, do not output image yet */
2570  if (s->bottom_field == !s->interlace_polarity)
2571  break;
2572  }
2573  if (avctx->skip_frame == AVDISCARD_ALL) {
2574  s->got_picture = 0;
2575  ret = AVERROR(EAGAIN);
2576  goto the_end_no_picture;
2577  }
2578  if (s->avctx->hwaccel) {
2579  ret = s->avctx->hwaccel->end_frame(s->avctx);
2580  if (ret < 0)
2581  return ret;
2582 
2583  av_freep(&s->hwaccel_picture_private);
2584  }
2585  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2586  return ret;
2587  s->got_picture = 0;
2588 
2589  frame->pkt_dts = s->pkt->dts;
2590 
2591  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2592  int qp = FFMAX3(s->qscale[0],
2593  s->qscale[1],
2594  s->qscale[2]);
2595 
2596  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2597  }
2598 
2599  goto the_end;
2600  case SOS:
2601  s->raw_scan_buffer = buf_ptr;
2602  s->raw_scan_buffer_size = buf_end - buf_ptr;
2603 
2604  s->cur_scan++;
2605  if (avctx->skip_frame == AVDISCARD_ALL) {
2606  skip_bits(&s->gb, get_bits_left(&s->gb));
2607  break;
2608  }
2609 
2610  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2611  (avctx->err_recognition & AV_EF_EXPLODE))
2612  goto fail;
2613  break;
2614  case DRI:
2615  if ((ret = mjpeg_decode_dri(s)) < 0)
2616  return ret;
2617  break;
2618  case SOF5:
2619  case SOF6:
2620  case SOF7:
2621  case SOF9:
2622  case SOF10:
2623  case SOF11:
2624  case SOF13:
2625  case SOF14:
2626  case SOF15:
2627  case JPG:
2628  av_log(avctx, AV_LOG_ERROR,
2629  "mjpeg: unsupported coding type (%x)\n", start_code);
2630  break;
2631  }
2632 
2633 skip:
2634  /* eof process start code */
2635  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2636  av_log(avctx, AV_LOG_DEBUG,
2637  "marker parser used %d bytes (%d bits)\n",
2638  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2639  }
2640  if (s->got_picture && s->cur_scan) {
2641  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2642  goto eoi_parser;
2643  }
2644  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2645  return AVERROR_INVALIDDATA;
2646 fail:
2647  s->got_picture = 0;
2648  return ret;
2649 the_end:
2650 
2651  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2652 
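 /* Planes flagged in upscale_h/upscale_v were decoded at a lower resolution
  * than the output pixel format provides; the two blocks below stretch them
  * in place, horizontally by averaging neighbouring samples (2x or 3x) and
  * vertically by averaging the two nearest source rows, working backwards
  * through each plane so no extra buffer is needed. */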
2653  if (AV_RB32(s->upscale_h)) {
2654  int p;
2655  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2659  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2661  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2662  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2663  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2664  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2665  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2666  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2667  );
2668  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2669  if (ret)
2670  return ret;
2671 
2672  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2673  for (p = 0; p<s->nb_components; p++) {
2674  uint8_t *line = s->picture_ptr->data[p];
2675  int w = s->width;
2676  int h = s->height;
2677  if (!s->upscale_h[p])
2678  continue;
2679  if (p==1 || p==2) {
2680  w = AV_CEIL_RSHIFT(w, hshift);
2681  h = AV_CEIL_RSHIFT(h, vshift);
2682  }
2683  if (s->upscale_v[p] == 1)
2684  h = (h+1)>>1;
2685  av_assert0(w > 0);
2686  for (i = 0; i < h; i++) {
2687  if (s->upscale_h[p] == 1) {
2688  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2689  else line[w - 1] = line[(w - 1) / 2];
2690  for (index = w - 2; index > 0; index--) {
2691  if (is16bit)
2692  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2693  else
2694  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2695  }
2696  } else if (s->upscale_h[p] == 2) {
2697  if (is16bit) {
2698  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2699  if (w > 1)
2700  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2701  } else {
2702  line[w - 1] = line[(w - 1) / 3];
2703  if (w > 1)
2704  line[w - 2] = line[w - 1];
2705  }
2706  for (index = w - 3; index > 0; index--) {
2707  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2708  }
2709  }
2710  line += s->linesize[p];
2711  }
2712  }
2713  }
2714  if (AV_RB32(s->upscale_v)) {
2715  int p;
2716  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2719  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2720  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2721  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2727  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2728  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2729  );
2730  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2731  if (ret)
2732  return ret;
2733 
2734  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2735  for (p = 0; p < s->nb_components; p++) {
2736  uint8_t *dst;
2737  int w = s->width;
2738  int h = s->height;
2739  if (!s->upscale_v[p])
2740  continue;
2741  if (p==1 || p==2) {
2742  w = AV_CEIL_RSHIFT(w, hshift);
2743  h = AV_CEIL_RSHIFT(h, vshift);
2744  }
2745  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2746  for (i = h - 1; i; i--) {
2747  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2748  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2749  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2750  memcpy(dst, src1, w);
2751  } else {
2752  for (index = 0; index < w; index++)
2753  dst[index] = (src1[index] + src2[index]) >> 1;
2754  }
2755  dst -= s->linesize[p];
2756  }
2757  }
2758  }
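 /* A few encoders (e.g. the Intel JPEG Library or the Metasoft MJPEG codec,
  * detected via their COM marker) store the image bottom-up; flip each plane
  * vertically in place by swapping rows. */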
2759  if (s->flipped && !s->rgb) {
2760  int j;
2761  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2762  if (ret)
2763  return ret;
2764 
2765  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2766  for (index=0; index<s->nb_components; index++) {
2767  uint8_t *dst = s->picture_ptr->data[index];
2768  int w = s->picture_ptr->width;
2769  int h = s->picture_ptr->height;
2770  if(index && index<3){
2771  w = AV_CEIL_RSHIFT(w, hshift);
2772  h = AV_CEIL_RSHIFT(h, vshift);
2773  }
2774  if(dst){
2775  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2776  for (i=0; i<h/2; i++) {
2777  for (j=0; j<w; j++)
2778  FFSWAP(int, dst[j], dst2[j]);
2779  dst += s->picture_ptr->linesize[index];
2780  dst2 -= s->picture_ptr->linesize[index];
2781  }
2782  }
2783  }
2784  }
2785  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2786  int w = s->picture_ptr->width;
2787  int h = s->picture_ptr->height;
2788  av_assert0(s->nb_components == 4);
2789  for (i=0; i<h; i++) {
2790  int j;
2791  uint8_t *dst[4];
2792  for (index=0; index<4; index++) {
2793  dst[index] = s->picture_ptr->data[index]
2794  + s->picture_ptr->linesize[index]*i;
2795  }
2796  for (j=0; j<w; j++) {
2797  int k = dst[3][j];
2798  int r = dst[0][j] * k;
2799  int g = dst[1][j] * k;
2800  int b = dst[2][j] * k;
2801  dst[0][j] = g*257 >> 16;
2802  dst[1][j] = b*257 >> 16;
2803  dst[2][j] = r*257 >> 16;
2804  dst[3][j] = 255;
2805  }
2806  }
2807  }
2808  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2809  int w = s->picture_ptr->width;
2810  int h = s->picture_ptr->height;
2811  av_assert0(s->nb_components == 4);
2812  for (i=0; i<h; i++) {
2813  int j;
2814  uint8_t *dst[4];
2815  for (index=0; index<4; index++) {
2816  dst[index] = s->picture_ptr->data[index]
2817  + s->picture_ptr->linesize[index]*i;
2818  }
2819  for (j=0; j<w; j++) {
2820  int k = dst[3][j];
2821  int r = (255 - dst[0][j]) * k;
2822  int g = (128 - dst[1][j]) * k;
2823  int b = (128 - dst[2][j]) * k;
2824  dst[0][j] = r*257 >> 16;
2825  dst[1][j] = (g*257 >> 16) + 128;
2826  dst[2][j] = (b*257 >> 16) + 128;
2827  dst[3][j] = 255;
2828  }
2829  }
2830  }
2831 
2832  if (s->stereo3d) {
2833  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2834  if (stereo) {
2835  stereo->type = s->stereo3d->type;
2836  stereo->flags = s->stereo3d->flags;
2837  }
2838  av_freep(&s->stereo3d);
2839  }
2840 
2841  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2842  AVFrameSideData *sd;
2843  size_t offset = 0;
2844  int total_size = 0;
2845  int i;
2846 
2847  /* Sum size of all parts. */
2848  for (i = 0; i < s->iccnum; i++)
2849  total_size += s->iccentries[i].length;
2850 
2851  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2852  if (!sd) {
2853  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2854  return AVERROR(ENOMEM);
2855  }
2856 
2857  /* Reassemble the parts, which are now in-order. */
2858  for (i = 0; i < s->iccnum; i++) {
2859  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2860  offset += s->iccentries[i].length;
2861  }
2862  }
2863 
2864  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2865  av_dict_free(&s->exif_metadata);
2866 
2867  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2868  ret = smv_process_frame(avctx, frame);
2869  if (ret < 0) {
2870  av_frame_unref(frame);
2871  return ret;
2872  }
2873  }
2874  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2875  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2876  avctx->coded_height > s->orig_height) {
2877  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2878  frame->crop_top = frame->height - avctx->height;
2879  }
2880 
2881  ret = 0;
2882 
2883 the_end_no_picture:
2884  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2885  buf_end - buf_ptr);
2886 
2887  return ret;
2888 }
2889 
2890 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2891  * even without having called ff_mjpeg_decode_init(). */
2892 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2893 {
2894  MJpegDecodeContext *s = avctx->priv_data;
2895  int i, j;
2896 
2897  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2898  av_log(avctx, AV_LOG_INFO, "Single field\n");
2899  }
2900 
2901  if (s->picture) {
2902  av_frame_free(&s->picture);
2903  s->picture_ptr = NULL;
2904  } else if (s->picture_ptr)
2905  av_frame_unref(s->picture_ptr);
2906 
2907  av_packet_free(&s->pkt);
2908 
2909  av_frame_free(&s->smv_frame);
2910 
2911  av_freep(&s->buffer);
2912  av_freep(&s->stereo3d);
2913  av_freep(&s->ljpeg_buffer);
2914  s->ljpeg_buffer_size = 0;
2915 
2916  for (i = 0; i < 3; i++) {
2917  for (j = 0; j < 4; j++)
2918  ff_free_vlc(&s->vlcs[i][j]);
2919  }
2920  for (i = 0; i < MAX_COMPONENTS; i++) {
2921  av_freep(&s->blocks[i]);
2922  av_freep(&s->last_nnz[i]);
2923  }
2924  av_dict_free(&s->exif_metadata);
2925 
2926  reset_icc_profile(s);
2927 
2928  av_freep(&s->hwaccel_picture_private);
2929  av_freep(&s->jls_state);
2930 
2931  return 0;
2932 }
2933 
2934 static void decode_flush(AVCodecContext *avctx)
2935 {
2936  MJpegDecodeContext *s = avctx->priv_data;
2937  s->got_picture = 0;
2938 
2939  s->smv_next_frame = 0;
2940  av_frame_unref(s->smv_frame);
2941 }
2942 
2943 #if CONFIG_MJPEG_DECODER
2944 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2945 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2946 static const AVOption options[] = {
2947  { "extern_huff", "Use external huffman table.",
2948  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2949  { NULL },
2950 };
2951 
2952 static const AVClass mjpegdec_class = {
2953  .class_name = "MJPEG decoder",
2954  .item_name = av_default_item_name,
2955  .option = options,
2956  .version = LIBAVUTIL_VERSION_INT,
2957 };
2958 
2959 const AVCodec ff_mjpeg_decoder = {
2960  .name = "mjpeg",
2961  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2962  .type = AVMEDIA_TYPE_VIDEO,
2963  .id = AV_CODEC_ID_MJPEG,
2964  .priv_data_size = sizeof(MJpegDecodeContext),
2965  .init = ff_mjpeg_decode_init,
2966  .close = ff_mjpeg_decode_end,
2967  .receive_frame = ff_mjpeg_receive_frame,
2968  .flush = decode_flush,
2969  .capabilities = AV_CODEC_CAP_DR1,
2970  .max_lowres = 3,
2971  .priv_class = &mjpegdec_class,
2975  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2976 #if CONFIG_MJPEG_NVDEC_HWACCEL
2977  HWACCEL_NVDEC(mjpeg),
2978 #endif
2979 #if CONFIG_MJPEG_VAAPI_HWACCEL
2980  HWACCEL_VAAPI(mjpeg),
2981 #endif
2982  NULL
2983  },
2984 };
2985 #endif
2986 #if CONFIG_THP_DECODER
2987 const AVCodec ff_thp_decoder = {
2988  .name = "thp",
2989  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2990  .type = AVMEDIA_TYPE_VIDEO,
2991  .id = AV_CODEC_ID_THP,
2992  .priv_data_size = sizeof(MJpegDecodeContext),
2993  .init = ff_mjpeg_decode_init,
2994  .close = ff_mjpeg_decode_end,
2995  .receive_frame = ff_mjpeg_receive_frame,
2996  .flush = decode_flush,
2997  .capabilities = AV_CODEC_CAP_DR1,
2998  .max_lowres = 3,
3001 };
3002 #endif
3003 
3004 #if CONFIG_SMVJPEG_DECODER
3005 const AVCodec ff_smvjpeg_decoder = {
3006  .name = "smvjpeg",
3007  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
3008  .type = AVMEDIA_TYPE_VIDEO,
3009  .id = AV_CODEC_ID_SMVJPEG,
3010  .priv_data_size = sizeof(MJpegDecodeContext),
3011  .init = ff_mjpeg_decode_init,
3012  .close = ff_mjpeg_decode_end,
3013  .receive_frame = ff_mjpeg_receive_frame,
3014  .flush = decode_flush,
3015  .capabilities = AV_CODEC_CAP_DR1,
3018 };
3019 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:102
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:417
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1355
AVCodec
AVCodec.
Definition: codec.h:197
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:292
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:224
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:57
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:603
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:956
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1083
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1392
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2934
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2540
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:954
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1320
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:707
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:547
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:275
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:108
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:191
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
index
fg index
Definition: ffmpeg_filter.c:168
AVFrame::width
int width
Definition: frame.h:361
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:431
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1625
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2337
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:985
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:788
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:143
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:196
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:798
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2371
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:179
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:150
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1299
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:216
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2580
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:508
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:238
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1238
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1408
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:388
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:117
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1669
fail
#define fail()
Definition: checkasm.h:136
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:433
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1621
GetBitContext
Definition: get_bits.h:62
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2136
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:54
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:459
val
static double val(void *priv, double ch)
Definition: aeval.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2568
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:567
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:373
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:62
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
aligned
static int aligned(int val)
Definition: dashdec.c:169
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:855
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:401
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1819
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1623
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1037
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:150
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:481
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:97
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:402
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1624
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:357
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:353
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:400
ff_thp_decoder
const AVCodec ff_thp_decoder
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2321
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2892
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:49
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:389
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2395
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:408
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:379
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:60
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:111
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:191
ff_smvjpeg_decoder
const AVCodec ff_smvjpeg_decoder
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:380
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1596
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:195
SOF13
@ SOF13
Definition: mjpeg.h:52
receive_frame
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:555
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:255
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:200
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1423
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:972
lowres
static int lowres
Definition: ffplay.c:334
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1544
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1331
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:68
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1428
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1648
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1072
dc
Intra DC prediction block[y][x] dc[1]; the surrounding Snow bitstream description lives in doc/snow.txt.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:116
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:327
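A minimal sketch of how this pairs with av_frame_unref(); src is assumed to be an already valid AVFrame and error handling is abbreviated:
  AVFrame *copy = av_frame_alloc();
  if (!copy)
      return AVERROR(ENOMEM);
  int ret = av_frame_ref(copy, src);   /* adds a reference, no pixel copy */
  if (ret < 0) {
      av_frame_free(&copy);
      return ret;
  }
  /* ... use copy ... */
  av_frame_unref(copy);                /* drop the reference */
  av_frame_free(&copy);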
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:873
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:121
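Since the macro's result is only valid inside a function argument, a typical illustrative use is logging a failure code directly:
  int ret = avcodec_send_packet(avctx, pkt);
  if (ret < 0)
      av_log(avctx, AV_LOG_ERROR, "send_packet failed: %s\n", av_err2str(ret));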
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1631
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:263
AV_RB32
Read a 32-bit big-endian value from memory (one of the template-generated byte-access macros).
Definition: bytestream.h:96
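A small usage sketch; the macro family is provided by libavutil/intreadwrite.h and the buffer contents here are made up:
  const uint8_t buf[4] = { 0x00, 0x00, 0x00, 0x10 };
  uint32_t v = AV_RB32(buf);   /* reads the 4 bytes big-endian: v == 16 */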
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3);
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
Definition: frame.h:376
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:139
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1110
offset
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be. (vf offset)
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:322
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
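A minimal allocation/free pattern, assuming nothing beyond the public packet API:
  AVPacket *pkt = av_packet_alloc();
  if (!pkt)
      return AVERROR(ENOMEM);
  /* ... fill pkt->data / pkt->size or hand it to av_read_frame() ... */
  av_packet_free(&pkt);        /* unrefs the data and frees the packet */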
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2176
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
layout
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
src1
#define src1
Definition: h264pred.c:140
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2208
i
int i
Definition: input.c:406
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:806
code
Test the status of outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some reason, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:480
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:447
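A sketch of the peek-then-consume pattern with the internal GetBitContext API; the buffer contents are illustrative:
  GetBitContext gb;
  if (init_get_bits8(&gb, buf, buf_size) < 0)
      return AVERROR_INVALIDDATA;
  if (show_bits(&gb, 16) == 0xFFD8)   /* peek without moving the read pointer */
      skip_bits(&gb, 16);             /* now actually consume the marker */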
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:50
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1622
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1809
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:129
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
Definition: utils.c:50
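Typical call pattern; ctx->buffer and ctx->buffer_size are hypothetical fields of a decoder context:
  av_fast_padded_malloc(&ctx->buffer, &ctx->buffer_size, needed_size);
  if (!ctx->buffer)
      return AVERROR(ENOMEM);
  /* the trailing AV_INPUT_BUFFER_PADDING_SIZE bytes are zeroed padding */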
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1306
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:437
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:243
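A minimal sketch pairing it with av_freep():
  int16_t *coeffs = av_mallocz(64 * sizeof(*coeffs));
  if (!coeffs)
      return AVERROR(ENOMEM);
  /* ... */
  av_freep(&coeffs);           /* frees the block and NULLs the pointer */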
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:970
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:552
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:589
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:559
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:946
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
frame
These buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1303
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Scaled motion vector of the left neighbouring block; the median of the scaled left, top and top-right vectors is used as the motion vector prediction (see doc/snow.txt).
Definition: snow.txt:386
AV_RL32
Read a 32-bit little-endian value from memory (one of the template-generated byte-access macros).
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2201
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
OFFSET
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be. vf: default, minimum, maximum, flags; name is the option name, keep it simple and lowercase; descriptions are written without punctuation and describe what they do, for example "set the foo of the bar"; offset is the offset of the field in your local context, see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:379
AVFrame::height
int height
Definition: frame.h:361
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:212
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:598
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
These buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset.
Definition: tiff_common.c:262
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:429
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1298
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:299
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:567
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
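Internal helper; a sketch of how a header parser might use it, with width/height assumed to come from the bitstream:
  int ret = ff_set_dimensions(avctx, width, height);
  if (ret < 0)
      return ret;              /* invalid or overflowing dimensions */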
SOI
@ SOI
Definition: mjpeg.h:70
ff_mjpeg_decoder
const AVCodec ff_mjpeg_decoder
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1821
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1019
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
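An illustrative sketch of attaching stereo information to a decoded frame; the chosen packing is only an example:
  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
  if (!stereo)
      return AVERROR(ENOMEM);
  stereo->type  = AV_STEREO3D_TOPBOTTOM;
  stereo->flags = AV_STEREO3D_FLAG_INVERT;   /* right/bottom holds the left view */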
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:404
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:406
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:540
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
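A minimal copy-and-free sketch; src is assumed to be an existing AVDictionary:
  AVDictionary *dst = NULL;
  int ret = av_dict_copy(&dst, src, 0);   /* 0: no special merge flags */
  if (ret < 0)
      return ret;
  /* ... use dst, e.g. attach it as metadata ... */
  av_dict_free(&dst);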
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
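A bounds-checked read-loop sketch using the same GetByteContext API:
  GetByteContext gbc;
  bytestream2_init(&gbc, buf, buf_size);
  while (bytestream2_get_bytes_left(&gbc) >= 4) {
      uint32_t tag = bytestream2_get_be32(&gbc);   /* 4 bytes, big-endian */
      /* ... dispatch on tag ... */
  }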
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
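An illustrative tag check; the 'MJPG' fourcc here is only an example:
  if (avctx->codec_tag == MKTAG('M', 'J', 'P', 'G'))
      av_log(avctx, AV_LOG_DEBUG, "container tag %s\n",
             av_fourcc2str(avctx->codec_tag));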
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:176
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
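Typical validation before allocating picture buffers:
  int ret = av_image_check_size(width, height, 0, avctx);
  if (ret < 0)
      return ret;              /* image would not be addressable with an int */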
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AV_RB24
Read a 24-bit big-endian value from memory (one of the template-generated byte-access macros).
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
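A sketch of how such a prediction macro is typically used in lossless JPEG decoding; all variable names are illustrative and the residual decoding is elided:
  int pred;
  /* topleft, top, left: neighbouring reconstructed samples;
     predictor: JPEG lossless prediction mode selector */
  PREDICT(pred, topleft, top, left, predictor);
  sample = pred + residual;    /* residual comes from the entropy decoder */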
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82