FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "decode.h"
40 #include "hwconfig.h"
41 #include "idctdsp.h"
42 #include "internal.h"
43 #include "jpegtables.h"
44 #include "mjpeg.h"
45 #include "mjpegdec.h"
46 #include "jpeglsdec.h"
47 #include "profiles.h"
48 #include "put_bits.h"
49 #include "tiff.h"
50 #include "exif.h"
51 #include "bytestream.h"
52 
53 
54 static int init_default_huffman_tables(MJpegDecodeContext *s)
55 {
56  static const struct {
57  int class;
58  int index;
59  const uint8_t *bits;
60  const uint8_t *values;
61  int length;
62  } ht[] = {
63  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
64  avpriv_mjpeg_val_dc, 12 },
65  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
66  avpriv_mjpeg_val_dc, 12 },
67  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
68  avpriv_mjpeg_val_ac_luminance, 162 },
69  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
70  avpriv_mjpeg_val_ac_chrominance, 162 },
71  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
72  avpriv_mjpeg_val_ac_luminance, 162 },
73  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
74  avpriv_mjpeg_val_ac_chrominance, 162 },
75  };
76  int i, ret;
77 
78  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
79  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
80  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
81  ht[i].bits, ht[i].values,
82  ht[i].class == 1, s->avctx);
83  if (ret < 0)
84  return ret;
85 
86  if (ht[i].class < 2) {
87  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
88  ht[i].bits + 1, 16);
89  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
90  ht[i].values, ht[i].length);
91  }
92  }
93 
94  return 0;
95 }
96 
97 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
98 {
99  s->buggy_avid = 1;
100  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
101  s->interlace_polarity = 1;
102  if (len > 14 && buf[12] == 2) /* 2 - PAL */
103  s->interlace_polarity = 0;
104  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
105  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
106 }
107 
108 static void init_idct(AVCodecContext *avctx)
109 {
110  MJpegDecodeContext *s = avctx->priv_data;
111 
112  ff_idctdsp_init(&s->idsp, avctx);
113  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
114  ff_zigzag_direct);
115 }
116 
117 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
118 {
119  MJpegDecodeContext *s = avctx->priv_data;
120  int ret;
121 
122  if (!s->picture_ptr) {
123  s->picture = av_frame_alloc();
124  if (!s->picture)
125  return AVERROR(ENOMEM);
126  s->picture_ptr = s->picture;
127  }
128 
129  s->pkt = av_packet_alloc();
130  if (!s->pkt)
131  return AVERROR(ENOMEM);
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp, avctx);
135  ff_hpeldsp_init(&s->hdsp, avctx->flags);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantize tables */
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
221  return AVERROR_INVALIDDATA;
222  }
223  }
224 
225  // XXX FIXME fine-tune, and perhaps add dc too
226  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
227  s->quant_matrixes[index][8]) >> 1;
228  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
229  index, s->qscale[index]);
230  len -= 1 + 64 * (1+pr);
231  }
232  return 0;
233 }
234 
235 /* decode huffman tables and build VLC decoders */
236 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
237 {
238  int len, index, i, class, n, v;
239  uint8_t bits_table[17];
240  uint8_t val_table[256];
241  int ret = 0;
242 
243  len = get_bits(&s->gb, 16) - 2;
244 
245  if (8*len > get_bits_left(&s->gb)) {
246  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
247  return AVERROR_INVALIDDATA;
248  }
249 
250  while (len > 0) {
251  if (len < 17)
252  return AVERROR_INVALIDDATA;
253  class = get_bits(&s->gb, 4);
254  if (class >= 2)
255  return AVERROR_INVALIDDATA;
256  index = get_bits(&s->gb, 4);
257  if (index >= 4)
258  return AVERROR_INVALIDDATA;
259  n = 0;
260  for (i = 1; i <= 16; i++) {
261  bits_table[i] = get_bits(&s->gb, 8);
262  n += bits_table[i];
263  }
264  len -= 17;
265  if (len < n || n > 256)
266  return AVERROR_INVALIDDATA;
267 
268  for (i = 0; i < n; i++) {
269  v = get_bits(&s->gb, 8);
270  val_table[i] = v;
271  }
272  len -= n;
273 
274  /* build VLC and flush previous vlc if present */
275  ff_free_vlc(&s->vlcs[class][index]);
276  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
277  class, index, n);
278  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
279  val_table, class > 0, s->avctx)) < 0)
280  return ret;
281 
282  if (class > 0) {
283  ff_free_vlc(&s->vlcs[2][index]);
284  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
285  val_table, 0, s->avctx)) < 0)
286  return ret;
287  }
288 
289  for (i = 0; i < 16; i++)
290  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
291  for (i = 0; i < 256; i++)
292  s->raw_huffman_values[class][index][i] = val_table[i];
293  }
294  return 0;
295 }
296 
297 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
298 {
299  int len, nb_components, i, width, height, bits, ret, size_change;
300  unsigned pix_fmt_id;
301  int h_count[MAX_COMPONENTS] = { 0 };
302  int v_count[MAX_COMPONENTS] = { 0 };
303 
304  s->cur_scan = 0;
305  memset(s->upscale_h, 0, sizeof(s->upscale_h));
306  memset(s->upscale_v, 0, sizeof(s->upscale_v));
307 
308  len = get_bits(&s->gb, 16);
309  bits = get_bits(&s->gb, 8);
310 
311  if (bits > 16 || bits < 1) {
312  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
313  return AVERROR_INVALIDDATA;
314  }
315 
316  if (s->avctx->bits_per_raw_sample != bits) {
317  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
318  s->avctx->bits_per_raw_sample = bits;
319  init_idct(s->avctx);
320  }
321  if (s->pegasus_rct)
322  bits = 9;
323  if (bits == 9 && !s->pegasus_rct)
324  s->rct = 1; // FIXME ugly
325 
326  if(s->lossless && s->avctx->lowres){
327  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
328  return -1;
329  }
330 
331  height = get_bits(&s->gb, 16);
332  width = get_bits(&s->gb, 16);
333 
334  // HACK for odd_height.mov
335  if (s->interlaced && s->width == width && s->height == height + 1)
336  height= s->height;
337 
338  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
339  if (av_image_check_size(width, height, 0, s->avctx) < 0)
340  return AVERROR_INVALIDDATA;
341  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
342  return AVERROR_INVALIDDATA;
343 
344  nb_components = get_bits(&s->gb, 8);
345  if (nb_components <= 0 ||
346  nb_components > MAX_COMPONENTS)
347  return -1;
348  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
349  if (nb_components != s->nb_components) {
350  av_log(s->avctx, AV_LOG_ERROR,
351  "nb_components changing in interlaced picture\n");
352  return AVERROR_INVALIDDATA;
353  }
354  }
355  if (s->ls && !(bits <= 8 || nb_components == 1)) {
356  avpriv_report_missing_feature(s->avctx,
357  "JPEG-LS that is not <= 8 "
358  "bits/component or 16-bit gray");
359  return AVERROR_PATCHWELCOME;
360  }
361  if (len != 8 + 3 * nb_components) {
362  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
363  return AVERROR_INVALIDDATA;
364  }
365 
366  s->nb_components = nb_components;
367  s->h_max = 1;
368  s->v_max = 1;
369  for (i = 0; i < nb_components; i++) {
370  /* component id */
371  s->component_id[i] = get_bits(&s->gb, 8) - 1;
372  h_count[i] = get_bits(&s->gb, 4);
373  v_count[i] = get_bits(&s->gb, 4);
374  /* compute hmax and vmax (only used in interleaved case) */
375  if (h_count[i] > s->h_max)
376  s->h_max = h_count[i];
377  if (v_count[i] > s->v_max)
378  s->v_max = v_count[i];
379  s->quant_index[i] = get_bits(&s->gb, 8);
380  if (s->quant_index[i] >= 4) {
381  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
382  return AVERROR_INVALIDDATA;
383  }
384  if (!h_count[i] || !v_count[i]) {
385  av_log(s->avctx, AV_LOG_ERROR,
386  "Invalid sampling factor in component %d %d:%d\n",
387  i, h_count[i], v_count[i]);
388  return AVERROR_INVALIDDATA;
389  }
390 
391  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
392  i, h_count[i], v_count[i],
393  s->component_id[i], s->quant_index[i]);
394  }
395  if ( nb_components == 4
396  && s->component_id[0] == 'C' - 1
397  && s->component_id[1] == 'M' - 1
398  && s->component_id[2] == 'Y' - 1
399  && s->component_id[3] == 'K' - 1)
400  s->adobe_transform = 0;
401 
402  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
403  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
404  return AVERROR_PATCHWELCOME;
405  }
406 
407  if (s->bayer) {
408  if (nb_components == 2) {
409  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
410  width stored in their SOF3 markers is the width of each one. We only output
411  a single component, therefore we need to adjust the output image width. We
412  handle the deinterleaving (but not the debayering) in this file. */
413  width *= 2;
414  }
415  /* They can also contain 1 component, which is double the width and half the height
416  of the final image (rows are interleaved). We don't handle the decoding in this
417  file, but leave that to the TIFF/DNG decoder. */
418  }
419 
420  /* if different size, realloc/alloc picture */
421  if (width != s->width || height != s->height || bits != s->bits ||
422  memcmp(s->h_count, h_count, sizeof(h_count)) ||
423  memcmp(s->v_count, v_count, sizeof(v_count))) {
424  size_change = 1;
425 
426  s->width = width;
427  s->height = height;
428  s->bits = bits;
429  memcpy(s->h_count, h_count, sizeof(h_count));
430  memcpy(s->v_count, v_count, sizeof(v_count));
431  s->interlaced = 0;
432  s->got_picture = 0;
433 
434  /* test interlaced mode */
435  if (s->first_picture &&
436  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
437  s->orig_height != 0 &&
438  s->height < ((s->orig_height * 3) / 4)) {
439  s->interlaced = 1;
440  s->bottom_field = s->interlace_polarity;
441  s->picture_ptr->interlaced_frame = 1;
442  s->picture_ptr->top_field_first = !s->interlace_polarity;
443  height *= 2;
444  }
445 
446  ret = ff_set_dimensions(s->avctx, width, height);
447  if (ret < 0)
448  return ret;
449 
450  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
451  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
452  s->orig_height < height)
453  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
454 
455  s->first_picture = 0;
456  } else {
457  size_change = 0;
458  }
459 
460  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
461  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
462  if (s->avctx->height <= 0)
463  return AVERROR_INVALIDDATA;
464  }
465 
466  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
467  if (s->progressive) {
468  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
469  return AVERROR_INVALIDDATA;
470  }
471  } else {
472  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
473  s->rgb = 1;
474  else if (!s->lossless)
475  s->rgb = 0;
476  /* XXX: not complete test ! */
477  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
478  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
479  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
480  (s->h_count[3] << 4) | s->v_count[3];
481  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
482  /* NOTE we do not allocate pictures large enough for the possible
483  * padding of h/v_count being 4 */
484  if (!(pix_fmt_id & 0xD0D0D0D0))
485  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
486  if (!(pix_fmt_id & 0x0D0D0D0D))
487  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
488 
489  for (i = 0; i < 8; i++) {
490  int j = 6 + (i&1) - (i&6);
491  int is = (pix_fmt_id >> (4*i)) & 0xF;
492  int js = (pix_fmt_id >> (4*j)) & 0xF;
493 
494  if (is == 1 && js != 2 && (i < 2 || i > 5))
495  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
496  if (is == 1 && js != 2 && (i < 2 || i > 5))
497  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
498 
499  if (is == 1 && js == 2) {
500  if (i & 1) s->upscale_h[j/2] = 1;
501  else s->upscale_v[j/2] = 1;
502  }
503  }
504 
505  if (s->bayer) {
506  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
507  goto unk_pixfmt;
508  }
509 
510  switch (pix_fmt_id) {
511  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
512  if (!s->bayer)
513  goto unk_pixfmt;
514  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
515  break;
516  case 0x11111100:
517  if (s->rgb)
518  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
519  else {
520  if ( s->adobe_transform == 0
521  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
522  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
523  } else {
524  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
525  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
526  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
527  }
528  }
529  av_assert0(s->nb_components == 3);
530  break;
531  case 0x11111111:
532  if (s->rgb)
533  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
534  else {
535  if (s->adobe_transform == 0 && s->bits <= 8) {
536  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
537  } else {
538  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
539  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
540  }
541  }
542  av_assert0(s->nb_components == 4);
543  break;
544  case 0x22111122:
545  case 0x22111111:
546  if (s->adobe_transform == 0 && s->bits <= 8) {
547  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
548  s->upscale_v[1] = s->upscale_v[2] = 1;
549  s->upscale_h[1] = s->upscale_h[2] = 1;
550  } else if (s->adobe_transform == 2 && s->bits <= 8) {
551  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
552  s->upscale_v[1] = s->upscale_v[2] = 1;
553  s->upscale_h[1] = s->upscale_h[2] = 1;
554  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
555  } else {
556  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
557  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
558  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
559  }
560  av_assert0(s->nb_components == 4);
561  break;
562  case 0x12121100:
563  case 0x22122100:
564  case 0x21211100:
565  case 0x21112100:
566  case 0x22211200:
567  case 0x22221100:
568  case 0x22112200:
569  case 0x11222200:
570  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
571  else
572  goto unk_pixfmt;
573  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
574  break;
575  case 0x11000000:
576  case 0x13000000:
577  case 0x14000000:
578  case 0x31000000:
579  case 0x33000000:
580  case 0x34000000:
581  case 0x41000000:
582  case 0x43000000:
583  case 0x44000000:
584  if(s->bits <= 8)
585  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
586  else
587  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
588  break;
589  case 0x12111100:
590  case 0x14121200:
591  case 0x14111100:
592  case 0x22211100:
593  case 0x22112100:
594  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
595  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
596  else
597  goto unk_pixfmt;
598  s->upscale_v[0] = s->upscale_v[1] = 1;
599  } else {
600  if (pix_fmt_id == 0x14111100)
601  s->upscale_v[1] = s->upscale_v[2] = 1;
602  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
603  else
604  goto unk_pixfmt;
605  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
606  }
607  break;
608  case 0x21111100:
609  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
610  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
611  else
612  goto unk_pixfmt;
613  s->upscale_h[0] = s->upscale_h[1] = 1;
614  } else {
615  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
616  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
617  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
618  }
619  break;
620  case 0x31111100:
621  if (s->bits > 8)
622  goto unk_pixfmt;
623  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
624  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
625  s->upscale_h[1] = s->upscale_h[2] = 2;
626  break;
627  case 0x22121100:
628  case 0x22111200:
629  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
630  else
631  goto unk_pixfmt;
632  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
633  break;
634  case 0x22111100:
635  case 0x23111100:
636  case 0x42111100:
637  case 0x24111100:
638  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
639  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
640  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
641  if (pix_fmt_id == 0x42111100) {
642  if (s->bits > 8)
643  goto unk_pixfmt;
644  s->upscale_h[1] = s->upscale_h[2] = 1;
645  } else if (pix_fmt_id == 0x24111100) {
646  if (s->bits > 8)
647  goto unk_pixfmt;
648  s->upscale_v[1] = s->upscale_v[2] = 1;
649  } else if (pix_fmt_id == 0x23111100) {
650  if (s->bits > 8)
651  goto unk_pixfmt;
652  s->upscale_v[1] = s->upscale_v[2] = 2;
653  }
654  break;
655  case 0x41111100:
656  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
657  else
658  goto unk_pixfmt;
659  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
660  break;
661  default:
662  unk_pixfmt:
663  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
664  memset(s->upscale_h, 0, sizeof(s->upscale_h));
665  memset(s->upscale_v, 0, sizeof(s->upscale_v));
666  return AVERROR_PATCHWELCOME;
667  }
668  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
669  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
670  return AVERROR_PATCHWELCOME;
671  }
672  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
673  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
674  return AVERROR_PATCHWELCOME;
675  }
676  if (s->ls) {
677  memset(s->upscale_h, 0, sizeof(s->upscale_h));
678  memset(s->upscale_v, 0, sizeof(s->upscale_v));
679  if (s->nb_components == 3) {
680  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
681  } else if (s->nb_components != 1) {
682  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
683  return AVERROR_PATCHWELCOME;
684  } else if (s->palette_index && s->bits <= 8 || s->force_pal8)
685  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
686  else if (s->bits <= 8)
687  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
688  else
689  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
690  }
691 
692  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
693  if (!s->pix_desc) {
694  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
695  return AVERROR_BUG;
696  }
697 
698  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
699  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
700  } else {
701  enum AVPixelFormat pix_fmts[] = {
702 #if CONFIG_MJPEG_NVDEC_HWACCEL
703  AV_PIX_FMT_CUDA,
704 #endif
705 #if CONFIG_MJPEG_VAAPI_HWACCEL
706  AV_PIX_FMT_VAAPI,
707 #endif
708  s->avctx->pix_fmt,
709  AV_PIX_FMT_NONE,
710  };
711  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
712  if (s->hwaccel_pix_fmt < 0)
713  return AVERROR(EINVAL);
714 
715  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
716  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
717  }
718 
719  if (s->avctx->skip_frame == AVDISCARD_ALL) {
720  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
721  s->picture_ptr->key_frame = 1;
722  s->got_picture = 1;
723  return 0;
724  }
725 
726  av_frame_unref(s->picture_ptr);
727  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
728  return -1;
729  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
730  s->picture_ptr->key_frame = 1;
731  s->got_picture = 1;
732 
733  // Let's clear the palette to avoid leaving uninitialized values in it
734  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
735  memset(s->picture_ptr->data[1], 0, 1024);
736 
737  for (i = 0; i < 4; i++)
738  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
739 
740  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
741  s->width, s->height, s->linesize[0], s->linesize[1],
742  s->interlaced, s->avctx->height);
743 
744  }
745 
746  if ((s->rgb && !s->lossless && !s->ls) ||
747  (!s->rgb && s->ls && s->nb_components > 1) ||
748  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
749  ) {
750  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
751  return AVERROR_PATCHWELCOME;
752  }
753 
754  /* totally blank picture as progressive JPEG will only add details to it */
755  if (s->progressive) {
756  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
757  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
758  for (i = 0; i < s->nb_components; i++) {
759  int size = bw * bh * s->h_count[i] * s->v_count[i];
760  av_freep(&s->blocks[i]);
761  av_freep(&s->last_nnz[i]);
762  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
763  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
764  if (!s->blocks[i] || !s->last_nnz[i])
765  return AVERROR(ENOMEM);
766  s->block_stride[i] = bw * s->h_count[i];
767  }
768  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
769  }
770 
771  if (s->avctx->hwaccel) {
772  s->hwaccel_picture_private =
773  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
774  if (!s->hwaccel_picture_private)
775  return AVERROR(ENOMEM);
776 
777  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
778  s->raw_image_buffer_size);
779  if (ret < 0)
780  return ret;
781  }
782 
783  return 0;
784 }
785 
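/* Decode one DC coefficient: read the Huffman-coded magnitude category from
 * the class-0 (DC) VLC table, then the signed difference bits; returns the
 * sentinel 0xfffff if the code is invalid so callers can abort the block. */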
786 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
787 {
788  int code;
789  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
790  if (code < 0 || code > 16) {
791  av_log(s->avctx, AV_LOG_WARNING,
792  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
793  0, dc_index, &s->vlcs[0][dc_index]);
794  return 0xfffff;
795  }
796 
797  if (code)
798  return get_xbits(&s->gb, code);
799  else
800  return 0;
801 }
802 
803 /* decode block and dequantize */
804 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
805  int dc_index, int ac_index, uint16_t *quant_matrix)
806 {
807  int code, i, j, level, val;
808 
809  /* DC coef */
810  val = mjpeg_decode_dc(s, dc_index);
811  if (val == 0xfffff) {
812  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
813  return AVERROR_INVALIDDATA;
814  }
815  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
816  val = av_clip_int16(val);
817  s->last_dc[component] = val;
818  block[0] = val;
819  /* AC coefs */
820  i = 0;
821  {OPEN_READER(re, &s->gb);
822  do {
823  UPDATE_CACHE(re, &s->gb);
824  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
825 
826  i += ((unsigned)code) >> 4;
827  code &= 0xf;
828  if (code) {
829  if (code > MIN_CACHE_BITS - 16)
830  UPDATE_CACHE(re, &s->gb);
831 
832  {
833  int cache = GET_CACHE(re, &s->gb);
834  int sign = (~cache) >> 31;
835  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
836  }
837 
838  LAST_SKIP_BITS(re, &s->gb, code);
839 
840  if (i > 63) {
841  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
842  return AVERROR_INVALIDDATA;
843  }
844  j = s->scantable.permutated[i];
845  block[j] = level * quant_matrix[i];
846  }
847  } while (i < 63);
848  CLOSE_READER(re, &s->gb);}
849 
850  return 0;
851 }
852 
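/* Decode the DC coefficient of one block in the first (Ah == 0) DC scan of a
 * progressive JPEG, applying the successive-approximation shift Al. */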
853 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
854  int component, int dc_index,
855  uint16_t *quant_matrix, int Al)
856 {
857  unsigned val;
858  s->bdsp.clear_block(block);
859  val = mjpeg_decode_dc(s, dc_index);
860  if (val == 0xfffff) {
861  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
862  return AVERROR_INVALIDDATA;
863  }
864  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
865  s->last_dc[component] = val;
866  block[0] = val;
867  return 0;
868 }
869 
870 /* decode block and dequantize - progressive JPEG version */
871 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
872  uint8_t *last_nnz, int ac_index,
873  uint16_t *quant_matrix,
874  int ss, int se, int Al, int *EOBRUN)
875 {
876  int code, i, j, val, run;
877  unsigned level;
878 
879  if (*EOBRUN) {
880  (*EOBRUN)--;
881  return 0;
882  }
883 
884  {
885  OPEN_READER(re, &s->gb);
886  for (i = ss; ; i++) {
887  UPDATE_CACHE(re, &s->gb);
888  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
889 
890  run = ((unsigned) code) >> 4;
891  code &= 0xF;
892  if (code) {
893  i += run;
894  if (code > MIN_CACHE_BITS - 16)
895  UPDATE_CACHE(re, &s->gb);
896 
897  {
898  int cache = GET_CACHE(re, &s->gb);
899  int sign = (~cache) >> 31;
900  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
901  }
902 
903  LAST_SKIP_BITS(re, &s->gb, code);
904 
905  if (i >= se) {
906  if (i == se) {
907  j = s->scantable.permutated[se];
908  block[j] = level * (quant_matrix[se] << Al);
909  break;
910  }
911  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
912  return AVERROR_INVALIDDATA;
913  }
914  j = s->scantable.permutated[i];
915  block[j] = level * (quant_matrix[i] << Al);
916  } else {
917  if (run == 0xF) {// ZRL - skip 15 coefficients
918  i += 15;
919  if (i >= se) {
920  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
921  return AVERROR_INVALIDDATA;
922  }
923  } else {
924  val = (1 << run);
925  if (run) {
926  UPDATE_CACHE(re, &s->gb);
927  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
928  LAST_SKIP_BITS(re, &s->gb, run);
929  }
930  *EOBRUN = val - 1;
931  break;
932  }
933  }
934  }
935  CLOSE_READER(re, &s->gb);
936  }
937 
938  if (i > *last_nnz)
939  *last_nnz = i;
940 
941  return 0;
942 }
943 
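/* Helper macros for the AC refinement pass: REFINE_BIT adds one correction
 * bit to an already non-zero coefficient, ZERO_RUN skips zero coefficients
 * while refining the non-zero ones it passes over. */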
944 #define REFINE_BIT(j) { \
945  UPDATE_CACHE(re, &s->gb); \
946  sign = block[j] >> 15; \
947  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
948  ((quant_matrix[i] ^ sign) - sign) << Al; \
949  LAST_SKIP_BITS(re, &s->gb, 1); \
950 }
951 
952 #define ZERO_RUN \
953 for (; ; i++) { \
954  if (i > last) { \
955  i += run; \
956  if (i > se) { \
957  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
958  return -1; \
959  } \
960  break; \
961  } \
962  j = s->scantable.permutated[i]; \
963  if (block[j]) \
964  REFINE_BIT(j) \
965  else if (run-- == 0) \
966  break; \
967 }
968 
969 /* decode block and dequantize - progressive JPEG refinement pass */
970 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
971  uint8_t *last_nnz,
972  int ac_index, uint16_t *quant_matrix,
973  int ss, int se, int Al, int *EOBRUN)
974 {
975  int code, i = ss, j, sign, val, run;
976  int last = FFMIN(se, *last_nnz);
977 
978  OPEN_READER(re, &s->gb);
979  if (*EOBRUN) {
980  (*EOBRUN)--;
981  } else {
982  for (; ; i++) {
983  UPDATE_CACHE(re, &s->gb);
984  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
985 
986  if (code & 0xF) {
987  run = ((unsigned) code) >> 4;
988  UPDATE_CACHE(re, &s->gb);
989  val = SHOW_UBITS(re, &s->gb, 1);
990  LAST_SKIP_BITS(re, &s->gb, 1);
991  ZERO_RUN;
992  j = s->scantable.permutated[i];
993  val--;
994  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
995  if (i == se) {
996  if (i > *last_nnz)
997  *last_nnz = i;
998  CLOSE_READER(re, &s->gb);
999  return 0;
1000  }
1001  } else {
1002  run = ((unsigned) code) >> 4;
1003  if (run == 0xF) {
1004  ZERO_RUN;
1005  } else {
1006  val = run;
1007  run = (1 << run);
1008  if (val) {
1009  UPDATE_CACHE(re, &s->gb);
1010  run += SHOW_UBITS(re, &s->gb, val);
1011  LAST_SKIP_BITS(re, &s->gb, val);
1012  }
1013  *EOBRUN = run - 1;
1014  break;
1015  }
1016  }
1017  }
1018 
1019  if (i > *last_nnz)
1020  *last_nnz = i;
1021  }
1022 
1023  for (; i <= last; i++) {
1024  j = s->scantable.permutated[i];
1025  if (block[j])
1026  REFINE_BIT(j)
1027  }
1028  CLOSE_READER(re, &s->gb);
1029 
1030  return 0;
1031 }
1032 #undef REFINE_BIT
1033 #undef ZERO_RUN
1034 
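/* Check for an RSTn marker at the end of a restart interval: realign the
 * bitstream, skip the marker and reset the DC predictors. Returns 1 if a
 * resync actually took place, 0 otherwise. */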
1035 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1036 {
1037  int i;
1038  int reset = 0;
1039 
1040  if (s->restart_interval) {
1041  s->restart_count--;
1042  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1043  align_get_bits(&s->gb);
1044  for (i = 0; i < nb_components; i++) /* reset dc */
1045  s->last_dc[i] = (4 << s->bits);
1046  }
1047 
1048  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1049  /* skip RSTn */
1050  if (s->restart_count == 0) {
1051  if( show_bits(&s->gb, i) == (1 << i) - 1
1052  || show_bits(&s->gb, i) == 0xFF) {
1053  int pos = get_bits_count(&s->gb);
1054  align_get_bits(&s->gb);
1055  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1056  skip_bits(&s->gb, 8);
1057  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1058  for (i = 0; i < nb_components; i++) /* reset dc */
1059  s->last_dc[i] = (4 << s->bits);
1060  reset = 1;
1061  } else
1062  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1063  }
1064  }
1065  }
1066  return reset;
1067 }
1068 
1069 /* Handles 1 to 4 components */
1070 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1071 {
1072  int i, mb_x, mb_y;
1073  unsigned width;
1074  uint16_t (*buffer)[4];
1075  int left[4], top[4], topleft[4];
1076  const int linesize = s->linesize[0];
1077  const int mask = ((1 << s->bits) - 1) << point_transform;
1078  int resync_mb_y = 0;
1079  int resync_mb_x = 0;
1080  int vpred[6];
1081 
1082  if (!s->bayer && s->nb_components < 3)
1083  return AVERROR_INVALIDDATA;
1084  if (s->bayer && s->nb_components > 2)
1085  return AVERROR_INVALIDDATA;
1086  if (s->nb_components <= 0 || s->nb_components > 4)
1087  return AVERROR_INVALIDDATA;
1088  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1089  return AVERROR_INVALIDDATA;
1090 
1091 
1092  s->restart_count = s->restart_interval;
1093 
1094  if (s->restart_interval == 0)
1095  s->restart_interval = INT_MAX;
1096 
1097  if (s->bayer)
1098  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1099  else
1100  width = s->mb_width;
1101 
1102  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1103  if (!s->ljpeg_buffer)
1104  return AVERROR(ENOMEM);
1105 
1106  buffer = s->ljpeg_buffer;
1107 
1108  for (i = 0; i < 4; i++)
1109  buffer[0][i] = 1 << (s->bits - 1);
1110 
1111  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1112  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1113 
1114  if (s->interlaced && s->bottom_field)
1115  ptr += linesize >> 1;
1116 
1117  for (i = 0; i < 4; i++)
1118  top[i] = left[i] = topleft[i] = buffer[0][i];
1119 
1120  if ((mb_y * s->width) % s->restart_interval == 0) {
1121  for (i = 0; i < 6; i++)
1122  vpred[i] = 1 << (s->bits-1);
1123  }
1124 
1125  for (mb_x = 0; mb_x < width; mb_x++) {
1126  int modified_predictor = predictor;
1127 
1128  if (get_bits_left(&s->gb) < 1) {
1129  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1130  return AVERROR_INVALIDDATA;
1131  }
1132 
1133  if (s->restart_interval && !s->restart_count){
1134  s->restart_count = s->restart_interval;
1135  resync_mb_x = mb_x;
1136  resync_mb_y = mb_y;
1137  for(i=0; i<4; i++)
1138  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1139  }
1140  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1141  modified_predictor = 1;
1142 
1143  for (i=0;i<nb_components;i++) {
1144  int pred, dc;
1145 
1146  topleft[i] = top[i];
1147  top[i] = buffer[mb_x][i];
1148 
1149  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1150  if(dc == 0xFFFFF)
1151  return -1;
1152 
1153  if (!s->bayer || mb_x) {
1154  pred = left[i];
1155  } else { /* This path runs only for the first line in bayer images */
1156  vpred[i] += dc;
1157  pred = vpred[i] - dc;
1158  }
1159 
1160  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1161 
1162  left[i] = buffer[mb_x][i] =
1163  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1164  }
1165 
1166  if (s->restart_interval && !--s->restart_count) {
1167  align_get_bits(&s->gb);
1168  skip_bits(&s->gb, 16); /* skip RSTn */
1169  }
1170  }
1171  if (s->rct && s->nb_components == 4) {
1172  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1173  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1174  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1175  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1176  ptr[4*mb_x + 0] = buffer[mb_x][3];
1177  }
1178  } else if (s->nb_components == 4) {
1179  for(i=0; i<nb_components; i++) {
1180  int c= s->comp_index[i];
1181  if (s->bits <= 8) {
1182  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1183  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1184  }
1185  } else if(s->bits == 9) {
1186  return AVERROR_PATCHWELCOME;
1187  } else {
1188  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1189  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1190  }
1191  }
1192  }
1193  } else if (s->rct) {
1194  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1195  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1196  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1197  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1198  }
1199  } else if (s->pegasus_rct) {
1200  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1201  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1202  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1203  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1204  }
1205  } else if (s->bayer) {
1206  if (nb_components == 1) {
1207  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1208  for (mb_x = 0; mb_x < width; mb_x++)
1209  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1210  } else if (nb_components == 2) {
1211  for (mb_x = 0; mb_x < width; mb_x++) {
1212  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1213  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1214  }
1215  }
1216  } else {
1217  for(i=0; i<nb_components; i++) {
1218  int c= s->comp_index[i];
1219  if (s->bits <= 8) {
1220  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1221  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1222  }
1223  } else if(s->bits == 9) {
1224  return AVERROR_PATCHWELCOME;
1225  } else {
1226  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1227  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1228  }
1229  }
1230  }
1231  }
1232  }
1233  return 0;
1234 }
1235 
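/* Lossless (DPCM) scan decoder for YUV/grayscale data: predicts each sample
 * from its left/top/top-left neighbours according to the SOS predictor and
 * adds the Huffman-coded difference. */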
1236 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1237  int point_transform, int nb_components)
1238 {
1239  int i, mb_x, mb_y, mask;
1240  int bits= (s->bits+7)&~7;
1241  int resync_mb_y = 0;
1242  int resync_mb_x = 0;
1243 
1244  point_transform += bits - s->bits;
1245  mask = ((1 << s->bits) - 1) << point_transform;
1246 
1247  av_assert0(nb_components>=1 && nb_components<=4);
1248 
1249  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1250  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1251  if (get_bits_left(&s->gb) < 1) {
1252  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1253  return AVERROR_INVALIDDATA;
1254  }
1255  if (s->restart_interval && !s->restart_count){
1256  s->restart_count = s->restart_interval;
1257  resync_mb_x = mb_x;
1258  resync_mb_y = mb_y;
1259  }
1260 
1261  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1262  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1263  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1264  for (i = 0; i < nb_components; i++) {
1265  uint8_t *ptr;
1266  uint16_t *ptr16;
1267  int n, h, v, x, y, c, j, linesize;
1268  n = s->nb_blocks[i];
1269  c = s->comp_index[i];
1270  h = s->h_scount[i];
1271  v = s->v_scount[i];
1272  x = 0;
1273  y = 0;
1274  linesize= s->linesize[c];
1275 
1276  if(bits>8) linesize /= 2;
1277 
1278  for(j=0; j<n; j++) {
1279  int pred, dc;
1280 
1281  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1282  if(dc == 0xFFFFF)
1283  return -1;
1284  if ( h * mb_x + x >= s->width
1285  || v * mb_y + y >= s->height) {
1286  // Nothing to do
1287  } else if (bits<=8) {
1288  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1289  if(y==0 && toprow){
1290  if(x==0 && leftcol){
1291  pred= 1 << (bits - 1);
1292  }else{
1293  pred= ptr[-1];
1294  }
1295  }else{
1296  if(x==0 && leftcol){
1297  pred= ptr[-linesize];
1298  }else{
1299  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1300  }
1301  }
1302 
1303  if (s->interlaced && s->bottom_field)
1304  ptr += linesize >> 1;
1305  pred &= mask;
1306  *ptr= pred + ((unsigned)dc << point_transform);
1307  }else{
1308  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1309  if(y==0 && toprow){
1310  if(x==0 && leftcol){
1311  pred= 1 << (bits - 1);
1312  }else{
1313  pred= ptr16[-1];
1314  }
1315  }else{
1316  if(x==0 && leftcol){
1317  pred= ptr16[-linesize];
1318  }else{
1319  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1320  }
1321  }
1322 
1323  if (s->interlaced && s->bottom_field)
1324  ptr16 += linesize >> 1;
1325  pred &= mask;
1326  *ptr16= pred + ((unsigned)dc << point_transform);
1327  }
1328  if (++x == h) {
1329  x = 0;
1330  y++;
1331  }
1332  }
1333  }
1334  } else {
1335  for (i = 0; i < nb_components; i++) {
1336  uint8_t *ptr;
1337  uint16_t *ptr16;
1338  int n, h, v, x, y, c, j, linesize, dc;
1339  n = s->nb_blocks[i];
1340  c = s->comp_index[i];
1341  h = s->h_scount[i];
1342  v = s->v_scount[i];
1343  x = 0;
1344  y = 0;
1345  linesize = s->linesize[c];
1346 
1347  if(bits>8) linesize /= 2;
1348 
1349  for (j = 0; j < n; j++) {
1350  int pred;
1351 
1352  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1353  if(dc == 0xFFFFF)
1354  return -1;
1355  if ( h * mb_x + x >= s->width
1356  || v * mb_y + y >= s->height) {
1357  // Nothing to do
1358  } else if (bits<=8) {
1359  ptr = s->picture_ptr->data[c] +
1360  (linesize * (v * mb_y + y)) +
1361  (h * mb_x + x); //FIXME optimize this crap
1362  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1363 
1364  pred &= mask;
1365  *ptr = pred + ((unsigned)dc << point_transform);
1366  }else{
1367  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1368  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1369 
1370  pred &= mask;
1371  *ptr16= pred + ((unsigned)dc << point_transform);
1372  }
1373 
1374  if (++x == h) {
1375  x = 0;
1376  y++;
1377  }
1378  }
1379  }
1380  }
1381  if (s->restart_interval && !--s->restart_count) {
1382  align_get_bits(&s->gb);
1383  skip_bits(&s->gb, 16); /* skip RSTn */
1384  }
1385  }
1386  }
1387  return 0;
1388 }
1389 
1390 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1391  uint8_t *dst, const uint8_t *src,
1392  int linesize, int lowres)
1393 {
1394  switch (lowres) {
1395  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1396  break;
1397  case 1: copy_block4(dst, src, linesize, linesize, 4);
1398  break;
1399  case 2: copy_block2(dst, src, linesize, linesize, 2);
1400  break;
1401  case 3: *dst = *src;
1402  break;
1403  }
1404 }
1405 
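/* Scale decoded samples up to the full 8- or 16-bit range when the stream's
 * bit depth is not a multiple of 8. */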
1406 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1407 {
1408  int block_x, block_y;
1409  int size = 8 >> s->avctx->lowres;
1410  if (s->bits > 8) {
1411  for (block_y=0; block_y<size; block_y++)
1412  for (block_x=0; block_x<size; block_x++)
1413  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1414  } else {
1415  for (block_y=0; block_y<size; block_y++)
1416  for (block_x=0; block_x<size; block_x++)
1417  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1418  }
1419 }
1420 
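/* Decode one scan macroblock by macroblock: Huffman-decode and IDCT each
 * block for baseline streams, or accumulate DC coefficients for progressive
 * ones. When mb_bitmask is set, macroblocks whose bit is clear are copied
 * from the reference frame instead of being decoded. */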
1421 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1422  int Al, const uint8_t *mb_bitmask,
1423  int mb_bitmask_size,
1424  const AVFrame *reference)
1425 {
1426  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1427  uint8_t *data[MAX_COMPONENTS];
1428  const uint8_t *reference_data[MAX_COMPONENTS];
1429  int linesize[MAX_COMPONENTS];
1430  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1431  int bytes_per_pixel = 1 + (s->bits > 8);
1432 
1433  if (mb_bitmask) {
1434  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1435  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1436  return AVERROR_INVALIDDATA;
1437  }
1438  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1439  }
1440 
1441  s->restart_count = 0;
1442 
1443  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1444  &chroma_v_shift);
1445  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1446  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1447 
1448  for (i = 0; i < nb_components; i++) {
1449  int c = s->comp_index[i];
1450  data[c] = s->picture_ptr->data[c];
1451  reference_data[c] = reference ? reference->data[c] : NULL;
1452  linesize[c] = s->linesize[c];
1453  s->coefs_finished[c] |= 1;
1454  }
1455 
1456  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1457  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1458  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1459 
1460  if (s->restart_interval && !s->restart_count)
1461  s->restart_count = s->restart_interval;
1462 
1463  if (get_bits_left(&s->gb) < 0) {
1464  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1465  -get_bits_left(&s->gb));
1466  return AVERROR_INVALIDDATA;
1467  }
1468  for (i = 0; i < nb_components; i++) {
1469  uint8_t *ptr;
1470  int n, h, v, x, y, c, j;
1471  int block_offset;
1472  n = s->nb_blocks[i];
1473  c = s->comp_index[i];
1474  h = s->h_scount[i];
1475  v = s->v_scount[i];
1476  x = 0;
1477  y = 0;
1478  for (j = 0; j < n; j++) {
1479  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1480  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1481 
1482  if (s->interlaced && s->bottom_field)
1483  block_offset += linesize[c] >> 1;
1484  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1485  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1486  ptr = data[c] + block_offset;
1487  } else
1488  ptr = NULL;
1489  if (!s->progressive) {
1490  if (copy_mb) {
1491  if (ptr)
1492  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1493  linesize[c], s->avctx->lowres);
1494 
1495  } else {
1496  s->bdsp.clear_block(s->block);
1497  if (decode_block(s, s->block, i,
1498  s->dc_index[i], s->ac_index[i],
1499  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1500  av_log(s->avctx, AV_LOG_ERROR,
1501  "error y=%d x=%d\n", mb_y, mb_x);
1502  return AVERROR_INVALIDDATA;
1503  }
1504  if (ptr) {
1505  s->idsp.idct_put(ptr, linesize[c], s->block);
1506  if (s->bits & 7)
1507  shift_output(s, ptr, linesize[c]);
1508  }
1509  }
1510  } else {
1511  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1512  (h * mb_x + x);
1513  int16_t *block = s->blocks[c][block_idx];
1514  if (Ah)
1515  block[0] += get_bits1(&s->gb) *
1516  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1517  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1518  s->quant_matrixes[s->quant_sindex[i]],
1519  Al) < 0) {
1520  av_log(s->avctx, AV_LOG_ERROR,
1521  "error y=%d x=%d\n", mb_y, mb_x);
1522  return AVERROR_INVALIDDATA;
1523  }
1524  }
1525  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1526  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1527  mb_x, mb_y, x, y, c, s->bottom_field,
1528  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1529  if (++x == h) {
1530  x = 0;
1531  y++;
1532  }
1533  }
1534  }
1535 
1536  handle_rstn(s, nb_components);
1537  }
1538  }
1539  return 0;
1540 }
1541 
1542 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1543  int se, int Ah, int Al)
1544 {
1545  int mb_x, mb_y;
1546  int EOBRUN = 0;
1547  int c = s->comp_index[0];
1548  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1549 
1550  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1551  if (se < ss || se > 63) {
1552  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1553  return AVERROR_INVALIDDATA;
1554  }
1555 
1556  // s->coefs_finished is a bitmask for coefficients coded
1557  // ss and se are parameters telling start and end coefficients
1558  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1559 
1560  s->restart_count = 0;
1561 
1562  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1563  int block_idx = mb_y * s->block_stride[c];
1564  int16_t (*block)[64] = &s->blocks[c][block_idx];
1565  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1566  if (get_bits_left(&s->gb) <= 0) {
1567  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1568  return AVERROR_INVALIDDATA;
1569  }
1570  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1571  int ret;
1572  if (s->restart_interval && !s->restart_count)
1573  s->restart_count = s->restart_interval;
1574 
1575  if (Ah)
1576  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1577  quant_matrix, ss, se, Al, &EOBRUN);
1578  else
1579  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1580  quant_matrix, ss, se, Al, &EOBRUN);
1581  if (ret < 0) {
1582  av_log(s->avctx, AV_LOG_ERROR,
1583  "error y=%d x=%d\n", mb_y, mb_x);
1584  return AVERROR_INVALIDDATA;
1585  }
1586 
1587  if (handle_rstn(s, 0))
1588  EOBRUN = 0;
1589  }
1590  }
1591  return 0;
1592 }
1593 
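/* After all progressive scans have been parsed, run the IDCT over the
 * accumulated coefficient blocks of every component and write the samples. */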
1594 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1595 {
1596  int mb_x, mb_y;
1597  int c;
1598  const int bytes_per_pixel = 1 + (s->bits > 8);
1599  const int block_size = s->lossless ? 1 : 8;
1600 
1601  for (c = 0; c < s->nb_components; c++) {
1602  uint8_t *data = s->picture_ptr->data[c];
1603  int linesize = s->linesize[c];
1604  int h = s->h_max / s->h_count[c];
1605  int v = s->v_max / s->v_count[c];
1606  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1607  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1608 
1609  if (~s->coefs_finished[c])
1610  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1611 
1612  if (s->interlaced && s->bottom_field)
1613  data += linesize >> 1;
1614 
1615  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1616  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1617  int block_idx = mb_y * s->block_stride[c];
1618  int16_t (*block)[64] = &s->blocks[c][block_idx];
1619  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1620  s->idsp.idct_put(ptr, linesize, *block);
1621  if (s->bits & 7)
1622  shift_output(s, ptr, linesize);
1623  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1624  }
1625  }
1626  }
1627 }
1628 
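/* Parse an SOS header (component selectors, Huffman table indices, spectral
 * selection and successive approximation) and dispatch to the matching scan
 * decoder: JPEG-LS, lossless RGB/Bayer, lossless YUV, progressive AC or
 * baseline/DC. */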
1629 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1630  int mb_bitmask_size, const AVFrame *reference)
1631 {
1632  int len, nb_components, i, h, v, predictor, point_transform;
1633  int index, id, ret;
1634  const int block_size = s->lossless ? 1 : 8;
1635  int ilv, prev_shift;
1636 
1637  if (!s->got_picture) {
1638  av_log(s->avctx, AV_LOG_WARNING,
1639  "Can not process SOS before SOF, skipping\n");
1640  return -1;
1641  }
1642 
1643  if (reference) {
1644  if (reference->width != s->picture_ptr->width ||
1645  reference->height != s->picture_ptr->height ||
1646  reference->format != s->picture_ptr->format) {
1647  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1648  return AVERROR_INVALIDDATA;
1649  }
1650  }
1651 
1652  /* XXX: verify len field validity */
1653  len = get_bits(&s->gb, 16);
1654  nb_components = get_bits(&s->gb, 8);
1655  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1656  avpriv_report_missing_feature(s->avctx,
1657  "decode_sos: nb_components (%d)",
1658  nb_components);
1659  return AVERROR_PATCHWELCOME;
1660  }
1661  if (len != 6 + 2 * nb_components) {
1662  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1663  return AVERROR_INVALIDDATA;
1664  }
1665  for (i = 0; i < nb_components; i++) {
1666  id = get_bits(&s->gb, 8) - 1;
1667  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1668  /* find component index */
1669  for (index = 0; index < s->nb_components; index++)
1670  if (id == s->component_id[index])
1671  break;
1672  if (index == s->nb_components) {
1673  av_log(s->avctx, AV_LOG_ERROR,
1674  "decode_sos: index(%d) out of components\n", index);
1675  return AVERROR_INVALIDDATA;
1676  }
1677  /* Metasoft MJPEG codec has Cb and Cr swapped */
1678  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1679  && nb_components == 3 && s->nb_components == 3 && i)
1680  index = 3 - i;
1681 
1682  s->quant_sindex[i] = s->quant_index[index];
1683  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1684  s->h_scount[i] = s->h_count[index];
1685  s->v_scount[i] = s->v_count[index];
1686 
1687  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1688  index = (index+2)%3;
1689 
1690  s->comp_index[i] = index;
1691 
1692  s->dc_index[i] = get_bits(&s->gb, 4);
1693  s->ac_index[i] = get_bits(&s->gb, 4);
1694 
1695  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1696  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1697  goto out_of_range;
1698  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1699  goto out_of_range;
1700  }
1701 
1702  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1703  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1704  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1705  prev_shift = get_bits(&s->gb, 4); /* Ah */
1706  point_transform = get_bits(&s->gb, 4); /* Al */
1707  }else
1708  prev_shift = point_transform = 0;
1709 
1710  if (nb_components > 1) {
1711  /* interleaved stream */
1712  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1713  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1714  } else if (!s->ls) { /* skip this for JPEG-LS */
1715  h = s->h_max / s->h_scount[0];
1716  v = s->v_max / s->v_scount[0];
1717  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1718  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1719  s->nb_blocks[0] = 1;
1720  s->h_scount[0] = 1;
1721  s->v_scount[0] = 1;
1722  }
1723 
1724  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1725  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1726  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1727  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1728  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1729 
1730 
1731  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1732  for (i = s->mjpb_skiptosod; i > 0; i--)
1733  skip_bits(&s->gb, 8);
1734 
1735 next_field:
1736  for (i = 0; i < nb_components; i++)
1737  s->last_dc[i] = (4 << s->bits);
1738 
1739  if (s->avctx->hwaccel) {
1740  int bytes_to_start = get_bits_count(&s->gb) / 8;
1741  av_assert0(bytes_to_start >= 0 &&
1742  s->raw_scan_buffer_size >= bytes_to_start);
1743 
1744  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1745  s->raw_scan_buffer + bytes_to_start,
1746  s->raw_scan_buffer_size - bytes_to_start);
1747  if (ret < 0)
1748  return ret;
1749 
1750  } else if (s->lossless) {
1751  av_assert0(s->picture_ptr == s->picture);
1752  if (CONFIG_JPEGLS_DECODER && s->ls) {
1753 // for () {
1754 // reset_ls_coding_parameters(s, 0);
1755 
1756  if ((ret = ff_jpegls_decode_picture(s, predictor,
1757  point_transform, ilv)) < 0)
1758  return ret;
1759  } else {
1760  if (s->rgb || s->bayer) {
1761  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1762  return ret;
1763  } else {
1764  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1765  point_transform,
1766  nb_components)) < 0)
1767  return ret;
1768  }
1769  }
1770  } else {
1771  if (s->progressive && predictor) {
1772  av_assert0(s->picture_ptr == s->picture);
1773  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1774  ilv, prev_shift,
1775  point_transform)) < 0)
1776  return ret;
1777  } else {
1778  if ((ret = mjpeg_decode_scan(s, nb_components,
1779  prev_shift, point_transform,
1780  mb_bitmask, mb_bitmask_size, reference)) < 0)
1781  return ret;
1782  }
1783  }
1784 
1785  if (s->interlaced &&
1786  get_bits_left(&s->gb) > 32 &&
1787  show_bits(&s->gb, 8) == 0xFF) {
1788  GetBitContext bak = s->gb;
1789  align_get_bits(&bak);
1790  if (show_bits(&bak, 16) == 0xFFD1) {
1791  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1792  s->gb = bak;
1793  skip_bits(&s->gb, 16);
1794  s->bottom_field ^= 1;
1795 
1796  goto next_field;
1797  }
1798  }
1799 
1800  emms_c();
1801  return 0;
1802  out_of_range:
1803  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1804  return AVERROR_INVALIDDATA;
1805 }
1806 
1807 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1808 {
1809  if (get_bits(&s->gb, 16) != 4)
1810  return AVERROR_INVALIDDATA;
1811  s->restart_interval = get_bits(&s->gb, 16);
1812  s->restart_count = 0;
1813  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1814  s->restart_interval);
1815 
1816  return 0;
1817 }
1818 
1819 static int mjpeg_decode_app(MJpegDecodeContext *s)
1820 {
1821  int len, id, i;
1822 
1823  len = get_bits(&s->gb, 16);
1824  if (len < 6) {
1825  if (s->bayer) {
1826  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1827  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1828  skip_bits(&s->gb, len);
1829  return 0;
1830  } else
1831  return AVERROR_INVALIDDATA;
1832  }
1833  if (8 * len > get_bits_left(&s->gb))
1834  return AVERROR_INVALIDDATA;
1835 
1836  id = get_bits_long(&s->gb, 32);
1837  len -= 6;
1838 
1839  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1840  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1841  av_fourcc2str(av_bswap32(id)), id, len);
1842 
1843  /* Buggy AVID, it puts EOI only at every 10th frame. */
1844  /* Also, this fourcc is used by non-avid files too, it holds some
1845  information, but it's always present in AVID-created files. */
1846  if (id == AV_RB32("AVI1")) {
1847  /* structure:
1848  4bytes AVI1
1849  1bytes polarity
1850  1bytes always zero
1851  4bytes field_size
1852  4bytes field_size_less_padding
1853  */
1854  s->buggy_avid = 1;
1855  i = get_bits(&s->gb, 8); len--;
1856  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1857  goto out;
1858  }
1859 
1860  if (id == AV_RB32("JFIF")) {
1861  int t_w, t_h, v1, v2;
1862  if (len < 8)
1863  goto out;
1864  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1865  v1 = get_bits(&s->gb, 8);
1866  v2 = get_bits(&s->gb, 8);
1867  skip_bits(&s->gb, 8);
1868 
1869  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1870  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1871  if ( s->avctx->sample_aspect_ratio.num <= 0
1872  || s->avctx->sample_aspect_ratio.den <= 0) {
1873  s->avctx->sample_aspect_ratio.num = 0;
1874  s->avctx->sample_aspect_ratio.den = 1;
1875  }
1876 
1877  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1878  av_log(s->avctx, AV_LOG_INFO,
1879  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1880  v1, v2,
1881  s->avctx->sample_aspect_ratio.num,
1882  s->avctx->sample_aspect_ratio.den);
1883 
1884  len -= 8;
1885  if (len >= 2) {
1886  t_w = get_bits(&s->gb, 8);
1887  t_h = get_bits(&s->gb, 8);
1888  if (t_w && t_h) {
1889  /* skip thumbnail */
1890  if (len -10 - (t_w * t_h * 3) > 0)
1891  len -= t_w * t_h * 3;
1892  }
1893  len -= 2;
1894  }
1895  goto out;
1896  }
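 /* Note: the JFIF density-units byte is skipped above, so Xdensity/Ydensity are taken directly as
  * the sample aspect ratio; a non-positive ratio is replaced by the "unknown" value 0/1. An
  * embedded thumbnail is only accounted for in the remaining length and skipped, never decoded. */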
1897 
1898  if ( id == AV_RB32("Adob")
1899  && len >= 7
1900  && show_bits(&s->gb, 8) == 'e'
1901  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1902  skip_bits(&s->gb, 8); /* 'e' */
1903  skip_bits(&s->gb, 16); /* version */
1904  skip_bits(&s->gb, 16); /* flags0 */
1905  skip_bits(&s->gb, 16); /* flags1 */
1906  s->adobe_transform = get_bits(&s->gb, 8);
1907  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1908  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1909  len -= 7;
1910  goto out;
1911  }
1912 
1913  if (id == AV_RB32("LJIF")) {
1914  int rgb = s->rgb;
1915  int pegasus_rct = s->pegasus_rct;
1916  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1917  av_log(s->avctx, AV_LOG_INFO,
1918  "Pegasus lossless jpeg header found\n");
1919  skip_bits(&s->gb, 16); /* version ? */
1920  skip_bits(&s->gb, 16); /* unknown always 0? */
1921  skip_bits(&s->gb, 16); /* unknown always 0? */
1922  skip_bits(&s->gb, 16); /* unknown always 0? */
1923  switch (i=get_bits(&s->gb, 8)) {
1924  case 1:
1925  rgb = 1;
1926  pegasus_rct = 0;
1927  break;
1928  case 2:
1929  rgb = 1;
1930  pegasus_rct = 1;
1931  break;
1932  default:
1933  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1934  }
1935 
1936  len -= 9;
1937  if (s->got_picture)
1938  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1939  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1940  goto out;
1941  }
1942 
1943  s->rgb = rgb;
1944  s->pegasus_rct = pegasus_rct;
1945 
1946  goto out;
1947  }
1948  if (id == AV_RL32("colr") && len > 0) {
1949  s->colr = get_bits(&s->gb, 8);
1950  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1951  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1952  len --;
1953  goto out;
1954  }
1955  if (id == AV_RL32("xfrm") && len > 0) {
1956  s->xfrm = get_bits(&s->gb, 8);
1957  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1958  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1959  len --;
1960  goto out;
1961  }
1962 
1963  /* JPS extension by VRex */
1964  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1965  int flags, layout, type;
1966  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1967  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1968 
1969  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1970  skip_bits(&s->gb, 16); len -= 2; /* block length */
1971  skip_bits(&s->gb, 8); /* reserved */
1972  flags = get_bits(&s->gb, 8);
1973  layout = get_bits(&s->gb, 8);
1974  type = get_bits(&s->gb, 8);
1975  len -= 4;
1976 
1977  av_freep(&s->stereo3d);
1978  s->stereo3d = av_stereo3d_alloc();
1979  if (!s->stereo3d) {
1980  goto out;
1981  }
1982  if (type == 0) {
1983  s->stereo3d->type = AV_STEREO3D_2D;
1984  } else if (type == 1) {
1985  switch (layout) {
1986  case 0x01:
1987  s->stereo3d->type = AV_STEREO3D_LINES;
1988  break;
1989  case 0x02:
1990  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1991  break;
1992  case 0x03:
1993  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1994  break;
1995  }
1996  if (!(flags & 0x04)) {
1997  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1998  }
1999  }
2000  goto out;
2001  }
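 /* Note: the JPS layout byte is mapped onto AVStereo3D as 1 = interleaved lines, 2 = side by side,
  * 3 = top-bottom; when bit 0x04 of the flags byte is not set, AV_STEREO3D_FLAG_INVERT is exported
  * (the right/bottom view carries the left eye). */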
2002 
2003  /* EXIF metadata */
2004  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2005  GetByteContext gbytes;
2006  int ret, le, ifd_offset, bytes_read;
2007  const uint8_t *aligned;
2008 
2009  skip_bits(&s->gb, 16); // skip padding
2010  len -= 2;
2011 
2012  // init byte wise reading
2013  aligned = align_get_bits(&s->gb);
2014  bytestream2_init(&gbytes, aligned, len);
2015 
2016  // read TIFF header
2017  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2018  if (ret) {
2019  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2020  } else {
2021  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2022 
2023  // read 0th IFD and store the metadata
2024  // (return values > 0 indicate the presence of subimage metadata)
2025  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2026  if (ret < 0) {
2027  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2028  }
2029  }
2030 
2031  bytes_read = bytestream2_tell(&gbytes);
2032  skip_bits(&s->gb, bytes_read << 3);
2033  len -= bytes_read;
2034 
2035  goto out;
2036  }
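 /* Note: the EXIF payload is a small embedded TIFF file: ff_tdecode_header() determines the byte
  * order and the offset of the 0th IFD, whose tags are then collected into s->exif_metadata and
  * later exported as frame metadata. */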
2037 
2038  /* Apple MJPEG-A */
2039  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2040  id = get_bits_long(&s->gb, 32);
2041  len -= 4;
2042  /* Apple MJPEG-A */
2043  if (id == AV_RB32("mjpg")) {
2044  /* structure:
2045  4bytes field size
2046  4bytes pad field size
2047  4bytes next off
2048  4bytes quant off
2049  4bytes huff off
2050  4bytes image off
2051  4bytes scan off
2052  4bytes data off
2053  */
2054  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2055  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2056  }
2057  }
2058 
2059  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2060  int id2;
2061  unsigned seqno;
2062  unsigned nummarkers;
2063 
2064  id = get_bits_long(&s->gb, 32);
2065  id2 = get_bits(&s->gb, 24);
2066  len -= 7;
2067  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2068  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2069  goto out;
2070  }
2071 
2072  skip_bits(&s->gb, 8);
2073  seqno = get_bits(&s->gb, 8);
2074  len -= 2;
2075  if (seqno == 0) {
2076  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2077  goto out;
2078  }
2079 
2080  nummarkers = get_bits(&s->gb, 8);
2081  len -= 1;
2082  if (nummarkers == 0) {
2083  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2084  goto out;
2085  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2086  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2087  goto out;
2088  } else if (seqno > nummarkers) {
2089  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2090  goto out;
2091  }
2092 
2093  /* Allocate if this is the first APP2 we've seen. */
2094  if (s->iccnum == 0) {
2095  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2096  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2097  return AVERROR(ENOMEM);
2098  }
2099  s->iccnum = nummarkers;
2100  }
2101 
2102  if (s->iccentries[seqno - 1].data) {
2103  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2104  goto out;
2105  }
2106 
2107  s->iccentries[seqno - 1].length = len;
2108  s->iccentries[seqno - 1].data = av_malloc(len);
2109  if (!s->iccentries[seqno - 1].data) {
2110  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2111  return AVERROR(ENOMEM);
2112  }
2113 
2114  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2115  skip_bits(&s->gb, len << 3);
2116  len = 0;
2117  s->iccread++;
2118 
2119  if (s->iccread > s->iccnum)
2120  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2121  }
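 /* Note: an ICC profile may be split across several APP2 markers, each carrying a 1-based sequence
  * number and the total marker count; the chunks are stored here and only reassembled into
  * AV_FRAME_DATA_ICC_PROFILE side data once all of them have been read. */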
2122 
2123 out:
2124  /* slow but needed for extreme adobe jpegs */
2125  if (len < 0)
2126  av_log(s->avctx, AV_LOG_ERROR,
2127  "mjpeg: error, decode_app parser read over the end\n");
2128  while (--len > 0)
2129  skip_bits(&s->gb, 8);
2130 
2131  return 0;
2132 }
2133 
2134 static int mjpeg_decode_com(MJpegDecodeContext *s)
2135 {
2136  int len = get_bits(&s->gb, 16);
2137  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2138  int i;
2139  char *cbuf = av_malloc(len - 1);
2140  if (!cbuf)
2141  return AVERROR(ENOMEM);
2142 
2143  for (i = 0; i < len - 2; i++)
2144  cbuf[i] = get_bits(&s->gb, 8);
2145  if (i > 0 && cbuf[i - 1] == '\n')
2146  cbuf[i - 1] = 0;
2147  else
2148  cbuf[i] = 0;
2149 
2150  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2151  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2152 
2153  /* buggy avid, it puts EOI only at every 10th frame */
2154  if (!strncmp(cbuf, "AVID", 4)) {
2155  parse_avid(s, cbuf, len);
2156  } else if (!strcmp(cbuf, "CS=ITU601"))
2157  s->cs_itu601 = 1;
2158  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2159  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2160  s->flipped = 1;
2161  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2162  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2163  s->multiscope = 2;
2164  }
2165 
2166  av_free(cbuf);
2167  }
2168 
2169  return 0;
2170 }
2171 
2172 /* return the 8 bit start code value and update the search
2173  state. Return -1 if no start code found */
2174 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2175 {
2176  const uint8_t *buf_ptr;
2177  unsigned int v, v2;
2178  int val;
2179  int skipped = 0;
2180 
2181  buf_ptr = *pbuf_ptr;
2182  while (buf_end - buf_ptr > 1) {
2183  v = *buf_ptr++;
2184  v2 = *buf_ptr;
2185  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2186  val = *buf_ptr++;
2187  goto found;
2188  }
2189  skipped++;
2190  }
2191  buf_ptr = buf_end;
2192  val = -1;
2193 found:
2194  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2195  *pbuf_ptr = buf_ptr;
2196  return val;
2197 }
2198 
2199 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2200  const uint8_t **buf_ptr, const uint8_t *buf_end,
2201  const uint8_t **unescaped_buf_ptr,
2202  int *unescaped_buf_size)
2203 {
2204  int start_code;
2205  start_code = find_marker(buf_ptr, buf_end);
2206 
2207  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2208  if (!s->buffer)
2209  return AVERROR(ENOMEM);
2210 
2211  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2212  if (start_code == SOS && !s->ls) {
2213  const uint8_t *src = *buf_ptr;
2214  const uint8_t *ptr = src;
2215  uint8_t *dst = s->buffer;
2216 
2217  #define copy_data_segment(skip) do { \
2218  ptrdiff_t length = (ptr - src) - (skip); \
2219  if (length > 0) { \
2220  memcpy(dst, src, length); \
2221  dst += length; \
2222  src = ptr; \
2223  } \
2224  } while (0)
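 /* The loop below strips JPEG byte stuffing: in entropy-coded data every 0xFF byte is followed by
  * a stuffed 0x00, which is dropped here; restart markers RST0..RST7 are passed through unchanged,
  * and any other marker terminates the scan data. copy_data_segment() flushes the bytes gathered
  * so far, minus the number of stuffing/marker bytes to skip. */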
2225 
2226  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2227  ptr = buf_end;
2228  copy_data_segment(0);
2229  } else {
2230  while (ptr < buf_end) {
2231  uint8_t x = *(ptr++);
2232 
2233  if (x == 0xff) {
2234  ptrdiff_t skip = 0;
2235  while (ptr < buf_end && x == 0xff) {
2236  x = *(ptr++);
2237  skip++;
2238  }
2239 
2240  /* 0xFF, 0xFF, ... */
2241  if (skip > 1) {
2242  copy_data_segment(skip);
2243 
2244  /* decrement src as it is equal to ptr after the
2245  * copy_data_segment macro and we might want to
2246  * copy the current value of x later on */
2247  src--;
2248  }
2249 
2250  if (x < RST0 || x > RST7) {
2251  copy_data_segment(1);
2252  if (x)
2253  break;
2254  }
2255  }
2256  }
2257  if (src < ptr)
2258  copy_data_segment(0);
2259  }
2260  #undef copy_data_segment
2261 
2262  *unescaped_buf_ptr = s->buffer;
2263  *unescaped_buf_size = dst - s->buffer;
2264  memset(s->buffer + *unescaped_buf_size, 0,
2265  AV_INPUT_BUFFER_PADDING_SIZE);
2266 
2267  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2268  (buf_end - *buf_ptr) - (dst - s->buffer));
2269  } else if (start_code == SOS && s->ls) {
2270  const uint8_t *src = *buf_ptr;
2271  uint8_t *dst = s->buffer;
2272  int bit_count = 0;
2273  int t = 0, b = 0;
2274  PutBitContext pb;
2275 
2276  /* find marker */
2277  while (src + t < buf_end) {
2278  uint8_t x = src[t++];
2279  if (x == 0xff) {
2280  while ((src + t < buf_end) && x == 0xff)
2281  x = src[t++];
2282  if (x & 0x80) {
2283  t -= FFMIN(2, t);
2284  break;
2285  }
2286  }
2287  }
2288  bit_count = t * 8;
2289  init_put_bits(&pb, dst, t);
2290 
2291  /* unescape bitstream */
2292  while (b < t) {
2293  uint8_t x = src[b++];
2294  put_bits(&pb, 8, x);
2295  if (x == 0xFF && b < t) {
2296  x = src[b++];
2297  if (x & 0x80) {
2298  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2299  x &= 0x7f;
2300  }
2301  put_bits(&pb, 7, x);
2302  bit_count--;
2303  }
2304  }
2305  flush_put_bits(&pb);
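 /* Note: JPEG-LS marker emulation works differently: after an 0xFF byte only seven payload bits
  * follow (the MSB is forced to 0), so the re-packing loop above writes 8 bits for a normal byte
  * but only 7 for the byte after an 0xFF, shrinking bit_count accordingly. */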
2306 
2307  *unescaped_buf_ptr = dst;
2308  *unescaped_buf_size = (bit_count + 7) >> 3;
2309  memset(s->buffer + *unescaped_buf_size, 0,
2310  AV_INPUT_BUFFER_PADDING_SIZE);
2311  } else {
2312  *unescaped_buf_ptr = *buf_ptr;
2313  *unescaped_buf_size = buf_end - *buf_ptr;
2314  }
2315 
2316  return start_code;
2317 }
2318 
2319 static void reset_icc_profile(MJpegDecodeContext *s)
2320 {
2321  int i;
2322 
2323  if (s->iccentries) {
2324  for (i = 0; i < s->iccnum; i++)
2325  av_freep(&s->iccentries[i].data);
2326  av_freep(&s->iccentries);
2327  }
2328 
2329  s->iccread = 0;
2330  s->iccnum = 0;
2331 }
2332 
2333 // SMV JPEG just stacks several output frames into one JPEG picture
2334 // we handle that by setting up the cropping parameters appropriately
2335 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2336 {
2337  MJpegDecodeContext *s = avctx->priv_data;
2338  int ret;
2339 
2340  if (s->smv_next_frame > 0) {
2341  av_assert0(s->smv_frame->buf[0]);
2342  av_frame_unref(frame);
2343  ret = av_frame_ref(frame, s->smv_frame);
2344  if (ret < 0)
2345  return ret;
2346  } else {
2347  av_assert0(frame->buf[0]);
2348  av_frame_unref(s->smv_frame);
2349  ret = av_frame_ref(s->smv_frame, frame);
2350  if (ret < 0)
2351  return ret;
2352  }
2353 
2354  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2355 
2356  frame->width = avctx->coded_width;
2357  frame->height = avctx->coded_height;
2358  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2359  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2360 
2361  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2362 
2363  if (s->smv_next_frame == 0)
2364  av_frame_unref(s->smv_frame);
2365 
2366  return 0;
2367 }
2368 
2369 static int mjpeg_get_packet(AVCodecContext *avctx)
2370 {
2371  MJpegDecodeContext *s = avctx->priv_data;
2372  int ret;
2373 
2374  av_packet_unref(s->pkt);
2375  ret = ff_decode_get_packet(avctx, s->pkt);
2376  if (ret < 0)
2377  return ret;
2378 
2379 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2380  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2381  avctx->codec_id == AV_CODEC_ID_AMV) {
2382  ret = ff_sp5x_process_packet(avctx, s->pkt);
2383  if (ret < 0)
2384  return ret;
2385  }
2386 #endif
2387 
2388  s->buf_size = s->pkt->size;
2389 
2390  return 0;
2391 }
2392 
2393 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2394 {
2395  MJpegDecodeContext *s = avctx->priv_data;
2396  const uint8_t *buf_end, *buf_ptr;
2397  const uint8_t *unescaped_buf_ptr;
2398  int hshift, vshift;
2399  int unescaped_buf_size;
2400  int start_code;
2401  int i, index;
2402  int ret = 0;
2403  int is16bit;
2404 
2405  s->force_pal8 = 0;
2406 
2407  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2408  return smv_process_frame(avctx, frame);
2409 
2410  av_dict_free(&s->exif_metadata);
2411  av_freep(&s->stereo3d);
2412  s->adobe_transform = -1;
2413 
2414  if (s->iccnum != 0)
2415  reset_icc_profile(s);
2416 
2417  ret = mjpeg_get_packet(avctx);
2418  if (ret < 0)
2419  return ret;
2420 redo_for_pal8:
2421  buf_ptr = s->pkt->data;
2422  buf_end = s->pkt->data + s->pkt->size;
2423  while (buf_ptr < buf_end) {
2424  /* find start next marker */
2425  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2426  &unescaped_buf_ptr,
2427  &unescaped_buf_size);
2428  /* EOF */
2429  if (start_code < 0) {
2430  break;
2431  } else if (unescaped_buf_size > INT_MAX / 8) {
2432  av_log(avctx, AV_LOG_ERROR,
2433  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2434  start_code, unescaped_buf_size, s->pkt->size);
2435  return AVERROR_INVALIDDATA;
2436  }
2437  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2438  start_code, buf_end - buf_ptr);
2439 
2440  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2441 
2442  if (ret < 0) {
2443  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2444  goto fail;
2445  }
2446 
2447  s->start_code = start_code;
2448  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2449  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2450 
2451  /* process markers */
2452  if (start_code >= RST0 && start_code <= RST7) {
2453  av_log(avctx, AV_LOG_DEBUG,
2454  "restart marker: %d\n", start_code & 0x0f);
2455  /* APP fields */
2456  } else if (start_code >= APP0 && start_code <= APP15) {
2457  if ((ret = mjpeg_decode_app(s)) < 0)
2458  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2459  av_err2str(ret));
2460  /* Comment */
2461  } else if (start_code == COM) {
2462  ret = mjpeg_decode_com(s);
2463  if (ret < 0)
2464  return ret;
2465  } else if (start_code == DQT) {
2466  ret = ff_mjpeg_decode_dqt(s);
2467  if (ret < 0)
2468  return ret;
2469  }
2470 
2471  ret = -1;
2472 
2473  if (!CONFIG_JPEGLS_DECODER &&
2474  (start_code == SOF48 || start_code == LSE)) {
2475  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2476  return AVERROR(ENOSYS);
2477  }
2478 
2479  if (avctx->skip_frame == AVDISCARD_ALL) {
2480  switch(start_code) {
2481  case SOF0:
2482  case SOF1:
2483  case SOF2:
2484  case SOF3:
2485  case SOF48:
2486  case SOI:
2487  case SOS:
2488  case EOI:
2489  break;
2490  default:
2491  goto skip;
2492  }
2493  }
2494 
2495  switch (start_code) {
2496  case SOI:
2497  s->restart_interval = 0;
2498  s->restart_count = 0;
2499  s->raw_image_buffer = buf_ptr;
2500  s->raw_image_buffer_size = buf_end - buf_ptr;
2501  /* nothing to do on SOI */
2502  break;
2503  case DHT:
2504  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2505  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2506  goto fail;
2507  }
2508  break;
2509  case SOF0:
2510  case SOF1:
2511  if (start_code == SOF0)
2512  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2513  else
2514  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2515  s->lossless = 0;
2516  s->ls = 0;
2517  s->progressive = 0;
2518  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2519  goto fail;
2520  break;
2521  case SOF2:
2522  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2523  s->lossless = 0;
2524  s->ls = 0;
2525  s->progressive = 1;
2526  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2527  goto fail;
2528  break;
2529  case SOF3:
2530  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2531  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2532  s->lossless = 1;
2533  s->ls = 0;
2534  s->progressive = 0;
2535  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2536  goto fail;
2537  break;
2538  case SOF48:
2539  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2540  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2541  s->lossless = 1;
2542  s->ls = 1;
2543  s->progressive = 0;
2544  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2545  goto fail;
2546  break;
2547  case LSE:
2548  if (!CONFIG_JPEGLS_DECODER ||
2549  (ret = ff_jpegls_decode_lse(s)) < 0)
2550  goto fail;
2551  if (ret == 1)
2552  goto redo_for_pal8;
2553  break;
2554  case EOI:
2555 eoi_parser:
2556  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2557  s->progressive && s->cur_scan && s->got_picture)
2558  mjpeg_idct_scan_progressive_ac(s);
2559  s->cur_scan = 0;
2560  if (!s->got_picture) {
2561  av_log(avctx, AV_LOG_WARNING,
2562  "Found EOI before any SOF, ignoring\n");
2563  break;
2564  }
2565  if (s->interlaced) {
2566  s->bottom_field ^= 1;
2567  /* if not bottom field, do not output image yet */
2568  if (s->bottom_field == !s->interlace_polarity)
2569  break;
2570  }
2571  if (avctx->skip_frame == AVDISCARD_ALL) {
2572  s->got_picture = 0;
2573  ret = AVERROR(EAGAIN);
2574  goto the_end_no_picture;
2575  }
2576  if (s->avctx->hwaccel) {
2577  ret = s->avctx->hwaccel->end_frame(s->avctx);
2578  if (ret < 0)
2579  return ret;
2580 
2581  av_freep(&s->hwaccel_picture_private);
2582  }
2583  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2584  return ret;
2585  s->got_picture = 0;
2586 
2587  frame->pkt_dts = s->pkt->dts;
2588 
2589  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2590  int qp = FFMAX3(s->qscale[0],
2591  s->qscale[1],
2592  s->qscale[2]);
2593 
2594  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2595  }
2596 
2597  goto the_end;
2598  case SOS:
2599  s->raw_scan_buffer = buf_ptr;
2600  s->raw_scan_buffer_size = buf_end - buf_ptr;
2601 
2602  s->cur_scan++;
2603  if (avctx->skip_frame == AVDISCARD_ALL) {
2604  skip_bits(&s->gb, get_bits_left(&s->gb));
2605  break;
2606  }
2607 
2608  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2609  (avctx->err_recognition & AV_EF_EXPLODE))
2610  goto fail;
2611  break;
2612  case DRI:
2613  if ((ret = mjpeg_decode_dri(s)) < 0)
2614  return ret;
2615  break;
2616  case SOF5:
2617  case SOF6:
2618  case SOF7:
2619  case SOF9:
2620  case SOF10:
2621  case SOF11:
2622  case SOF13:
2623  case SOF14:
2624  case SOF15:
2625  case JPG:
2626  av_log(avctx, AV_LOG_ERROR,
2627  "mjpeg: unsupported coding type (%x)\n", start_code);
2628  break;
2629  }
2630 
2631 skip:
2632  /* eof process start code */
2633  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2634  av_log(avctx, AV_LOG_DEBUG,
2635  "marker parser used %d bytes (%d bits)\n",
2636  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2637  }
2638  if (s->got_picture && s->cur_scan) {
2639  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2640  goto eoi_parser;
2641  }
2642  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2643  return AVERROR_INVALIDDATA;
2644 fail:
2645  s->got_picture = 0;
2646  return ret;
2647 the_end:
2648 
2649  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2650 
2651  if (AV_RB32(s->upscale_h)) {
2652  int p;
2653  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2655  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2659  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2661  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2662  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2663  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2664  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2665  );
2666  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2667  if (ret)
2668  return ret;
2669 
2670  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2671  for (p = 0; p<s->nb_components; p++) {
2672  uint8_t *line = s->picture_ptr->data[p];
2673  int w = s->width;
2674  int h = s->height;
2675  if (!s->upscale_h[p])
2676  continue;
2677  if (p==1 || p==2) {
2678  w = AV_CEIL_RSHIFT(w, hshift);
2679  h = AV_CEIL_RSHIFT(h, vshift);
2680  }
2681  if (s->upscale_v[p] == 1)
2682  h = (h+1)>>1;
2683  av_assert0(w > 0);
2684  for (i = 0; i < h; i++) {
2685  if (s->upscale_h[p] == 1) {
2686  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2687  else line[w - 1] = line[(w - 1) / 2];
2688  for (index = w - 2; index > 0; index--) {
2689  if (is16bit)
2690  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2691  else
2692  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2693  }
2694  } else if (s->upscale_h[p] == 2) {
2695  if (is16bit) {
2696  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2697  if (w > 1)
2698  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2699  } else {
2700  line[w - 1] = line[(w - 1) / 3];
2701  if (w > 1)
2702  line[w - 2] = line[w - 1];
2703  }
2704  for (index = w - 3; index > 0; index--) {
2705  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2706  }
2707  }
2708  line += s->linesize[p];
2709  }
2710  }
2711  }
2712  if (AV_RB32(s->upscale_v)) {
2713  int p;
2714  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2719  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2720  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2721  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2725  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2726  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2727  );
2728  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2729  if (ret)
2730  return ret;
2731 
2732  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2733  for (p = 0; p < s->nb_components; p++) {
2734  uint8_t *dst;
2735  int w = s->width;
2736  int h = s->height;
2737  if (!s->upscale_v[p])
2738  continue;
2739  if (p==1 || p==2) {
2740  w = AV_CEIL_RSHIFT(w, hshift);
2741  h = AV_CEIL_RSHIFT(h, vshift);
2742  }
2743  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2744  for (i = h - 1; i; i--) {
2745  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2746  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2747  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2748  memcpy(dst, src1, w);
2749  } else {
2750  for (index = 0; index < w; index++)
2751  dst[index] = (src1[index] + src2[index]) >> 1;
2752  }
2753  dst -= s->linesize[p];
2754  }
2755  }
2756  }
2757  if (s->flipped && !s->rgb) {
2758  int j;
2759  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2760  if (ret)
2761  return ret;
2762 
2763  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2764  for (index=0; index<s->nb_components; index++) {
2765  uint8_t *dst = s->picture_ptr->data[index];
2766  int w = s->picture_ptr->width;
2767  int h = s->picture_ptr->height;
2768  if(index && index<3){
2769  w = AV_CEIL_RSHIFT(w, hshift);
2770  h = AV_CEIL_RSHIFT(h, vshift);
2771  }
2772  if(dst){
2773  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2774  for (i=0; i<h/2; i++) {
2775  for (j=0; j<w; j++)
2776  FFSWAP(int, dst[j], dst2[j]);
2777  dst += s->picture_ptr->linesize[index];
2778  dst2 -= s->picture_ptr->linesize[index];
2779  }
2780  }
2781  }
2782  }
2783  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2784  int w = s->picture_ptr->width;
2785  int h = s->picture_ptr->height;
2786  av_assert0(s->nb_components == 4);
2787  for (i=0; i<h; i++) {
2788  int j;
2789  uint8_t *dst[4];
2790  for (index=0; index<4; index++) {
2791  dst[index] = s->picture_ptr->data[index]
2792  + s->picture_ptr->linesize[index]*i;
2793  }
2794  for (j=0; j<w; j++) {
2795  int k = dst[3][j];
2796  int r = dst[0][j] * k;
2797  int g = dst[1][j] * k;
2798  int b = dst[2][j] * k;
2799  dst[0][j] = g*257 >> 16;
2800  dst[1][j] = b*257 >> 16;
2801  dst[2][j] = r*257 >> 16;
2802  dst[3][j] = 255;
2803  }
2804  }
2805  }
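 /* Note: for Adobe transform 0 with four planes, the loop above multiplies each colour plane by
  * the fourth (K/alpha) plane and rescales with x * 257 >> 16, a cheap approximation of x / 255,
  * then permutes the planes into GBRA order and forces the alpha channel to opaque. */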
2806  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2807  int w = s->picture_ptr->width;
2808  int h = s->picture_ptr->height;
2809  av_assert0(s->nb_components == 4);
2810  for (i=0; i<h; i++) {
2811  int j;
2812  uint8_t *dst[4];
2813  for (index=0; index<4; index++) {
2814  dst[index] = s->picture_ptr->data[index]
2815  + s->picture_ptr->linesize[index]*i;
2816  }
2817  for (j=0; j<w; j++) {
2818  int k = dst[3][j];
2819  int r = (255 - dst[0][j]) * k;
2820  int g = (128 - dst[1][j]) * k;
2821  int b = (128 - dst[2][j]) * k;
2822  dst[0][j] = r*257 >> 16;
2823  dst[1][j] = (g*257 >> 16) + 128;
2824  dst[2][j] = (b*257 >> 16) + 128;
2825  dst[3][j] = 255;
2826  }
2827  }
2828  }
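 /* Note: the Adobe transform 2 (YCCK) case above works the same way: Y is inverted against 255 and
  * the chroma planes are taken relative to their 128 midpoint, each is scaled by the K plane with
  * the same *257>>16 trick, the chroma midpoint is added back, and alpha is forced to opaque. */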
2829 
2830  if (s->stereo3d) {
2831  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2832  if (stereo) {
2833  stereo->type = s->stereo3d->type;
2834  stereo->flags = s->stereo3d->flags;
2835  }
2836  av_freep(&s->stereo3d);
2837  }
2838 
2839  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2840  AVFrameSideData *sd;
2841  size_t offset = 0;
2842  int total_size = 0;
2843  int i;
2844 
2845  /* Sum size of all parts. */
2846  for (i = 0; i < s->iccnum; i++)
2847  total_size += s->iccentries[i].length;
2848 
2849  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2850  if (!sd) {
2851  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2852  return AVERROR(ENOMEM);
2853  }
2854 
2855  /* Reassemble the parts, which are now in-order. */
2856  for (i = 0; i < s->iccnum; i++) {
2857  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2858  offset += s->iccentries[i].length;
2859  }
2860  }
2861 
2862  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2863  av_dict_free(&s->exif_metadata);
2864 
2865  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2866  ret = smv_process_frame(avctx, frame);
2867  if (ret < 0) {
2868  av_frame_unref(frame);
2869  return ret;
2870  }
2871  }
2872  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2873  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2874  avctx->coded_height > s->orig_height) {
2875  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2876  frame->crop_top = frame->height - avctx->height;
2877  }
2878 
2879  ret = 0;
2880 
2881 the_end_no_picture:
2882  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2883  buf_end - buf_ptr);
2884 
2885  return ret;
2886 }
2887 
2888 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2889  * even without having called ff_mjpeg_decode_init(). */
2890 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2891 {
2892  MJpegDecodeContext *s = avctx->priv_data;
2893  int i, j;
2894 
2895  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2896  av_log(avctx, AV_LOG_INFO, "Single field\n");
2897  }
2898 
2899  if (s->picture) {
2900  av_frame_free(&s->picture);
2901  s->picture_ptr = NULL;
2902  } else if (s->picture_ptr)
2903  av_frame_unref(s->picture_ptr);
2904 
2905  av_packet_free(&s->pkt);
2906 
2907  av_frame_free(&s->smv_frame);
2908 
2909  av_freep(&s->buffer);
2910  av_freep(&s->stereo3d);
2911  av_freep(&s->ljpeg_buffer);
2912  s->ljpeg_buffer_size = 0;
2913 
2914  for (i = 0; i < 3; i++) {
2915  for (j = 0; j < 4; j++)
2916  ff_free_vlc(&s->vlcs[i][j]);
2917  }
2918  for (i = 0; i < MAX_COMPONENTS; i++) {
2919  av_freep(&s->blocks[i]);
2920  av_freep(&s->last_nnz[i]);
2921  }
2922  av_dict_free(&s->exif_metadata);
2923 
2924  reset_icc_profile(s);
2925 
2926  av_freep(&s->hwaccel_picture_private);
2927  av_freep(&s->jls_state);
2928 
2929  return 0;
2930 }
2931 
2932 static void decode_flush(AVCodecContext *avctx)
2933 {
2934  MJpegDecodeContext *s = avctx->priv_data;
2935  s->got_picture = 0;
2936 
2937  s->smv_next_frame = 0;
2938  av_frame_unref(s->smv_frame);
2939 }
2940 
2941 #if CONFIG_MJPEG_DECODER
2942 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2943 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2944 static const AVOption options[] = {
2945  { "extern_huff", "Use external huffman table.",
2946  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2947  { NULL },
2948 };
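 /* Illustrative usage sketch (not part of this file): a private decoder option such as
  * "extern_huff" is normally passed through the options dictionary given to avcodec_open2(),
  * e.g. (dec_ctx is an assumed, caller-allocated AVCodecContext):
  *
  *     AVDictionary *opts = NULL;
  *     av_dict_set(&opts, "extern_huff", "1", 0);
  *     avcodec_open2(dec_ctx, avcodec_find_decoder(AV_CODEC_ID_MJPEG), &opts);
  *     av_dict_free(&opts);
  */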
2949 
2950 static const AVClass mjpegdec_class = {
2951  .class_name = "MJPEG decoder",
2952  .item_name = av_default_item_name,
2953  .option = options,
2954  .version = LIBAVUTIL_VERSION_INT,
2955 };
2956 
2957 const AVCodec ff_mjpeg_decoder = {
2958  .name = "mjpeg",
2959  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2960  .type = AVMEDIA_TYPE_VIDEO,
2961  .id = AV_CODEC_ID_MJPEG,
2962  .priv_data_size = sizeof(MJpegDecodeContext),
2963  .init = ff_mjpeg_decode_init,
2964  .close = ff_mjpeg_decode_end,
2965  .receive_frame = ff_mjpeg_receive_frame,
2966  .flush = decode_flush,
2967  .capabilities = AV_CODEC_CAP_DR1,
2968  .max_lowres = 3,
2969  .priv_class = &mjpegdec_class,
2973  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2974 #if CONFIG_MJPEG_NVDEC_HWACCEL
2975  HWACCEL_NVDEC(mjpeg),
2976 #endif
2977 #if CONFIG_MJPEG_VAAPI_HWACCEL
2978  HWACCEL_VAAPI(mjpeg),
2979 #endif
2980  NULL
2981  },
2982 };
2983 #endif
2984 #if CONFIG_THP_DECODER
2985 const AVCodec ff_thp_decoder = {
2986  .name = "thp",
2987  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2988  .type = AVMEDIA_TYPE_VIDEO,
2989  .id = AV_CODEC_ID_THP,
2990  .priv_data_size = sizeof(MJpegDecodeContext),
2991  .init = ff_mjpeg_decode_init,
2992  .close = ff_mjpeg_decode_end,
2993  .receive_frame = ff_mjpeg_receive_frame,
2994  .flush = decode_flush,
2995  .capabilities = AV_CODEC_CAP_DR1,
2996  .max_lowres = 3,
2999 };
3000 #endif
3001 
3002 #if CONFIG_SMVJPEG_DECODER
3003 const AVCodec ff_smvjpeg_decoder = {
3004  .name = "smvjpeg",
3005  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
3006  .type = AVMEDIA_TYPE_VIDEO,
3007  .id = AV_CODEC_ID_SMVJPEG,
3008  .priv_data_size = sizeof(MJpegDecodeContext),
3009  .init = ff_mjpeg_decode_init,
3010  .close = ff_mjpeg_decode_end,
3011  .receive_frame = ff_mjpeg_receive_frame,
3012  .flush = decode_flush,
3013  .capabilities = AV_CODEC_CAP_DR1,
3016 };
3017 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:403
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1465
AVCodec
AVCodec.
Definition: codec.h:197
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:222
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:187
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:41
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:56
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1066
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1072
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:108
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1390
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:604
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2932
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2541
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:952
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:478
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1430
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:275
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:108
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:187
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:219
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
index
fg index
Definition: ffmpeg_filter.c:168
AVFrame::width
int width
Definition: frame.h:361
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:431
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1735
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2335
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1095
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:786
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:142
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:196
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2369
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:149
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:70
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1409
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2581
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:508
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
rgb
Definition: rpzaenc.c:58
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:236
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1236
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1406
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:510
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:117
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1779
fail
#define fail()
Definition: checkasm.h:134
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:433
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1731
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2134
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:54
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:581
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2569
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:689
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:373
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:61
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:98
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
aligned
static int aligned(int val)
Definition: dashdec.c:168
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:853
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:401
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1929
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1733
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1035
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:603
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:104
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:97
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:402
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1734
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:479
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:350
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:289
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:202
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:400
ff_thp_decoder
const AVCodec ff_thp_decoder
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2319
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2890
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:420
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:49
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:511
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2393
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:213
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:408
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:379
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:59
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
av_clip_int16
#define av_clip_int16
Definition: common.h:137
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
ff_smvjpeg_decoder
const AVCodec ff_smvjpeg_decoder
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:380
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:592
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1594
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:195
SOF13
@ SOF13
Definition: mjpeg.h:52
receive_frame
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:555
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:255
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1421
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:970
lowres
static int lowres
Definition: ffplay.c:336
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1542
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1441
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:67
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1538
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1638
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1070
dc
Intra DC prediction values described in the Snow bitstream documentation; the surrounding text (header fields, block tree, subband layout, range coder, motion vector prediction) belongs to snow.txt, not to this file.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:326
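A minimal sketch (not taken from mjpegdec.c) of how av_frame_ref() is typically paired with av_frame_alloc() and av_frame_free(); the helper name clone_frame_ref is hypothetical.

#include "libavutil/frame.h"
#include "libavutil/error.h"

/* Hypothetical helper: create a second reference to an existing frame. */
static int clone_frame_ref(AVFrame **out, const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);      /* dst now shares src's data buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    *out = dst;                        /* caller releases with av_frame_free() */
    return 0;
}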
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:871
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1629
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:262
AV_RB32
Reads an unsigned 32-bit big-endian value from a byte buffer (one of the AV_RB/AV_RL/AV_WB/AV_WL access macros).
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:376
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1110
offset
It is the only field you need to keep, assuming you have a context; there is some magic around it that you do not need to care about, just let it be.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
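A hedged sketch of the usual JPEG "receive and extend" step that get_xbits() performs in one call: size is the magnitude category decoded from a Huffman table, and the return value is the signed difference. read_dc_diff is an illustrative name, not a function from this file.

#include "get_bits.h"

/* Illustrative only: decode a signed DC difference of 'size' bits. */
static int read_dc_diff(GetBitContext *gb, int size)
{
    return size ? get_xbits(gb, size) : 0;   /* size 0 means "no change" */
}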
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:59
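A small, hedged example of the allocate/free pairing for AVPacket; nothing here is specific to the MJPEG decoder, and packet_roundtrip is an illustrative name.

#include "libavcodec/packet.h"
#include "libavutil/error.h"

static int packet_roundtrip(void)
{
    AVPacket *pkt = av_packet_alloc();   /* fields initialized to defaults */
    if (!pkt)
        return AVERROR(ENOMEM);
    /* ... fill pkt->data / pkt->size, or reference another packet into it ... */
    av_packet_free(&pkt);                /* unreferences and sets pkt to NULL */
    return 0;
}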
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:163
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2174
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:192
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
layout
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Formats: for each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
src1
#define src1
Definition: h264pred.c:140
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2208
i
int i
Definition: input.c:407
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:804
code
Test the status of the outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores one or a few frames internally, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code follows in filter_design.txt.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:602
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
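A hedged sketch of the peek-without-consuming pattern that show_bits() enables, for example testing whether the next two bytes look like a JPEG 0xFFxx marker; next_looks_like_marker is an illustrative name, not a function from this file.

#include "get_bits.h"

/* Illustrative only: peek 16 bits to see whether an 0xFFxx marker follows,
 * without advancing the bitstream position. */
static int next_looks_like_marker(GetBitContext *gb)
{
    return get_bits_left(gb) >= 16 && (show_bits(gb, 16) & 0xFF00) == 0xFF00;
}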
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:49
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1732
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1807
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
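A hedged sketch of the grow-on-demand pattern av_fast_padded_malloc() is meant for; ensure_scratch is a hypothetical helper, and the include location of the declaration is assumed to be the public libavcodec header.

#include "libavcodec/avcodec.h"   /* av_fast_padded_malloc(), AV_INPUT_BUFFER_PADDING_SIZE */
#include "libavutil/error.h"

/* Hypothetical helper: (re)allocate a reusable buffer only when it is too
 * small; the trailing padding lets bit readers overread safely past the end. */
static int ensure_scratch(uint8_t **buf, unsigned *buf_size, size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed);
    return *buf ? 0 : AVERROR(ENOMEM);   /* on failure the old buffer is freed */
}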
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1416
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:436
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:243
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1080
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:674
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:711
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:559
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
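ff_zigzag_direct maps positions in JPEG scan order to natural 8x8 raster order. A minimal, illustrative de-zigzag loop is shown below; real decoders typically combine this table with an IDCT permutation, so this is not the actual decode path.

#include <stdint.h>

extern const uint8_t ff_zigzag_direct[64];   /* defined in libavcodec */

/* Illustrative only: place the i-th coefficient read in scan order at its
 * natural 8x8 raster position. */
static void dezigzag(int16_t block[64], const int16_t scan_order[64])
{
    for (int i = 0; i < 64; i++)
        block[ff_zigzag_direct[i]] = scan_order[i];
}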
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:944
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:175
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
Buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more, it must just process the frame or queue it, and requesting more frames is left to the filter's request_frame method or the application. A filter with several inputs must be ready for frames arriving randomly on any of them and will most likely need some kind of queuing mechanism; a limited queue that drops frames when the inputs are too unbalanced is acceptable. See filter_design.txt for the full discussion of request_frame and filter_frame.
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1413
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:192
left
Left neighboring block as used in Snow's motion vector prediction (the median of the scaled left, top and top-right vectors); the surrounding bitstream description belongs to snow.txt, not to this file.
Definition: snow.txt:386
AV_RL32
Reads an unsigned 32-bit little-endian value from a byte buffer (one of the AV_RB/AV_RL/AV_WB/AV_WL access macros).
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2199
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
OFFSET
Fields of an AVOption entry: name is the option name (keep it simple and lowercase); the description should say what the option does, for example "set the foo of the bar"; offset is the offset of the field in your context, see the OFFSET() macro; the remaining fields are default, minimum, maximum and flags.
AVCodecContext
main external API structure.
Definition: avcodec.h:501
AVFrame::height
int height
Definition: frame.h:361
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:598
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
Buffered frames must be flushed immediately if a new input produces new ones; requesting more frames is left to the filter's request_frame method or the application, and filters with several inputs will most likely need some queuing mechanism. See filter_design.txt for the full discussion, including the request_frame return values.
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:106
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:261
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:427
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1408
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:297
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:689
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:84
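A hedged sketch of how a decoder typically validates header-derived dimensions with ff_set_dimensions() before allocating anything; set_size_from_header is an illustrative name, and the include for the declaration is assumed to be libavcodec's internal header in this source tree.

#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"   /* ff_set_dimensions(), assumed location */
#include "libavutil/log.h"

/* Illustrative only: validate and apply dimensions parsed from a bitstream. */
static int set_size_from_header(AVCodecContext *avctx, int w, int h)
{
    int ret = ff_set_dimensions(avctx, w, h);   /* rejects invalid sizes */
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "invalid dimensions %dx%d\n", w, h);
    return ret;
}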
SOI
@ SOI
Definition: mjpeg.h:70
ff_mjpeg_decoder
const AVCodec ff_mjpeg_decoder
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1819
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1129
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
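A hedged sketch of attaching stereo 3D metadata to a frame, using the AV_STEREO3D_TOPBOTTOM type and AV_STEREO3D_FLAG_INVERT flag listed in this index; tag_stereo is an illustrative name.

#include "libavutil/frame.h"
#include "libavutil/stereo3d.h"
#include "libavutil/error.h"

/* Illustrative only: mark a frame as top-bottom packed stereo with the
 * right/bottom half carrying the left view. */
static int tag_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = AV_STEREO3D_FLAG_INVERT;
    return 0;
}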
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:526
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:528
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:508
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
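A hedged sketch pairing av_dict_copy() with av_dict_free(); copy_metadata is an illustrative name, not a helper from this file.

#include "libavutil/dict.h"

/* Illustrative only: duplicate a metadata dictionary; on failure, release
 * whatever was copied so *dst stays in a defined state. */
static int copy_metadata(AVDictionary **dst, const AVDictionary *src)
{
    int ret = av_dict_copy(dst, src, 0);   /* 0: keys and values are duplicated */
    if (ret < 0)
        av_dict_free(dst);
    return ret;
}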
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:315
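A hedged sketch of the usual pre-allocation dimension check; check_dims is an illustrative name, and the log context can be any AVClass-enabled struct (an AVCodecContext here).

#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"

/* Illustrative only: refuse dimensions whose image could not be addressed
 * safely, logging through the codec context. */
static int check_dims(AVCodecContext *avctx, unsigned w, unsigned h)
{
    return av_image_check_size(w, h, 0, avctx);
}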
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
AV_RB24
Reads an unsigned 24-bit big-endian value from a byte buffer (one of the AV_RB/AV_RL/AV_WB/AV_WL access macros).
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
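For reference, these are the standard JPEG lossless predictors (ITU-T T.81, Table H.1) that a PREDICT-style macro selects between; the function below is an illustrative restatement, not the macro's definition.

/* Illustrative only: predictor selection per ITU-T T.81, Table H.1.
 * left = Ra, top = Rb, topleft = Rc; selection 0 (no prediction) is not
 * handled here. */
static int ljpeg_predict(int topleft, int top, int left, int sel)
{
    switch (sel) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top  + ((left - topleft) >> 1);
    case 7:  return (left + top) >> 1;
    default: return left;
    }
}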
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82