FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwconfig.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
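/* Note: build_vlc() turns one Huffman table (bits/values as stored in a DHT
 * segment) into a VLC usable by GET_VLC/get_vlc2. For AC tables each
 * RRRRSSSS value is stored with +16 added, which folds the implicit "+1"
 * coefficient advance into the run nibble, and the end-of-block code 0x00 is
 * remapped to 16*256 so that "i += code >> 4" in decode_block() jumps past
 * index 63 and terminates the coefficient loop. */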
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
75 
76 static int init_default_huffman_tables(MJpegDecodeContext *s)
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
86  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
87  avpriv_mjpeg_val_dc, 12, 12 },
88  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
89  avpriv_mjpeg_val_dc, 12, 12 },
90  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
91  avpriv_mjpeg_val_ac_luminance, 251, 162 },
92  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
93  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
94  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
95  avpriv_mjpeg_val_ac_luminance, 251, 162 },
96  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
97  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
130 static void init_idct(AVCodecContext *avctx)
131 {
132  MJpegDecodeContext *s = avctx->priv_data;
133 
134  ff_idctdsp_init(&s->idsp, avctx);
135  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
136  ff_zigzag_direct);
137 }
138 
139 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
161  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
175  init_default_huffman_tables(s);
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
200 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
305 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
364  avpriv_report_missing_feature(s->avctx,
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415  if (s->bayer) {
416  if (nb_components == 2) {
417  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
418  width stored in their SOF3 markers is the width of each one. We only output
419  a single component, therefore we need to adjust the output image width. We
420  handle the deinterleaving (but not the debayering) in this file. */
421  width *= 2;
422  }
423  /* They can also contain 1 component, which is double the width and half the height
424  of the final image (rows are interleaved). We don't handle the decoding in this
425  file, but leave that to the TIFF/DNG decoder. */
426  }
427 
428  /* if different size, realloc/alloc picture */
429  if (width != s->width || height != s->height || bits != s->bits ||
430  memcmp(s->h_count, h_count, sizeof(h_count)) ||
431  memcmp(s->v_count, v_count, sizeof(v_count))) {
432  size_change = 1;
433 
434  s->width = width;
435  s->height = height;
436  s->bits = bits;
437  memcpy(s->h_count, h_count, sizeof(h_count));
438  memcpy(s->v_count, v_count, sizeof(v_count));
439  s->interlaced = 0;
440  s->got_picture = 0;
441 
442  /* test interlaced mode */
443  if (s->first_picture &&
444  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
445  s->org_height != 0 &&
446  s->height < ((s->org_height * 3) / 4)) {
447  s->interlaced = 1;
448  s->bottom_field = s->interlace_polarity;
449  s->picture_ptr->interlaced_frame = 1;
450  s->picture_ptr->top_field_first = !s->interlace_polarity;
451  height *= 2;
452  }
453 
454  ret = ff_set_dimensions(s->avctx, width, height);
455  if (ret < 0)
456  return ret;
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
464  if (s->progressive) {
465  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
466  return AVERROR_INVALIDDATA;
467  }
468  } else {
469  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
470  s->rgb = 1;
471  else if (!s->lossless)
472  s->rgb = 0;
473  /* XXX: not complete test ! */
474  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
475  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
476  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
477  (s->h_count[3] << 4) | s->v_count[3];
478  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
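 /* Note: pix_fmt_id packs the h/v sampling factors of up to four components
  * into one nibble each (h0 v0 h1 v1 h2 v2 h3 v3 from the most significant
  * nibble down). The two adjustments below halve all horizontal or all
  * vertical factors when every used component has a factor of 2, so e.g.
  * 2x2,2x2,2x2 maps to the same id as 1x1,1x1,1x1, and the loop that follows
  * flags components that must be upscaled to match the chosen pixel format. */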
479  /* NOTE we do not allocate pictures large enough for the possible
480  * padding of h/v_count being 4 */
481  if (!(pix_fmt_id & 0xD0D0D0D0))
482  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
483  if (!(pix_fmt_id & 0x0D0D0D0D))
484  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
485 
486  for (i = 0; i < 8; i++) {
487  int j = 6 + (i&1) - (i&6);
488  int is = (pix_fmt_id >> (4*i)) & 0xF;
489  int js = (pix_fmt_id >> (4*j)) & 0xF;
490 
491  if (is == 1 && js != 2 && (i < 2 || i > 5))
492  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
493  if (is == 1 && js != 2 && (i < 2 || i > 5))
494  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
495 
496  if (is == 1 && js == 2) {
497  if (i & 1) s->upscale_h[j/2] = 1;
498  else s->upscale_v[j/2] = 1;
499  }
500  }
501 
502  if (s->bayer) {
503  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
504  goto unk_pixfmt;
505  }
506 
507  switch (pix_fmt_id) {
508  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
509  if (!s->bayer)
510  goto unk_pixfmt;
511  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
512  break;
513  case 0x11111100:
514  if (s->rgb)
515  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
516  else {
517  if ( s->adobe_transform == 0
518  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
519  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
520  } else {
521  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
522  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
523  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
524  }
525  }
526  av_assert0(s->nb_components == 3);
527  break;
528  case 0x11111111:
529  if (s->rgb)
530  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
531  else {
532  if (s->adobe_transform == 0 && s->bits <= 8) {
533  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
534  } else {
535  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
536  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
537  }
538  }
539  av_assert0(s->nb_components == 4);
540  break;
541  case 0x22111122:
542  case 0x22111111:
543  if (s->adobe_transform == 0 && s->bits <= 8) {
544  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
545  s->upscale_v[1] = s->upscale_v[2] = 1;
546  s->upscale_h[1] = s->upscale_h[2] = 1;
547  } else if (s->adobe_transform == 2 && s->bits <= 8) {
548  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
549  s->upscale_v[1] = s->upscale_v[2] = 1;
550  s->upscale_h[1] = s->upscale_h[2] = 1;
551  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
552  } else {
553  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
554  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
555  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
556  }
557  av_assert0(s->nb_components == 4);
558  break;
559  case 0x12121100:
560  case 0x22122100:
561  case 0x21211100:
562  case 0x22211200:
563  case 0x22221100:
564  case 0x22112200:
565  case 0x11222200:
566  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
567  else
568  goto unk_pixfmt;
569  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
570  break;
571  case 0x11000000:
572  case 0x13000000:
573  case 0x14000000:
574  case 0x31000000:
575  case 0x33000000:
576  case 0x34000000:
577  case 0x41000000:
578  case 0x43000000:
579  case 0x44000000:
580  if(s->bits <= 8)
581  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
582  else
583  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
584  break;
585  case 0x12111100:
586  case 0x14121200:
587  case 0x14111100:
588  case 0x22211100:
589  case 0x22112100:
590  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
591  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
592  else
593  goto unk_pixfmt;
594  s->upscale_v[0] = s->upscale_v[1] = 1;
595  } else {
596  if (pix_fmt_id == 0x14111100)
597  s->upscale_v[1] = s->upscale_v[2] = 1;
598  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
599  else
600  goto unk_pixfmt;
601  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
602  }
603  break;
604  case 0x21111100:
605  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
606  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
607  else
608  goto unk_pixfmt;
609  s->upscale_h[0] = s->upscale_h[1] = 1;
610  } else {
611  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
612  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
613  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
614  }
615  break;
616  case 0x31111100:
617  if (s->bits > 8)
618  goto unk_pixfmt;
619  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
620  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
621  s->upscale_h[1] = s->upscale_h[2] = 2;
622  break;
623  case 0x22121100:
624  case 0x22111200:
625  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
626  else
627  goto unk_pixfmt;
628  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
629  break;
630  case 0x22111100:
631  case 0x23111100:
632  case 0x42111100:
633  case 0x24111100:
634  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
635  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
636  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
637  if (pix_fmt_id == 0x42111100) {
638  if (s->bits > 8)
639  goto unk_pixfmt;
640  s->upscale_h[1] = s->upscale_h[2] = 1;
641  } else if (pix_fmt_id == 0x24111100) {
642  if (s->bits > 8)
643  goto unk_pixfmt;
644  s->upscale_v[1] = s->upscale_v[2] = 1;
645  } else if (pix_fmt_id == 0x23111100) {
646  if (s->bits > 8)
647  goto unk_pixfmt;
648  s->upscale_v[1] = s->upscale_v[2] = 2;
649  }
650  break;
651  case 0x41111100:
652  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
653  else
654  goto unk_pixfmt;
655  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
656  break;
657  default:
658  unk_pixfmt:
659  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
660  memset(s->upscale_h, 0, sizeof(s->upscale_h));
661  memset(s->upscale_v, 0, sizeof(s->upscale_v));
662  return AVERROR_PATCHWELCOME;
663  }
664  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
665  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
666  return AVERROR_PATCHWELCOME;
667  }
668  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
669  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
670  return AVERROR_PATCHWELCOME;
671  }
672  if (s->ls) {
673  memset(s->upscale_h, 0, sizeof(s->upscale_h));
674  memset(s->upscale_v, 0, sizeof(s->upscale_v));
675  if (s->nb_components == 3) {
676  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
677  } else if (s->nb_components != 1) {
678  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
679  return AVERROR_PATCHWELCOME;
680  } else if (s->palette_index && s->bits <= 8)
681  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
682  else if (s->bits <= 8)
683  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
684  else
685  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
686  }
687 
688  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
689  if (!s->pix_desc) {
690  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
691  return AVERROR_BUG;
692  }
693 
694  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
695  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
696  } else {
697  enum AVPixelFormat pix_fmts[] = {
698 #if CONFIG_MJPEG_NVDEC_HWACCEL
699  AV_PIX_FMT_CUDA,
700 #endif
701 #if CONFIG_MJPEG_VAAPI_HWACCEL
702  AV_PIX_FMT_VAAPI,
703 #endif
704  s->avctx->pix_fmt,
705  AV_PIX_FMT_NONE,
706  };
707  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
708  if (s->hwaccel_pix_fmt < 0)
709  return AVERROR(EINVAL);
710 
711  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
712  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
713  }
714 
715  if (s->avctx->skip_frame == AVDISCARD_ALL) {
716  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
717  s->picture_ptr->key_frame = 1;
718  s->got_picture = 1;
719  return 0;
720  }
721 
722  av_frame_unref(s->picture_ptr);
723  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
724  return -1;
725  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
726  s->picture_ptr->key_frame = 1;
727  s->got_picture = 1;
728 
729  for (i = 0; i < 4; i++)
730  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
731 
732  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
733  s->width, s->height, s->linesize[0], s->linesize[1],
734  s->interlaced, s->avctx->height);
735 
736  }
737 
738  if ((s->rgb && !s->lossless && !s->ls) ||
739  (!s->rgb && s->ls && s->nb_components > 1) ||
740  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
741  ) {
742  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
743  return AVERROR_PATCHWELCOME;
744  }
745 
746  /* totally blank picture as progressive JPEG will only add details to it */
747  if (s->progressive) {
748  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
749  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
750  for (i = 0; i < s->nb_components; i++) {
751  int size = bw * bh * s->h_count[i] * s->v_count[i];
752  av_freep(&s->blocks[i]);
753  av_freep(&s->last_nnz[i]);
754  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
755  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
756  if (!s->blocks[i] || !s->last_nnz[i])
757  return AVERROR(ENOMEM);
758  s->block_stride[i] = bw * s->h_count[i];
759  }
760  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
761  }
762 
763  if (s->avctx->hwaccel) {
764  s->hwaccel_picture_private =
765  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
766  if (!s->hwaccel_picture_private)
767  return AVERROR(ENOMEM);
768 
769  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
770  s->raw_image_buffer_size);
771  if (ret < 0)
772  return ret;
773  }
774 
775  return 0;
776 }
777 
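/* Note: decodes one DC difference: the DC Huffman table yields the size
 * category, then that many extra bits are read as a signed value via
 * get_xbits(); 0xfffff (impossible for a DC difference) signals a bad code. */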
778 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
779 {
780  int code;
781  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
782  if (code < 0 || code > 16) {
783  av_log(s->avctx, AV_LOG_WARNING,
784  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
785  0, dc_index, &s->vlcs[0][dc_index]);
786  return 0xfffff;
787  }
788 
789  if (code)
790  return get_xbits(&s->gb, code);
791  else
792  return 0;
793 }
794 
795 /* decode block and dequantize */
796 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
797  int dc_index, int ac_index, uint16_t *quant_matrix)
798 {
799  int code, i, j, level, val;
800 
801  /* DC coef */
802  val = mjpeg_decode_dc(s, dc_index);
803  if (val == 0xfffff) {
804  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
805  return AVERROR_INVALIDDATA;
806  }
807  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
808  val = av_clip_int16(val);
809  s->last_dc[component] = val;
810  block[0] = val;
811  /* AC coefs */
812  i = 0;
813  {OPEN_READER(re, &s->gb);
814  do {
815  UPDATE_CACHE(re, &s->gb);
816  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
817 
818  i += ((unsigned)code) >> 4;
819  code &= 0xf;
820  if (code) {
821  if (code > MIN_CACHE_BITS - 16)
822  UPDATE_CACHE(re, &s->gb);
823 
824  {
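 /* Note: branch-free form of the JPEG "EXTEND" step (ITU-T T.81, F.2.2.1):
  * the leading bit selects the sign, and negative magnitudes are stored as
  * the bitwise complement of their absolute value. */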
825  int cache = GET_CACHE(re, &s->gb);
826  int sign = (~cache) >> 31;
827  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
828  }
829 
830  LAST_SKIP_BITS(re, &s->gb, code);
831 
832  if (i > 63) {
833  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
834  return AVERROR_INVALIDDATA;
835  }
836  j = s->scantable.permutated[i];
837  block[j] = level * quant_matrix[i];
838  }
839  } while (i < 63);
840  CLOSE_READER(re, &s->gb);}
841 
842  return 0;
843 }
844 
845 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
846  int component, int dc_index,
847  uint16_t *quant_matrix, int Al)
848 {
849  unsigned val;
850  s->bdsp.clear_block(block);
851  val = mjpeg_decode_dc(s, dc_index);
852  if (val == 0xfffff) {
853  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
854  return AVERROR_INVALIDDATA;
855  }
856  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
857  s->last_dc[component] = val;
858  block[0] = val;
859  return 0;
860 }
861 
862 /* decode block and dequantize - progressive JPEG version */
863 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
864  uint8_t *last_nnz, int ac_index,
865  uint16_t *quant_matrix,
866  int ss, int se, int Al, int *EOBRUN)
867 {
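 /* Note: ss..se is the band of coefficients coded in this scan (spectral
  * selection), Al the successive-approximation bit shift applied to the
  * dequantized levels, and *EOBRUN the number of subsequent blocks whose
  * coefficients in this band are all zero. */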
868  int code, i, j, val, run;
869  unsigned level;
870 
871  if (*EOBRUN) {
872  (*EOBRUN)--;
873  return 0;
874  }
875 
876  {
877  OPEN_READER(re, &s->gb);
878  for (i = ss; ; i++) {
879  UPDATE_CACHE(re, &s->gb);
880  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
881 
882  run = ((unsigned) code) >> 4;
883  code &= 0xF;
884  if (code) {
885  i += run;
886  if (code > MIN_CACHE_BITS - 16)
887  UPDATE_CACHE(re, &s->gb);
888 
889  {
890  int cache = GET_CACHE(re, &s->gb);
891  int sign = (~cache) >> 31;
892  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
893  }
894 
895  LAST_SKIP_BITS(re, &s->gb, code);
896 
897  if (i >= se) {
898  if (i == se) {
899  j = s->scantable.permutated[se];
900  block[j] = level * (quant_matrix[se] << Al);
901  break;
902  }
903  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
904  return AVERROR_INVALIDDATA;
905  }
906  j = s->scantable.permutated[i];
907  block[j] = level * (quant_matrix[i] << Al);
908  } else {
909  if (run == 0xF) {// ZRL - skip 15 coefficients
910  i += 15;
911  if (i >= se) {
912  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
913  return AVERROR_INVALIDDATA;
914  }
915  } else {
916  val = (1 << run);
917  if (run) {
918  UPDATE_CACHE(re, &s->gb);
919  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
920  LAST_SKIP_BITS(re, &s->gb, run);
921  }
922  *EOBRUN = val - 1;
923  break;
924  }
925  }
926  }
927  CLOSE_READER(re, &s->gb);
928  }
929 
930  if (i > *last_nnz)
931  *last_nnz = i;
932 
933  return 0;
934 }
935 
936 #define REFINE_BIT(j) { \
937  UPDATE_CACHE(re, &s->gb); \
938  sign = block[j] >> 15; \
939  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
940  ((quant_matrix[i] ^ sign) - sign) << Al; \
941  LAST_SKIP_BITS(re, &s->gb, 1); \
942 }
943 
944 #define ZERO_RUN \
945 for (; ; i++) { \
946  if (i > last) { \
947  i += run; \
948  if (i > se) { \
949  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
950  return -1; \
951  } \
952  break; \
953  } \
954  j = s->scantable.permutated[i]; \
955  if (block[j]) \
956  REFINE_BIT(j) \
957  else if (run-- == 0) \
958  break; \
959 }
960 
961 /* decode block and dequantize - progressive JPEG refinement pass */
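/* Note: in a refinement scan (Ah != 0) coefficients that are already nonzero
 * receive one correction bit each (REFINE_BIT above), while coefficients that
 * become nonzero in this scan are coded with magnitude 1 at bit position Al. */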
962 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
963  uint8_t *last_nnz,
964  int ac_index, uint16_t *quant_matrix,
965  int ss, int se, int Al, int *EOBRUN)
966 {
967  int code, i = ss, j, sign, val, run;
968  int last = FFMIN(se, *last_nnz);
969 
970  OPEN_READER(re, &s->gb);
971  if (*EOBRUN) {
972  (*EOBRUN)--;
973  } else {
974  for (; ; i++) {
975  UPDATE_CACHE(re, &s->gb);
976  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
977 
978  if (code & 0xF) {
979  run = ((unsigned) code) >> 4;
980  UPDATE_CACHE(re, &s->gb);
981  val = SHOW_UBITS(re, &s->gb, 1);
982  LAST_SKIP_BITS(re, &s->gb, 1);
983  ZERO_RUN;
984  j = s->scantable.permutated[i];
985  val--;
986  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
987  if (i == se) {
988  if (i > *last_nnz)
989  *last_nnz = i;
990  CLOSE_READER(re, &s->gb);
991  return 0;
992  }
993  } else {
994  run = ((unsigned) code) >> 4;
995  if (run == 0xF) {
996  ZERO_RUN;
997  } else {
998  val = run;
999  run = (1 << run);
1000  if (val) {
1001  UPDATE_CACHE(re, &s->gb);
1002  run += SHOW_UBITS(re, &s->gb, val);
1003  LAST_SKIP_BITS(re, &s->gb, val);
1004  }
1005  *EOBRUN = run - 1;
1006  break;
1007  }
1008  }
1009  }
1010 
1011  if (i > *last_nnz)
1012  *last_nnz = i;
1013  }
1014 
1015  for (; i <= last; i++) {
1016  j = s->scantable.permutated[i];
1017  if (block[j])
1018  REFINE_BIT(j)
1019  }
1020  CLOSE_READER(re, &s->gb);
1021 
1022  return 0;
1023 }
1024 #undef REFINE_BIT
1025 #undef ZERO_RUN
1026 
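/* Note: restart-marker handling, called after every macroblock: once the
 * restart interval is exhausted the bitstream is re-aligned, an RSTn marker
 * (0xFFD0-0xFFD7) is skipped if present and the DC predictors are reset;
 * the nonzero return value lets the progressive decoder reset its EOB run. */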
1027 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1028 {
1029  int i;
1030  int reset = 0;
1031 
1032  if (s->restart_interval) {
1033  s->restart_count--;
1034  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1035  align_get_bits(&s->gb);
1036  for (i = 0; i < nb_components; i++) /* reset dc */
1037  s->last_dc[i] = (4 << s->bits);
1038  }
1039 
1040  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1041  /* skip RSTn */
1042  if (s->restart_count == 0) {
1043  if( show_bits(&s->gb, i) == (1 << i) - 1
1044  || show_bits(&s->gb, i) == 0xFF) {
1045  int pos = get_bits_count(&s->gb);
1046  align_get_bits(&s->gb);
1047  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1048  skip_bits(&s->gb, 8);
1049  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1050  for (i = 0; i < nb_components; i++) /* reset dc */
1051  s->last_dc[i] = (4 << s->bits);
1052  reset = 1;
1053  } else
1054  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1055  }
1056  }
1057  }
1058  return reset;
1059 }
1060 
1061 /* Handles 1 to 4 components */
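/* Lossless (SOF3) scan for RGB and Bayer data: every sample is predicted from
 * its left/top/top-left neighbours with the predictor chosen in the SOS
 * header, the Huffman-coded difference is added, and the result is scaled by
 * the point transform; RCT/Pegasus-RCT streams are converted back to RGB once
 * a full row has been decoded. */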
1062 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1063 {
1064  int i, mb_x, mb_y;
1065  unsigned width;
1066  uint16_t (*buffer)[4];
1067  int left[4], top[4], topleft[4];
1068  const int linesize = s->linesize[0];
1069  const int mask = ((1 << s->bits) - 1) << point_transform;
1070  int resync_mb_y = 0;
1071  int resync_mb_x = 0;
1072  int vpred[6];
1073 
1074  if (!s->bayer && s->nb_components < 3)
1075  return AVERROR_INVALIDDATA;
1076  if (s->bayer && s->nb_components > 2)
1077  return AVERROR_INVALIDDATA;
1078  if (s->nb_components <= 0 || s->nb_components > 4)
1079  return AVERROR_INVALIDDATA;
1080  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1081  return AVERROR_INVALIDDATA;
1082 
1083 
1084  s->restart_count = s->restart_interval;
1085 
1086  if (s->restart_interval == 0)
1087  s->restart_interval = INT_MAX;
1088 
1089  if (s->bayer)
1090  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1091  else
1092  width = s->mb_width;
1093 
1094  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1095  if (!s->ljpeg_buffer)
1096  return AVERROR(ENOMEM);
1097 
1098  buffer = s->ljpeg_buffer;
1099 
1100  for (i = 0; i < 4; i++)
1101  buffer[0][i] = 1 << (s->bits - 1);
1102 
1103  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1104  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1105 
1106  if (s->interlaced && s->bottom_field)
1107  ptr += linesize >> 1;
1108 
1109  for (i = 0; i < 4; i++)
1110  top[i] = left[i] = topleft[i] = buffer[0][i];
1111 
1112  if ((mb_y * s->width) % s->restart_interval == 0) {
1113  for (i = 0; i < 6; i++)
1114  vpred[i] = 1 << (s->bits-1);
1115  }
1116 
1117  for (mb_x = 0; mb_x < width; mb_x++) {
1118  int modified_predictor = predictor;
1119 
1120  if (get_bits_left(&s->gb) < 1) {
1121  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1122  return AVERROR_INVALIDDATA;
1123  }
1124 
1125  if (s->restart_interval && !s->restart_count){
1126  s->restart_count = s->restart_interval;
1127  resync_mb_x = mb_x;
1128  resync_mb_y = mb_y;
1129  for(i=0; i<4; i++)
1130  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1131  }
1132  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1133  modified_predictor = 1;
1134 
1135  for (i=0;i<nb_components;i++) {
1136  int pred, dc;
1137 
1138  topleft[i] = top[i];
1139  top[i] = buffer[mb_x][i];
1140 
1141  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1142  if(dc == 0xFFFFF)
1143  return -1;
1144 
1145  if (!s->bayer || mb_x) {
1146  pred = left[i];
1147  } else { /* This path runs only for the first line in bayer images */
1148  vpred[i] += dc;
1149  pred = vpred[i] - dc;
1150  }
1151 
1152  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1153 
1154  left[i] = buffer[mb_x][i] =
1155  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1156  }
1157 
1158  if (s->restart_interval && !--s->restart_count) {
1159  align_get_bits(&s->gb);
1160  skip_bits(&s->gb, 16); /* skip RSTn */
1161  }
1162  }
1163  if (s->rct && s->nb_components == 4) {
1164  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1165  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1166  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1167  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1168  ptr[4*mb_x + 0] = buffer[mb_x][3];
1169  }
1170  } else if (s->nb_components == 4) {
1171  for(i=0; i<nb_components; i++) {
1172  int c= s->comp_index[i];
1173  if (s->bits <= 8) {
1174  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1175  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1176  }
1177  } else if(s->bits == 9) {
1178  return AVERROR_PATCHWELCOME;
1179  } else {
1180  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1181  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1182  }
1183  }
1184  }
1185  } else if (s->rct) {
1186  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1187  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1188  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1189  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1190  }
1191  } else if (s->pegasus_rct) {
1192  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1193  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1194  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1195  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1196  }
1197  } else if (s->bayer) {
1198  if (nb_components == 1) {
1199  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1200  for (mb_x = 0; mb_x < width; mb_x++)
1201  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1202  } else if (nb_components == 2) {
1203  for (mb_x = 0; mb_x < width; mb_x++) {
1204  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1205  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1206  }
1207  }
1208  } else {
1209  for(i=0; i<nb_components; i++) {
1210  int c= s->comp_index[i];
1211  if (s->bits <= 8) {
1212  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1213  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1214  }
1215  } else if(s->bits == 9) {
1216  return AVERROR_PATCHWELCOME;
1217  } else {
1218  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1219  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1220  }
1221  }
1222  }
1223  }
1224  }
1225  return 0;
1226 }
1227 
1228 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1229  int point_transform, int nb_components)
1230 {
1231  int i, mb_x, mb_y, mask;
1232  int bits= (s->bits+7)&~7;
1233  int resync_mb_y = 0;
1234  int resync_mb_x = 0;
1235 
1236  point_transform += bits - s->bits;
1237  mask = ((1 << s->bits) - 1) << point_transform;
1238 
1239  av_assert0(nb_components>=1 && nb_components<=4);
1240 
1241  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1242  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1243  if (get_bits_left(&s->gb) < 1) {
1244  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1245  return AVERROR_INVALIDDATA;
1246  }
1247  if (s->restart_interval && !s->restart_count){
1248  s->restart_count = s->restart_interval;
1249  resync_mb_x = mb_x;
1250  resync_mb_y = mb_y;
1251  }
1252 
1253  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1254  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1255  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1256  for (i = 0; i < nb_components; i++) {
1257  uint8_t *ptr;
1258  uint16_t *ptr16;
1259  int n, h, v, x, y, c, j, linesize;
1260  n = s->nb_blocks[i];
1261  c = s->comp_index[i];
1262  h = s->h_scount[i];
1263  v = s->v_scount[i];
1264  x = 0;
1265  y = 0;
1266  linesize= s->linesize[c];
1267 
1268  if(bits>8) linesize /= 2;
1269 
1270  for(j=0; j<n; j++) {
1271  int pred, dc;
1272 
1273  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1274  if(dc == 0xFFFFF)
1275  return -1;
1276  if ( h * mb_x + x >= s->width
1277  || v * mb_y + y >= s->height) {
1278  // Nothing to do
1279  } else if (bits<=8) {
1280  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1281  if(y==0 && toprow){
1282  if(x==0 && leftcol){
1283  pred= 1 << (bits - 1);
1284  }else{
1285  pred= ptr[-1];
1286  }
1287  }else{
1288  if(x==0 && leftcol){
1289  pred= ptr[-linesize];
1290  }else{
1291  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1292  }
1293  }
1294 
1295  if (s->interlaced && s->bottom_field)
1296  ptr += linesize >> 1;
1297  pred &= mask;
1298  *ptr= pred + ((unsigned)dc << point_transform);
1299  }else{
1300  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1301  if(y==0 && toprow){
1302  if(x==0 && leftcol){
1303  pred= 1 << (bits - 1);
1304  }else{
1305  pred= ptr16[-1];
1306  }
1307  }else{
1308  if(x==0 && leftcol){
1309  pred= ptr16[-linesize];
1310  }else{
1311  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1312  }
1313  }
1314 
1315  if (s->interlaced && s->bottom_field)
1316  ptr16 += linesize >> 1;
1317  pred &= mask;
1318  *ptr16= pred + ((unsigned)dc << point_transform);
1319  }
1320  if (++x == h) {
1321  x = 0;
1322  y++;
1323  }
1324  }
1325  }
1326  } else {
1327  for (i = 0; i < nb_components; i++) {
1328  uint8_t *ptr;
1329  uint16_t *ptr16;
1330  int n, h, v, x, y, c, j, linesize, dc;
1331  n = s->nb_blocks[i];
1332  c = s->comp_index[i];
1333  h = s->h_scount[i];
1334  v = s->v_scount[i];
1335  x = 0;
1336  y = 0;
1337  linesize = s->linesize[c];
1338 
1339  if(bits>8) linesize /= 2;
1340 
1341  for (j = 0; j < n; j++) {
1342  int pred;
1343 
1344  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1345  if(dc == 0xFFFFF)
1346  return -1;
1347  if ( h * mb_x + x >= s->width
1348  || v * mb_y + y >= s->height) {
1349  // Nothing to do
1350  } else if (bits<=8) {
1351  ptr = s->picture_ptr->data[c] +
1352  (linesize * (v * mb_y + y)) +
1353  (h * mb_x + x); //FIXME optimize this crap
1354  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1355 
1356  pred &= mask;
1357  *ptr = pred + ((unsigned)dc << point_transform);
1358  }else{
1359  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1360  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1361 
1362  pred &= mask;
1363  *ptr16= pred + ((unsigned)dc << point_transform);
1364  }
1365 
1366  if (++x == h) {
1367  x = 0;
1368  y++;
1369  }
1370  }
1371  }
1372  }
1373  if (s->restart_interval && !--s->restart_count) {
1374  align_get_bits(&s->gb);
1375  skip_bits(&s->gb, 16); /* skip RSTn */
1376  }
1377  }
1378  }
1379  return 0;
1380 }
1381 
1382 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1383  uint8_t *dst, const uint8_t *src,
1384  int linesize, int lowres)
1385 {
1386  switch (lowres) {
1387  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1388  break;
1389  case 1: copy_block4(dst, src, linesize, linesize, 4);
1390  break;
1391  case 2: copy_block2(dst, src, linesize, linesize, 2);
1392  break;
1393  case 3: *dst = *src;
1394  break;
1395  }
1396 }
1397 
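/* Note: for bit depths that are not a multiple of 8 the IDCT output only
 * fills the low s->bits bits; shift every sample up so it spans the full
 * 8- or 16-bit range of the output pixel format. */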
1398 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1399 {
1400  int block_x, block_y;
1401  int size = 8 >> s->avctx->lowres;
1402  if (s->bits > 8) {
1403  for (block_y=0; block_y<size; block_y++)
1404  for (block_x=0; block_x<size; block_x++)
1405  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1406  } else {
1407  for (block_y=0; block_y<size; block_y++)
1408  for (block_x=0; block_x<size; block_x++)
1409  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1410  }
1411 }
1412 
1413 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1414  int Al, const uint8_t *mb_bitmask,
1415  int mb_bitmask_size,
1416  const AVFrame *reference)
1417 {
1418  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1419  uint8_t *data[MAX_COMPONENTS];
1420  const uint8_t *reference_data[MAX_COMPONENTS];
1421  int linesize[MAX_COMPONENTS];
1422  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1423  int bytes_per_pixel = 1 + (s->bits > 8);
1424 
1425  if (mb_bitmask) {
1426  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1427  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1428  return AVERROR_INVALIDDATA;
1429  }
1430  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1431  }
1432 
1433  s->restart_count = 0;
1434 
1435  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1436  &chroma_v_shift);
1437  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1438  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1439 
1440  for (i = 0; i < nb_components; i++) {
1441  int c = s->comp_index[i];
1442  data[c] = s->picture_ptr->data[c];
1443  reference_data[c] = reference ? reference->data[c] : NULL;
1444  linesize[c] = s->linesize[c];
1445  s->coefs_finished[c] |= 1;
1446  }
1447 
1448  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1449  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1450  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1451 
1452  if (s->restart_interval && !s->restart_count)
1453  s->restart_count = s->restart_interval;
1454 
1455  if (get_bits_left(&s->gb) < 0) {
1456  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1457  -get_bits_left(&s->gb));
1458  return AVERROR_INVALIDDATA;
1459  }
1460  for (i = 0; i < nb_components; i++) {
1461  uint8_t *ptr;
1462  int n, h, v, x, y, c, j;
1463  int block_offset;
1464  n = s->nb_blocks[i];
1465  c = s->comp_index[i];
1466  h = s->h_scount[i];
1467  v = s->v_scount[i];
1468  x = 0;
1469  y = 0;
1470  for (j = 0; j < n; j++) {
1471  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1472  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1473 
1474  if (s->interlaced && s->bottom_field)
1475  block_offset += linesize[c] >> 1;
1476  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1477  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1478  ptr = data[c] + block_offset;
1479  } else
1480  ptr = NULL;
1481  if (!s->progressive) {
1482  if (copy_mb) {
1483  if (ptr)
1484  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1485  linesize[c], s->avctx->lowres);
1486 
1487  } else {
1488  s->bdsp.clear_block(s->block);
1489  if (decode_block(s, s->block, i,
1490  s->dc_index[i], s->ac_index[i],
1491  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1492  av_log(s->avctx, AV_LOG_ERROR,
1493  "error y=%d x=%d\n", mb_y, mb_x);
1494  return AVERROR_INVALIDDATA;
1495  }
1496  if (ptr) {
1497  s->idsp.idct_put(ptr, linesize[c], s->block);
1498  if (s->bits & 7)
1499  shift_output(s, ptr, linesize[c]);
1500  }
1501  }
1502  } else {
1503  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1504  (h * mb_x + x);
1505  int16_t *block = s->blocks[c][block_idx];
1506  if (Ah)
1507  block[0] += get_bits1(&s->gb) *
1508  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1509  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1510  s->quant_matrixes[s->quant_sindex[i]],
1511  Al) < 0) {
1512  av_log(s->avctx, AV_LOG_ERROR,
1513  "error y=%d x=%d\n", mb_y, mb_x);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  }
1517  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1518  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1519  mb_x, mb_y, x, y, c, s->bottom_field,
1520  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1521  if (++x == h) {
1522  x = 0;
1523  y++;
1524  }
1525  }
1526  }
1527 
1528  handle_rstn(s, nb_components);
1529  }
1530  }
1531  return 0;
1532 }
1533 
1534 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1535  int se, int Ah, int Al)
1536 {
1537  int mb_x, mb_y;
1538  int EOBRUN = 0;
1539  int c = s->comp_index[0];
1540  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1541 
1542  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1543  if (se < ss || se > 63) {
1544  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1545  return AVERROR_INVALIDDATA;
1546  }
1547 
1548  // s->coefs_finished is a bitmask for coefficients coded
1549  // ss and se are parameters telling start and end coefficients
1550  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1551 
1552  s->restart_count = 0;
1553 
1554  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1555  int block_idx = mb_y * s->block_stride[c];
1556  int16_t (*block)[64] = &s->blocks[c][block_idx];
1557  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1558  if (get_bits_left(&s->gb) <= 0) {
1559  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1560  return AVERROR_INVALIDDATA;
1561  }
1562  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1563  int ret;
1564  if (s->restart_interval && !s->restart_count)
1565  s->restart_count = s->restart_interval;
1566 
1567  if (Ah)
1568  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1569  quant_matrix, ss, se, Al, &EOBRUN);
1570  else
1571  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1572  quant_matrix, ss, se, Al, &EOBRUN);
1573 
1574  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1575  ret = AVERROR_INVALIDDATA;
1576  if (ret < 0) {
1577  av_log(s->avctx, AV_LOG_ERROR,
1578  "error y=%d x=%d\n", mb_y, mb_x);
1579  return AVERROR_INVALIDDATA;
1580  }
1581 
1582  if (handle_rstn(s, 0))
1583  EOBRUN = 0;
1584  }
1585  }
1586  return 0;
1587 }
1588 
1589 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1590 {
1591  int mb_x, mb_y;
1592  int c;
1593  const int bytes_per_pixel = 1 + (s->bits > 8);
1594  const int block_size = s->lossless ? 1 : 8;
1595 
1596  for (c = 0; c < s->nb_components; c++) {
1597  uint8_t *data = s->picture_ptr->data[c];
1598  int linesize = s->linesize[c];
1599  int h = s->h_max / s->h_count[c];
1600  int v = s->v_max / s->v_count[c];
1601  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1602  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1603 
1604  if (~s->coefs_finished[c])
1605  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1606 
1607  if (s->interlaced && s->bottom_field)
1608  data += linesize >> 1;
1609 
1610  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1611  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1612  int block_idx = mb_y * s->block_stride[c];
1613  int16_t (*block)[64] = &s->blocks[c][block_idx];
1614  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1615  s->idsp.idct_put(ptr, linesize, *block);
1616  if (s->bits & 7)
1617  shift_output(s, ptr, linesize);
1618  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1619  }
1620  }
1621  }
1622 }
1623 
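/* Parse an SOS header and decode the scan that follows: map the scan's
 * component selectors and Huffman table indices onto the SOF components,
 * read Ss/Se/Ah/Al (predictor and point transform for the lossless modes),
 * then hand off to the hwaccel, JPEG-LS, lossless or DCT (baseline or
 * progressive) path; AVRn-style interlaced files loop back via next_field
 * to decode the second field. */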
1624 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1625  int mb_bitmask_size, const AVFrame *reference)
1626 {
1627  int len, nb_components, i, h, v, predictor, point_transform;
1628  int index, id, ret;
1629  const int block_size = s->lossless ? 1 : 8;
1630  int ilv, prev_shift;
1631 
1632  if (!s->got_picture) {
1633  av_log(s->avctx, AV_LOG_WARNING,
1634  "Can not process SOS before SOF, skipping\n");
1635  return -1;
1636  }
1637 
1638  if (reference) {
1639  if (reference->width != s->picture_ptr->width ||
1640  reference->height != s->picture_ptr->height ||
1641  reference->format != s->picture_ptr->format) {
1642  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1643  return AVERROR_INVALIDDATA;
1644  }
1645  }
1646 
1647  /* XXX: verify len field validity */
1648  len = get_bits(&s->gb, 16);
1649  nb_components = get_bits(&s->gb, 8);
1650  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1651  avpriv_report_missing_feature(s->avctx,
1652  "decode_sos: nb_components (%d)",
1653  nb_components);
1654  return AVERROR_PATCHWELCOME;
1655  }
1656  if (len != 6 + 2 * nb_components) {
1657  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1658  return AVERROR_INVALIDDATA;
1659  }
1660  for (i = 0; i < nb_components; i++) {
1661  id = get_bits(&s->gb, 8) - 1;
1662  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1663  /* find component index */
1664  for (index = 0; index < s->nb_components; index++)
1665  if (id == s->component_id[index])
1666  break;
1667  if (index == s->nb_components) {
1668  av_log(s->avctx, AV_LOG_ERROR,
1669  "decode_sos: index(%d) out of components\n", index);
1670  return AVERROR_INVALIDDATA;
1671  }
1672  /* Metasoft MJPEG codec has Cb and Cr swapped */
1673  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1674  && nb_components == 3 && s->nb_components == 3 && i)
1675  index = 3 - i;
1676 
1677  s->quant_sindex[i] = s->quant_index[index];
1678  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1679  s->h_scount[i] = s->h_count[index];
1680  s->v_scount[i] = s->v_count[index];
1681 
1682  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1683  index = (index+2)%3;
1684 
1685  s->comp_index[i] = index;
1686 
1687  s->dc_index[i] = get_bits(&s->gb, 4);
1688  s->ac_index[i] = get_bits(&s->gb, 4);
1689 
1690  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1691  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1692  goto out_of_range;
1693  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1694  goto out_of_range;
1695  }
1696 
1697  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1698  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1699  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1700  prev_shift = get_bits(&s->gb, 4); /* Ah */
1701  point_transform = get_bits(&s->gb, 4); /* Al */
1702  }else
1703  prev_shift = point_transform = 0;
1704 
1705  if (nb_components > 1) {
1706  /* interleaved stream */
1707  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1708  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1709  } else if (!s->ls) { /* skip this for JPEG-LS */
1710  h = s->h_max / s->h_scount[0];
1711  v = s->v_max / s->v_scount[0];
1712  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1713  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1714  s->nb_blocks[0] = 1;
1715  s->h_scount[0] = 1;
1716  s->v_scount[0] = 1;
1717  }
1718 
1719  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1720  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1721  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1722  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1723  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1724 
1725 
1726  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1727  for (i = s->mjpb_skiptosod; i > 0; i--)
1728  skip_bits(&s->gb, 8);
1729 
1730 next_field:
1731  for (i = 0; i < nb_components; i++)
1732  s->last_dc[i] = (4 << s->bits);
1733 
1734  if (s->avctx->hwaccel) {
1735  int bytes_to_start = get_bits_count(&s->gb) / 8;
1736  av_assert0(bytes_to_start >= 0 &&
1737  s->raw_scan_buffer_size >= bytes_to_start);
1738 
1739  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1740  s->raw_scan_buffer + bytes_to_start,
1741  s->raw_scan_buffer_size - bytes_to_start);
1742  if (ret < 0)
1743  return ret;
1744 
1745  } else if (s->lossless) {
1746  av_assert0(s->picture_ptr == s->picture);
1747  if (CONFIG_JPEGLS_DECODER && s->ls) {
1748 // for () {
1749 // reset_ls_coding_parameters(s, 0);
1750 
1751  if ((ret = ff_jpegls_decode_picture(s, predictor,
1752  point_transform, ilv)) < 0)
1753  return ret;
1754  } else {
1755  if (s->rgb || s->bayer) {
1756  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1757  return ret;
1758  } else {
1759  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1760  point_transform,
1761  nb_components)) < 0)
1762  return ret;
1763  }
1764  }
1765  } else {
1766  if (s->progressive && predictor) {
1767  av_assert0(s->picture_ptr == s->picture);
1768  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1769  ilv, prev_shift,
1770  point_transform)) < 0)
1771  return ret;
1772  } else {
1773  if ((ret = mjpeg_decode_scan(s, nb_components,
1774  prev_shift, point_transform,
1775  mb_bitmask, mb_bitmask_size, reference)) < 0)
1776  return ret;
1777  }
1778  }
1779 
1780  if (s->interlaced &&
1781  get_bits_left(&s->gb) > 32 &&
1782  show_bits(&s->gb, 8) == 0xFF) {
1783  GetBitContext bak = s->gb;
1784  align_get_bits(&bak);
1785  if (show_bits(&bak, 16) == 0xFFD1) {
1786  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1787  s->gb = bak;
1788  skip_bits(&s->gb, 16);
1789  s->bottom_field ^= 1;
1790 
1791  goto next_field;
1792  }
1793  }
1794 
1795  emms_c();
1796  return 0;
1797  out_of_range:
1798  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1799  return AVERROR_INVALIDDATA;
1800 }
1801 
1802 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1803 {
1804  if (get_bits(&s->gb, 16) != 4)
1805  return AVERROR_INVALIDDATA;
1806  s->restart_interval = get_bits(&s->gb, 16);
1807  s->restart_count = 0;
1808  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1809  s->restart_interval);
1810 
1811  return 0;
1812 }
1813 
1814 static int mjpeg_decode_app(MJpegDecodeContext *s)
1815 {
1816  int len, id, i;
1817 
1818  len = get_bits(&s->gb, 16);
1819  if (len < 6) {
1820  if (s->bayer) {
1821  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1822  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1823  skip_bits(&s->gb, len);
1824  return 0;
1825  } else
1826  return AVERROR_INVALIDDATA;
1827  }
1828  if (8 * len > get_bits_left(&s->gb))
1829  return AVERROR_INVALIDDATA;
1830 
1831  id = get_bits_long(&s->gb, 32);
1832  len -= 6;
1833 
1834  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1835  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1836  av_fourcc2str(av_bswap32(id)), id, len);
1837 
1838  /* Buggy AVID, it puts EOI only at every 10th frame. */
1839  /* Also, this fourcc is used by non-AVID files too; it holds some
1840  information, but it's always present in AVID-created files. */
1841  if (id == AV_RB32("AVI1")) {
1842  /* structure:
1843  4bytes AVI1
1844  1bytes polarity
1845  1bytes always zero
1846  4bytes field_size
1847  4bytes field_size_less_padding
1848  */
1849  s->buggy_avid = 1;
1850  i = get_bits(&s->gb, 8); len--;
1851  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1852  goto out;
1853  }
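     /* Example AVI1 APP0 payload after the length field:
      *   'A' 'V' 'I' '1'  01  00  <field_size:4>  <field_size_less_padding:4>
      * Only the polarity byte is used here; the remaining bytes are consumed
      * by the generic length handling at "out". */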
1854 
1855  if (id == AV_RB32("JFIF")) {
1856  int t_w, t_h, v1, v2;
1857  if (len < 8)
1858  goto out;
1859  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1860  v1 = get_bits(&s->gb, 8);
1861  v2 = get_bits(&s->gb, 8);
1862  skip_bits(&s->gb, 8);
1863 
1864  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1865  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1866  if ( s->avctx->sample_aspect_ratio.num <= 0
1867  || s->avctx->sample_aspect_ratio.den <= 0) {
1868  s->avctx->sample_aspect_ratio.num = 0;
1869  s->avctx->sample_aspect_ratio.den = 1;
1870  }
1871 
1872  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1873  av_log(s->avctx, AV_LOG_INFO,
1874  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1875  v1, v2,
1876  s->avctx->sample_aspect_ratio.num,
1877  s->avctx->sample_aspect_ratio.den);
1878 
1879  len -= 8;
1880  if (len >= 2) {
1881  t_w = get_bits(&s->gb, 8);
1882  t_h = get_bits(&s->gb, 8);
1883  if (t_w && t_h) {
1884  /* skip thumbnail */
1885  if (len - 10 - (t_w * t_h * 3) > 0)
1886  len -= t_w * t_h * 3;
1887  }
1888  len -= 2;
1889  }
1890  goto out;
1891  }
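     /* Worked JFIF example: the payload 'J''F''I''F' 00 01 02 00 00 01 00 01 00 00
      * declares version 1.02, a 1:1 pixel aspect ratio and no thumbnail; the two
      * 16-bit density values are stored directly as sample_aspect_ratio num/den,
      * and the density-units byte is skipped. */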
1892 
1893  if ( id == AV_RB32("Adob")
1894  && len >= 7
1895  && show_bits(&s->gb, 8) == 'e'
1896  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1897  skip_bits(&s->gb, 8); /* 'e' */
1898  skip_bits(&s->gb, 16); /* version */
1899  skip_bits(&s->gb, 16); /* flags0 */
1900  skip_bits(&s->gb, 16); /* flags1 */
1901  s->adobe_transform = get_bits(&s->gb, 8);
1902  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1903  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1904  len -= 7;
1905  goto out;
1906  }
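     /* The Adobe APP14 transform byte read above follows Adobe's convention:
      * 0 = no transform (RGB or CMYK stored directly), 1 = YCbCr, 2 = YCCK.
      * Values 0 and 2 are acted upon after decoding; see the adobe_transform
      * checks near the end of ff_mjpeg_decode_frame(). */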
1907 
1908  if (id == AV_RB32("LJIF")) {
1909  int rgb = s->rgb;
1910  int pegasus_rct = s->pegasus_rct;
1911  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1912  av_log(s->avctx, AV_LOG_INFO,
1913  "Pegasus lossless jpeg header found\n");
1914  skip_bits(&s->gb, 16); /* version ? */
1915  skip_bits(&s->gb, 16); /* unknown always 0? */
1916  skip_bits(&s->gb, 16); /* unknown always 0? */
1917  skip_bits(&s->gb, 16); /* unknown always 0? */
1918  switch (i=get_bits(&s->gb, 8)) {
1919  case 1:
1920  rgb = 1;
1921  pegasus_rct = 0;
1922  break;
1923  case 2:
1924  rgb = 1;
1925  pegasus_rct = 1;
1926  break;
1927  default:
1928  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1929  }
1930 
1931  len -= 9;
1932  if (s->got_picture)
1933  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1934  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1935  goto out;
1936  }
1937 
1938  s->rgb = rgb;
1939  s->pegasus_rct = pegasus_rct;
1940 
1941  goto out;
1942  }
1943  if (id == AV_RL32("colr") && len > 0) {
1944  s->colr = get_bits(&s->gb, 8);
1945  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1946  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1947  len --;
1948  goto out;
1949  }
1950  if (id == AV_RL32("xfrm") && len > 0) {
1951  s->xfrm = get_bits(&s->gb, 8);
1952  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1953  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1954  len --;
1955  goto out;
1956  }
1957 
1958  /* JPS extension by VRex */
1959  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1960  int flags, layout, type;
1961  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1962  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1963 
1964  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1965  skip_bits(&s->gb, 16); len -= 2; /* block length */
1966  skip_bits(&s->gb, 8); /* reserved */
1967  flags = get_bits(&s->gb, 8);
1968  layout = get_bits(&s->gb, 8);
1969  type = get_bits(&s->gb, 8);
1970  len -= 4;
1971 
1972  av_freep(&s->stereo3d);
1973  s->stereo3d = av_stereo3d_alloc();
1974  if (!s->stereo3d) {
1975  goto out;
1976  }
1977  if (type == 0) {
1978  s->stereo3d->type = AV_STEREO3D_2D;
1979  } else if (type == 1) {
1980  switch (layout) {
1981  case 0x01:
1982  s->stereo3d->type = AV_STEREO3D_LINES;
1983  break;
1984  case 0x02:
1985  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1986  break;
1987  case 0x03:
1988  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1989  break;
1990  }
1991  if (!(flags & 0x04)) {
1992  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1993  }
1994  }
1995  goto out;
1996  }
1997 
1998  /* EXIF metadata */
1999  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2000  GetByteContext gbytes;
2001  int ret, le, ifd_offset, bytes_read;
2002  const uint8_t *aligned;
2003 
2004  skip_bits(&s->gb, 16); // skip padding
2005  len -= 2;
2006 
2007  // init byte wise reading
2008  aligned = align_get_bits(&s->gb);
2009  bytestream2_init(&gbytes, aligned, len);
2010 
2011  // read TIFF header
2012  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2013  if (ret) {
2014  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2015  } else {
2016  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2017 
2018  // read 0th IFD and store the metadata
2019  // (return values > 0 indicate the presence of subimage metadata)
2020  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2021  if (ret < 0) {
2022  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2023  }
2024  }
2025 
2026  bytes_read = bytestream2_tell(&gbytes);
2027  skip_bits(&s->gb, bytes_read << 3);
2028  len -= bytes_read;
2029 
2030  goto out;
2031  }
2032 
2033  /* Apple MJPEG-A */
2034  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2035  id = get_bits_long(&s->gb, 32);
2036  len -= 4;
2037  /* Apple MJPEG-A */
2038  if (id == AV_RB32("mjpg")) {
2039  /* structure:
2040  4bytes field size
2041  4bytes pad field size
2042  4bytes next off
2043  4bytes quant off
2044  4bytes huff off
2045  4bytes image off
2046  4bytes scan off
2047  4bytes data off
2048  */
2049  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2050  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2051  }
2052  }
2053 
2054  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2055  int id2;
2056  unsigned seqno;
2057  unsigned nummarkers;
2058 
2059  id = get_bits_long(&s->gb, 32);
2060  id2 = get_bits(&s->gb, 24);
2061  len -= 7;
2062  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2063  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2064  goto out;
2065  }
2066 
2067  skip_bits(&s->gb, 8);
2068  seqno = get_bits(&s->gb, 8);
2069  len -= 2;
2070  if (seqno == 0) {
2071  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2072  goto out;
2073  }
2074 
2075  nummarkers = get_bits(&s->gb, 8);
2076  len -= 1;
2077  if (nummarkers == 0) {
2078  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2079  goto out;
2080  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2081  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2082  goto out;
2083  } else if (seqno > nummarkers) {
2084  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2085  goto out;
2086  }
2087 
2088  /* Allocate if this is the first APP2 we've seen. */
2089  if (s->iccnum == 0) {
2090  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2091  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2092  if (!s->iccdata || !s->iccdatalens) {
2093  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2094  return AVERROR(ENOMEM);
2095  }
2096  s->iccnum = nummarkers;
2097  }
2098 
2099  if (s->iccdata[seqno - 1]) {
2100  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2101  goto out;
2102  }
2103 
2104  s->iccdatalens[seqno - 1] = len;
2105  s->iccdata[seqno - 1] = av_malloc(len);
2106  if (!s->iccdata[seqno - 1]) {
2107  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2108  return AVERROR(ENOMEM);
2109  }
2110 
2111  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2112  skip_bits(&s->gb, len << 3);
2113  len = 0;
2114  s->iccread++;
2115 
2116  if (s->iccread > s->iccnum)
2117  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2118  }
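     /* ICC profiles larger than one marker segment are split across several
      * APP2 chunks of the form "ICC_PROFILE\0 <seqno> <nummarkers> <data>"
      * with 1-based sequence numbers; the chunks are collected here and only
      * reassembled into frame side data once all of them have been read. */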
2119 
2120 out:
2121  /* slow but needed for extreme adobe jpegs */
2122  if (len < 0)
2123  av_log(s->avctx, AV_LOG_ERROR,
2124  "mjpeg: error, decode_app parser read over the end\n");
2125  while (--len > 0)
2126  skip_bits(&s->gb, 8);
2127 
2128  return 0;
2129 }
2130 
2131 static int mjpeg_decode_com(MJpegDecodeContext *s)
2132 {
2133  int len = get_bits(&s->gb, 16);
2134  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2135  int i;
2136  char *cbuf = av_malloc(len - 1);
2137  if (!cbuf)
2138  return AVERROR(ENOMEM);
2139 
2140  for (i = 0; i < len - 2; i++)
2141  cbuf[i] = get_bits(&s->gb, 8);
2142  if (i > 0 && cbuf[i - 1] == '\n')
2143  cbuf[i - 1] = 0;
2144  else
2145  cbuf[i] = 0;
2146 
2147  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2148  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2149 
2150  /* buggy avid, it puts EOI only at every 10th frame */
2151  if (!strncmp(cbuf, "AVID", 4)) {
2152  parse_avid(s, cbuf, len);
2153  } else if (!strcmp(cbuf, "CS=ITU601"))
2154  s->cs_itu601 = 1;
2155  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2156  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2157  s->flipped = 1;
2158  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2159  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2160  s->multiscope = 2;
2161  }
2162 
2163  av_free(cbuf);
2164  }
2165 
2166  return 0;
2167 }
2168 
2169 /* Return the 8-bit start code value and update the search
2170  state. Return -1 if no start code is found. */
2171 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2172 {
2173  const uint8_t *buf_ptr;
2174  unsigned int v, v2;
2175  int val;
2176  int skipped = 0;
2177 
2178  buf_ptr = *pbuf_ptr;
2179  while (buf_end - buf_ptr > 1) {
2180  v = *buf_ptr++;
2181  v2 = *buf_ptr;
2182  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2183  val = *buf_ptr++;
2184  goto found;
2185  }
2186  skipped++;
2187  }
2188  buf_ptr = buf_end;
2189  val = -1;
2190 found:
2191  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2192  *pbuf_ptr = buf_ptr;
2193  return val;
2194 }
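/* find_marker() only accepts two-byte sequences 0xFF <code> with the code in
 * the SOF0..COM range (0xC0-0xFE), so fill bytes and stuffed 0xFF 0x00 pairs
 * inside entropy-coded data are skipped.  For the bytes 12 FF 00 FF C4 ... it
 * returns 0xC4 (DHT) and leaves *pbuf_ptr just past the marker. */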
2195 
2196 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2197  const uint8_t **buf_ptr, const uint8_t *buf_end,
2198  const uint8_t **unescaped_buf_ptr,
2199  int *unescaped_buf_size)
2200 {
2201  int start_code;
2202  start_code = find_marker(buf_ptr, buf_end);
2203 
2204  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2205  if (!s->buffer)
2206  return AVERROR(ENOMEM);
2207 
2208  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2209  if (start_code == SOS && !s->ls) {
2210  const uint8_t *src = *buf_ptr;
2211  const uint8_t *ptr = src;
2212  uint8_t *dst = s->buffer;
2213 
2214  #define copy_data_segment(skip) do { \
2215  ptrdiff_t length = (ptr - src) - (skip); \
2216  if (length > 0) { \
2217  memcpy(dst, src, length); \
2218  dst += length; \
2219  src = ptr; \
2220  } \
2221  } while (0)
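         /* Baseline JPEG stuffs a 0x00 byte after every literal 0xFF in the
          * entropy-coded data; the loop below copies the payload while dropping
          * those stuffing bytes, keeps RST0..RST7 markers in place and stops at
          * the first real marker that terminates the scan. */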
2222 
2223  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2224  ptr = buf_end;
2225  copy_data_segment(0);
2226  } else {
2227  while (ptr < buf_end) {
2228  uint8_t x = *(ptr++);
2229 
2230  if (x == 0xff) {
2231  ptrdiff_t skip = 0;
2232  while (ptr < buf_end && x == 0xff) {
2233  x = *(ptr++);
2234  skip++;
2235  }
2236 
2237  /* 0xFF, 0xFF, ... */
2238  if (skip > 1) {
2239  copy_data_segment(skip);
2240 
2241  /* decrement src as it is equal to ptr after the
2242  * copy_data_segment macro and we might want to
2243  * copy the current value of x later on */
2244  src--;
2245  }
2246 
2247  if (x < RST0 || x > RST7) {
2248  copy_data_segment(1);
2249  if (x)
2250  break;
2251  }
2252  }
2253  }
2254  if (src < ptr)
2255  copy_data_segment(0);
2256  }
2257  #undef copy_data_segment
2258 
2259  *unescaped_buf_ptr = s->buffer;
2260  *unescaped_buf_size = dst - s->buffer;
2261  memset(s->buffer + *unescaped_buf_size, 0,
2262  AV_INPUT_BUFFER_PADDING_SIZE);
2263 
2264  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2265  (buf_end - *buf_ptr) - (dst - s->buffer));
2266  } else if (start_code == SOS && s->ls) {
2267  const uint8_t *src = *buf_ptr;
2268  uint8_t *dst = s->buffer;
2269  int bit_count = 0;
2270  int t = 0, b = 0;
2271  PutBitContext pb;
2272 
2273  /* find marker */
2274  while (src + t < buf_end) {
2275  uint8_t x = src[t++];
2276  if (x == 0xff) {
2277  while ((src + t < buf_end) && x == 0xff)
2278  x = src[t++];
2279  if (x & 0x80) {
2280  t -= FFMIN(2, t);
2281  break;
2282  }
2283  }
2284  }
2285  bit_count = t * 8;
2286  init_put_bits(&pb, dst, t);
2287 
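         /* JPEG-LS marker escaping differs from baseline JPEG: after a 0xFF byte
          * the encoder emits a byte with its MSB clear that carries only 7
          * payload bits, so each escape contributes 15 bits instead of 16 and
          * bit_count is decremented accordingly. */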
2288  /* unescape bitstream */
2289  while (b < t) {
2290  uint8_t x = src[b++];
2291  put_bits(&pb, 8, x);
2292  if (x == 0xFF && b < t) {
2293  x = src[b++];
2294  if (x & 0x80) {
2295  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2296  x &= 0x7f;
2297  }
2298  put_bits(&pb, 7, x);
2299  bit_count--;
2300  }
2301  }
2302  flush_put_bits(&pb);
2303 
2304  *unescaped_buf_ptr = dst;
2305  *unescaped_buf_size = (bit_count + 7) >> 3;
2306  memset(s->buffer + *unescaped_buf_size, 0,
2307  AV_INPUT_BUFFER_PADDING_SIZE);
2308  } else {
2309  *unescaped_buf_ptr = *buf_ptr;
2310  *unescaped_buf_size = buf_end - *buf_ptr;
2311  }
2312 
2313  return start_code;
2314 }
2315 
2316 static void reset_icc_profile(MJpegDecodeContext *s)
2317 {
2318  int i;
2319 
2320  if (s->iccdata)
2321  for (i = 0; i < s->iccnum; i++)
2322  av_freep(&s->iccdata[i]);
2323  av_freep(&s->iccdata);
2324  av_freep(&s->iccdatalens);
2325 
2326  s->iccread = 0;
2327  s->iccnum = 0;
2328 }
2329 
2330 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2331  AVPacket *avpkt)
2332 {
2333  AVFrame *frame = data;
2334  const uint8_t *buf = avpkt->data;
2335  int buf_size = avpkt->size;
2336  MJpegDecodeContext *s = avctx->priv_data;
2337  const uint8_t *buf_end, *buf_ptr;
2338  const uint8_t *unescaped_buf_ptr;
2339  int hshift, vshift;
2340  int unescaped_buf_size;
2341  int start_code;
2342  int i, index;
2343  int ret = 0;
2344  int is16bit;
2345 
2346  s->buf_size = buf_size;
2347 
2348  av_dict_free(&s->exif_metadata);
2349  av_freep(&s->stereo3d);
2350  s->adobe_transform = -1;
2351 
2352  if (s->iccnum != 0)
2353  reset_icc_profile(s);
2354 
2355  buf_ptr = buf;
2356  buf_end = buf + buf_size;
2357  while (buf_ptr < buf_end) {
2358  /* find start of next marker */
2359  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2360  &unescaped_buf_ptr,
2361  &unescaped_buf_size);
2362  /* EOF */
2363  if (start_code < 0) {
2364  break;
2365  } else if (unescaped_buf_size > INT_MAX / 8) {
2366  av_log(avctx, AV_LOG_ERROR,
2367  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2368  start_code, unescaped_buf_size, buf_size);
2369  return AVERROR_INVALIDDATA;
2370  }
2371  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2372  start_code, buf_end - buf_ptr);
2373 
2374  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2375 
2376  if (ret < 0) {
2377  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2378  goto fail;
2379  }
2380 
2381  s->start_code = start_code;
2382  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2383  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2384 
2385  /* process markers */
2386  if (start_code >= RST0 && start_code <= RST7) {
2387  av_log(avctx, AV_LOG_DEBUG,
2388  "restart marker: %d\n", start_code & 0x0f);
2389  /* APP fields */
2390  } else if (start_code >= APP0 && start_code <= APP15) {
2391  if ((ret = mjpeg_decode_app(s)) < 0)
2392  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2393  av_err2str(ret));
2394  /* Comment */
2395  } else if (start_code == COM) {
2396  ret = mjpeg_decode_com(s);
2397  if (ret < 0)
2398  return ret;
2399  } else if (start_code == DQT) {
2400  ret = ff_mjpeg_decode_dqt(s);
2401  if (ret < 0)
2402  return ret;
2403  }
2404 
2405  ret = -1;
2406 
2407  if (!CONFIG_JPEGLS_DECODER &&
2408  (start_code == SOF48 || start_code == LSE)) {
2409  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2410  return AVERROR(ENOSYS);
2411  }
2412 
2413  if (avctx->skip_frame == AVDISCARD_ALL) {
2414  switch(start_code) {
2415  case SOF0:
2416  case SOF1:
2417  case SOF2:
2418  case SOF3:
2419  case SOF48:
2420  case SOI:
2421  case SOS:
2422  case EOI:
2423  break;
2424  default:
2425  goto skip;
2426  }
2427  }
2428 
2429  switch (start_code) {
2430  case SOI:
2431  s->restart_interval = 0;
2432  s->restart_count = 0;
2433  s->raw_image_buffer = buf_ptr;
2434  s->raw_image_buffer_size = buf_end - buf_ptr;
2435  /* nothing to do on SOI */
2436  break;
2437  case DHT:
2438  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2439  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2440  goto fail;
2441  }
2442  break;
2443  case SOF0:
2444  case SOF1:
2445  if (start_code == SOF0)
2446  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2447  else
2448  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2449  s->lossless = 0;
2450  s->ls = 0;
2451  s->progressive = 0;
2452  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2453  goto fail;
2454  break;
2455  case SOF2:
2456  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2457  s->lossless = 0;
2458  s->ls = 0;
2459  s->progressive = 1;
2460  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2461  goto fail;
2462  break;
2463  case SOF3:
2464  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2465  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2466  s->lossless = 1;
2467  s->ls = 0;
2468  s->progressive = 0;
2469  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2470  goto fail;
2471  break;
2472  case SOF48:
2473  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2474  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2475  s->lossless = 1;
2476  s->ls = 1;
2477  s->progressive = 0;
2478  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2479  goto fail;
2480  break;
2481  case LSE:
2482  if (!CONFIG_JPEGLS_DECODER ||
2483  (ret = ff_jpegls_decode_lse(s)) < 0)
2484  goto fail;
2485  break;
2486  case EOI:
2487 eoi_parser:
2488  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2489  s->progressive && s->cur_scan && s->got_picture)
2490  mjpeg_idct_scan_progressive_ac(s);
2491  s->cur_scan = 0;
2492  if (!s->got_picture) {
2493  av_log(avctx, AV_LOG_WARNING,
2494  "Found EOI before any SOF, ignoring\n");
2495  break;
2496  }
2497  if (s->interlaced) {
2498  s->bottom_field ^= 1;
2499  /* if not bottom field, do not output image yet */
2500  if (s->bottom_field == !s->interlace_polarity)
2501  break;
2502  }
2503  if (avctx->skip_frame == AVDISCARD_ALL) {
2504  s->got_picture = 0;
2505  goto the_end_no_picture;
2506  }
2507  if (s->avctx->hwaccel) {
2508  ret = s->avctx->hwaccel->end_frame(s->avctx);
2509  if (ret < 0)
2510  return ret;
2511 
2512  av_freep(&s->hwaccel_picture_private);
2513  }
2514  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2515  return ret;
2516  *got_frame = 1;
2517  s->got_picture = 0;
2518 
2519  if (!s->lossless) {
2520  int qp = FFMAX3(s->qscale[0],
2521  s->qscale[1],
2522  s->qscale[2]);
2523  int qpw = (s->width + 15) / 16;
2524  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2525  if (qp_table_buf) {
2526  memset(qp_table_buf->data, qp, qpw);
2527  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2528  }
2529 
2530  if(avctx->debug & FF_DEBUG_QP)
2531  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2532  }
2533 
2534  goto the_end;
2535  case SOS:
2536  s->raw_scan_buffer = buf_ptr;
2537  s->raw_scan_buffer_size = buf_end - buf_ptr;
2538 
2539  s->cur_scan++;
2540  if (avctx->skip_frame == AVDISCARD_ALL) {
2541  skip_bits(&s->gb, get_bits_left(&s->gb));
2542  break;
2543  }
2544 
2545  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2546  (avctx->err_recognition & AV_EF_EXPLODE))
2547  goto fail;
2548  break;
2549  case DRI:
2550  if ((ret = mjpeg_decode_dri(s)) < 0)
2551  return ret;
2552  break;
2553  case SOF5:
2554  case SOF6:
2555  case SOF7:
2556  case SOF9:
2557  case SOF10:
2558  case SOF11:
2559  case SOF13:
2560  case SOF14:
2561  case SOF15:
2562  case JPG:
2563  av_log(avctx, AV_LOG_ERROR,
2564  "mjpeg: unsupported coding type (%x)\n", start_code);
2565  break;
2566  }
2567 
2568 skip:
2569  /* end of processing for this start code */
2570  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2571  av_log(avctx, AV_LOG_DEBUG,
2572  "marker parser used %d bytes (%d bits)\n",
2573  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2574  }
2575  if (s->got_picture && s->cur_scan) {
2576  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2577  goto eoi_parser;
2578  }
2579  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2580  return AVERROR_INVALIDDATA;
2581 fail:
2582  s->got_picture = 0;
2583  return ret;
2584 the_end:
2585 
2586  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2587 
2588  if (AV_RB32(s->upscale_h)) {
2589  int p;
2590  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2592  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2593  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2594  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2595  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2596  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2597  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2598  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2599  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2600  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2601  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2602  );
2603  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2604  if (ret)
2605  return ret;
2606 
2607  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2608  for (p = 0; p<s->nb_components; p++) {
2609  uint8_t *line = s->picture_ptr->data[p];
2610  int w = s->width;
2611  int h = s->height;
2612  if (!s->upscale_h[p])
2613  continue;
2614  if (p==1 || p==2) {
2615  w = AV_CEIL_RSHIFT(w, hshift);
2616  h = AV_CEIL_RSHIFT(h, vshift);
2617  }
2618  if (s->upscale_v[p] == 1)
2619  h = (h+1)>>1;
2620  av_assert0(w > 0);
2621  for (i = 0; i < h; i++) {
2622  if (s->upscale_h[p] == 1) {
2623  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2624  else line[w - 1] = line[(w - 1) / 2];
2625  for (index = w - 2; index > 0; index--) {
2626  if (is16bit)
2627  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2628  else
2629  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2630  }
2631  } else if (s->upscale_h[p] == 2) {
2632  if (is16bit) {
2633  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2634  if (w > 1)
2635  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2636  } else {
2637  line[w - 1] = line[(w - 1) / 3];
2638  if (w > 1)
2639  line[w - 2] = line[w - 1];
2640  }
2641  for (index = w - 3; index > 0; index--) {
2642  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2643  }
2644  }
2645  line += s->linesize[p];
2646  }
2647  }
2648  }
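     /* The horizontal pass above interpolates in place from right to left:
      * destination index i reads source samples at i/2 (or i/3 for the
      * factor-of-three case), which have not been overwritten yet, so no
      * temporary line buffer is needed. */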
2649  if (AV_RB32(s->upscale_v)) {
2650  int p;
2651  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2655  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2659  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2661  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2662  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2663  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2664  );
2665  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2666  if (ret)
2667  return ret;
2668 
2669  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2670  for (p = 0; p < s->nb_components; p++) {
2671  uint8_t *dst;
2672  int w = s->width;
2673  int h = s->height;
2674  if (!s->upscale_v[p])
2675  continue;
2676  if (p==1 || p==2) {
2677  w = AV_CEIL_RSHIFT(w, hshift);
2678  h = AV_CEIL_RSHIFT(h, vshift);
2679  }
2680  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2681  for (i = h - 1; i; i--) {
2682  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2683  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2684  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2685  memcpy(dst, src1, w);
2686  } else {
2687  for (index = 0; index < w; index++)
2688  dst[index] = (src1[index] + src2[index]) >> 1;
2689  }
2690  dst -= s->linesize[p];
2691  }
2692  }
2693  }
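     /* The vertical pass works bottom-up in place for the same reason,
      * averaging the two nearest source rows (or copying when they coincide). */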
2694  if (s->flipped && !s->rgb) {
2695  int j;
2696  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2697  if (ret)
2698  return ret;
2699 
2700  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2701  for (index=0; index<s->nb_components; index++) {
2702  uint8_t *dst = s->picture_ptr->data[index];
2703  int w = s->picture_ptr->width;
2704  int h = s->picture_ptr->height;
2705  if(index && index<3){
2706  w = AV_CEIL_RSHIFT(w, hshift);
2707  h = AV_CEIL_RSHIFT(h, vshift);
2708  }
2709  if(dst){
2710  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2711  for (i=0; i<h/2; i++) {
2712  for (j=0; j<w; j++)
2713  FFSWAP(int, dst[j], dst2[j]);
2714  dst += s->picture_ptr->linesize[index];
2715  dst2 -= s->picture_ptr->linesize[index];
2716  }
2717  }
2718  }
2719  }
2720  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2721  int w = s->picture_ptr->width;
2722  int h = s->picture_ptr->height;
2723  av_assert0(s->nb_components == 4);
2724  for (i=0; i<h; i++) {
2725  int j;
2726  uint8_t *dst[4];
2727  for (index=0; index<4; index++) {
2728  dst[index] = s->picture_ptr->data[index]
2729  + s->picture_ptr->linesize[index]*i;
2730  }
2731  for (j=0; j<w; j++) {
2732  int k = dst[3][j];
2733  int r = dst[0][j] * k;
2734  int g = dst[1][j] * k;
2735  int b = dst[2][j] * k;
2736  dst[0][j] = g*257 >> 16;
2737  dst[1][j] = b*257 >> 16;
2738  dst[2][j] = r*257 >> 16;
2739  dst[3][j] = 255;
2740  }
2741  }
2742  }
2743  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2744  int w = s->picture_ptr->width;
2745  int h = s->picture_ptr->height;
2746  av_assert0(s->nb_components == 4);
2747  for (i=0; i<h; i++) {
2748  int j;
2749  uint8_t *dst[4];
2750  for (index=0; index<4; index++) {
2751  dst[index] = s->picture_ptr->data[index]
2752  + s->picture_ptr->linesize[index]*i;
2753  }
2754  for (j=0; j<w; j++) {
2755  int k = dst[3][j];
2756  int r = (255 - dst[0][j]) * k;
2757  int g = (128 - dst[1][j]) * k;
2758  int b = (128 - dst[2][j]) * k;
2759  dst[0][j] = r*257 >> 16;
2760  dst[1][j] = (g*257 >> 16) + 128;
2761  dst[2][j] = (b*257 >> 16) + 128;
2762  dst[3][j] = 255;
2763  }
2764  }
2765  }
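     /* The two loops above fold the K plane of Adobe 4-component images back
      * into the other channels in place (transform 0: CMYK-style data to RGB
      * order, transform 2: YCCK back to YCbCr).  The expression (x * 257) >> 16
      * is a cheap approximation of x / 255 for the 16-bit intermediate
      * products. */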
2766 
2767  if (s->stereo3d) {
2768  AVStereo3D *stereo = av_stereo3d_create_side_data(data);
2769  if (stereo) {
2770  stereo->type = s->stereo3d->type;
2771  stereo->flags = s->stereo3d->flags;
2772  }
2773  av_freep(&s->stereo3d);
2774  }
2775 
2776  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2777  AVFrameSideData *sd;
2778  size_t offset = 0;
2779  int total_size = 0;
2780  int i;
2781 
2782  /* Sum size of all parts. */
2783  for (i = 0; i < s->iccnum; i++)
2784  total_size += s->iccdatalens[i];
2785 
2786  sd = av_frame_new_side_data(data, AV_FRAME_DATA_ICC_PROFILE, total_size);
2787  if (!sd) {
2788  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2789  return AVERROR(ENOMEM);
2790  }
2791 
2792  /* Reassemble the parts, which are now in-order. */
2793  for (i = 0; i < s->iccnum; i++) {
2794  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2795  offset += s->iccdatalens[i];
2796  }
2797  }
2798 
2799  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
2800  av_dict_free(&s->exif_metadata);
2801 
2802 the_end_no_picture:
2803  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2804  buf_end - buf_ptr);
2805 // return buf_end - buf_ptr;
2806  return buf_ptr - buf;
2807 }
2808 
2809 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2810  * even without having called ff_mjpeg_decode_init(). */
2811 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2812 {
2813  MJpegDecodeContext *s = avctx->priv_data;
2814  int i, j;
2815 
2816  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2817  av_log(avctx, AV_LOG_INFO, "Single field\n");
2818  }
2819 
2820  if (s->picture) {
2821  av_frame_free(&s->picture);
2822  s->picture_ptr = NULL;
2823  } else if (s->picture_ptr)
2824  av_frame_unref(s->picture_ptr);
2825 
2826  av_freep(&s->buffer);
2827  av_freep(&s->stereo3d);
2828  av_freep(&s->ljpeg_buffer);
2829  s->ljpeg_buffer_size = 0;
2830 
2831  for (i = 0; i < 3; i++) {
2832  for (j = 0; j < 4; j++)
2833  ff_free_vlc(&s->vlcs[i][j]);
2834  }
2835  for (i = 0; i < MAX_COMPONENTS; i++) {
2836  av_freep(&s->blocks[i]);
2837  av_freep(&s->last_nnz[i]);
2838  }
2839  av_dict_free(&s->exif_metadata);
2840 
2840 
2841  reset_icc_profile(s);
2842 
2843  av_freep(&s->hwaccel_picture_private);
2844 
2845  return 0;
2846 }
2847 
2848 static void decode_flush(AVCodecContext *avctx)
2849 {
2850  MJpegDecodeContext *s = avctx->priv_data;
2851  s->got_picture = 0;
2852 }
2853 
2854 #if CONFIG_MJPEG_DECODER
2855 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2856 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2857 static const AVOption options[] = {
2858  { "extern_huff", "Use external huffman table.",
2859  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2860  { NULL },
2861 };
2862 
2863 static const AVClass mjpegdec_class = {
2864  .class_name = "MJPEG decoder",
2865  .item_name = av_default_item_name,
2866  .option = options,
2867  .version = LIBAVUTIL_VERSION_INT,
2868 };
2869 
2870 AVCodec ff_mjpeg_decoder = {
2871  .name = "mjpeg",
2872  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2873  .type = AVMEDIA_TYPE_VIDEO,
2874  .id = AV_CODEC_ID_MJPEG,
2875  .priv_data_size = sizeof(MJpegDecodeContext),
2876  .init = ff_mjpeg_decode_init,
2877  .close = ff_mjpeg_decode_end,
2878  .decode = ff_mjpeg_decode_frame,
2879  .flush = decode_flush,
2880  .capabilities = AV_CODEC_CAP_DR1,
2881  .max_lowres = 3,
2882  .priv_class = &mjpegdec_class,
2883  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2884  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2885  .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2886  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2887 #if CONFIG_MJPEG_NVDEC_HWACCEL
2888  HWACCEL_NVDEC(mjpeg),
2889 #endif
2890 #if CONFIG_MJPEG_VAAPI_HWACCEL
2891  HWACCEL_VAAPI(mjpeg),
2892 #endif
2893  NULL
2894  },
2895 };
2896 #endif
2897 #if CONFIG_THP_DECODER
2898 AVCodec ff_thp_decoder = {
2899  .name = "thp",
2900  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2901  .type = AVMEDIA_TYPE_VIDEO,
2902  .id = AV_CODEC_ID_THP,
2903  .priv_data_size = sizeof(MJpegDecodeContext),
2904  .init = ff_mjpeg_decode_init,
2905  .close = ff_mjpeg_decode_end,
2906  .decode = ff_mjpeg_decode_frame,
2907  .flush = decode_flush,
2908  .capabilities = AV_CODEC_CAP_DR1,
2909  .max_lowres = 3,
2910  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2911 };
2912 #endif
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
AVCodec
AVCodec.
Definition: codec.h:190
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:210
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
av_buffer_alloc
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
ff_mjpeg_build_huffman_codes
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:114
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1279
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1382
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2848
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:944
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:406
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:727
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:178
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
AVFrame::width
int width
Definition: frame.h:358
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:439
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1960
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
AVOption
AVOption.
Definition: opt.h:246
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:778
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:91
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
build_vlc
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:149
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2589
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1228
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1398
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
fail
#define fail()
Definition: checkasm.h:123
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:441
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1956
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2131
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:714
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:381
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:163
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
aligned
static int aligned(int val)
Definition: dashdec.c:167
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:845
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:409
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:2192
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1958
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1027
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:410
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1959
g
const char * g
Definition: vf_curves.c:115
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:350
bits
uint8_t bits
Definition: vp3data.h:202
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:169
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:408
RST0
@ RST0
Definition: mjpeg.h:61
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2316
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2811
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:35
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:416
ff_thp_decoder
AVCodec ff_thp_decoder
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:387
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:388
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1589
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:209
VD
#define VD
Definition: cuviddec.c:1080
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
SOF13
@ SOF13
Definition: mjpeg.h:52
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:254
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:46
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1413
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:962
lowres
static int lowres
Definition: ffplay.c:336
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1534
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:523
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1062
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
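A sketch of typical usage (the codec struct and strings below are illustrative, not taken from this file): descriptive strings wrapped in the macro compile to NULL in CONFIG_SMALL builds.

#include "libavutil/internal.h"   /* assumed home of NULL_IF_CONFIG_SMALL() */
#include "avcodec.h"

/* Illustrative codec definition: long_name vanishes when CONFIG_SMALL
 * is enabled, trimming string data from size-constrained builds. */
static AVCodec illustrative_decoder = {
    .name      = "illustrative",
    .long_name = NULL_IF_CONFIG_SMALL("Illustrative decoder"),
    .type      = AVMEDIA_TYPE_VIDEO,
};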
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:332
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
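A minimal sketch of the reference-counting pattern this function supports (public libavutil API, independent of this decoder):

#include <libavutil/frame.h>

static int clone_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int ret;

    av_frame_unref(dst);            /* drop any previous reference */
    ret = av_frame_ref(dst, src);   /* dst now shares src's buffers */
    return ret < 0 ? ret : 0;       /* negative AVERROR on failure */
}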
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
FF_QSCALE_TYPE_MPEG1
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:92
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:863
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
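Typical usage keeps the macro inside a function argument, since it expands to a temporary character array; a sketch:

#include <libavutil/error.h>
#include <libavutil/log.h>

static void log_failure(void *ctx, int err)
{
    /* av_err2str() expands to a buffer with automatic storage, so it is
     * consumed directly in the argument list rather than stored. */
    av_log(ctx, AV_LOG_ERROR, "operation failed: %s\n", av_err2str(err));
}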
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1624
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
size
int size
Definition: twinvq_data.h:11134
AV_RB32
Generated from the bytestream.h template macros; AV_RB32 reads a 32-bit big-endian value.
Definition: bytestream.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:208
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
offset
Flattened excerpt from doc/writing_filters.txt (filter options / private context discussion); see that file for the readable version.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
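A worked sketch of the value mapping, assuming the usual MPEG-1 DC mantissa convention (a leading 0 bit marks a negative value); the helper name is hypothetical and only mirrors the mapping, not the bit-reader internals:

/* Mapping applied to an already-read n-bit code (illustrative helper). */
static int xbits_value(unsigned code, int n)
{
    if (code >> (n - 1))               /* MSB set: value is positive */
        return (int)code;
    return (int)code - (1 << n) + 1;   /* MSB clear: value is negative */
}
/* n = 3: code 101b (5) -> +5, code 011b (3) -> 3 - 8 + 1 = -4 */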
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2171
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
layout
From doc/filter_design.txt: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
src1
#define src1
Definition: h264pred.c:139
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2330
interlaced
uint8_t interlaced
Definition: mxfenc.c:2139
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:796
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Flattened excerpt from doc/filter_design.txt on the activate design: test the status of the outputs and forward it, return FFERROR_NOT_READY when appropriate, and treat internally queued frames as part of the FIFO; followed by example code in that document.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
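A sketch of the peek-then-decide pattern that this decoder relies on (show_bits() only peeks; the read position does not advance):

#include "get_bits.h"

static void peek_then_consume(GetBitContext *gb)
{
    unsigned next = show_bits(gb, 8);  /* look ahead without consuming */

    if (next == 0xFF)
        skip_bits(gb, 8);              /* consume only once we decide to */
}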
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1957
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1802
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
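A sketch of the buffer-reuse pattern this helper serves; the context struct and field names are illustrative. av_fast_malloc() is the variant without the trailing AV_INPUT_BUFFER_PADDING_SIZE bytes.

#include <stdint.h>
#include "libavutil/error.h"
#include "avcodec.h"   /* av_fast_padded_malloc(), AV_INPUT_BUFFER_PADDING_SIZE */

typedef struct ScratchBuffer {
    uint8_t     *buf;
    unsigned int buf_size;
} ScratchBuffer;

static int ensure_scratch(ScratchBuffer *s, size_t needed)
{
    /* Grows the buffer only when needed and keeps the zeroed padding
     * that bitstream readers expect at the end. */
    av_fast_padded_malloc(&s->buf, &s->buf_size, needed);
    return s->buf ? 0 : AVERROR(ENOMEM);
}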
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1625
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
len
int len
Definition: vorbis_enc_data.h:452
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:534
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:936
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
Flattened excerpt from doc/filter_design.txt describing frame queuing and the request_frame() method in filters; see that file for the readable version.
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1616
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
left
Flattened excerpt of the same Snow codec description from doc/snow.txt, duplicating the entry above and ending at the motion vector prediction section; see doc/snow.txt for the readable version.
Definition: snow.txt:386
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_RL32
Generated from the bytestream.h template macros; AV_RL32 reads a 32-bit little-endian value.
Definition: bytestream.h:88
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2196
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
OFFSET
Flattened excerpt from doc/writing_filters.txt describing the AVOption table fields (name, description, offset via the OFFSET() macro, default, minimum, maximum, flags); see that file for the readable version.
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVFrame::height
int height
Definition: frame.h:358
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:557
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
VLC
Definition: vlc.h:26
values
Flattened excerpt from doc/filter_design.txt (request_frame() handling and its return values), duplicating the excerpt for “frame” above; see that file for the readable version.
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:261
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:428
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1611
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
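A sketch of how a decoder typically publishes dimensions parsed from the bitstream (internal libavcodec API; the wrapper below is illustrative):

#include "avcodec.h"
#include "internal.h"   /* ff_set_dimensions() */

static int apply_parsed_size(AVCodecContext *avctx, int w, int h)
{
    int ret = ff_set_dimensions(avctx, w, h);  /* validates, then sets */
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "invalid dimensions %dx%d\n", w, h);
    return ret;
}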
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1814
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
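A sketch of attaching stereo metadata to a decoded frame using the stereo3d helpers listed here (the wrapper function is illustrative):

#include "libavutil/error.h"
#include "libavutil/stereo3d.h"

static int tag_top_bottom(AVFrame *frame, int inverted)
{
    AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
    if (!s3d)
        return AVERROR(ENOMEM);

    s3d->type = AV_STEREO3D_TOPBOTTOM;          /* views stacked vertically */
    if (inverted)
        s3d->flags |= AV_STEREO3D_FLAG_INVERT;  /* right/bottom carries the left view */
    return 0;
}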
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:206
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
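A sketch of the PutBitContext write/flush pattern (flushing zero-pads the final partial byte); the values are illustrative:

#include <stdint.h>
#include "put_bits.h"

static int write_bits_example(uint8_t *buf, int buf_size)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 4, 0xA);                /* 4-bit value */
    put_bits(&pb, 9, 0x155);              /* 9-bit value */
    flush_put_bits(&pb);                  /* zero-pad to a byte boundary */
    return put_bits_count(&pb) >> 3;      /* bytes written so far */
}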
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:551
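A worked example of the byte order described above, using the MKTAG()/av_fourcc2str() helpers (values are illustrative):

#include <stdint.h>
#include "libavutil/avutil.h"   /* MKTAG(), av_fourcc2str() */

/* MKTAG packs the first character into the least significant byte,
 * matching the "LSB first" layout of codec_tag: */
static const uint32_t tag_abcd = MKTAG('A', 'B', 'C', 'D');
/* tag_abcd == 'A' | ('B' << 8) | ('C' << 16) | ('D' << 24);
 * av_fourcc2str(tag_abcd) yields the printable string "ABCD". */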
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
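A sketch combining the dictionary helpers referenced from this file (set, copy, then free both); keys and values are illustrative:

#include "libavutil/dict.h"

static int copy_metadata_example(void)
{
    AVDictionary *src = NULL, *dst = NULL;
    int ret;

    av_dict_set(&src, "orientation", "6", 0);  /* e.g. an EXIF-style entry */
    ret = av_dict_copy(&dst, src, 0);          /* duplicate all entries */

    av_dict_free(&src);
    av_dict_free(&dst);
    return ret;
}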
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
ff_mjpeg_decoder
AVCodec ff_mjpeg_decoder
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
AV_RB24
Generated from the bytestream.h template macros; AV_RB24 reads a 24-bit big-endian value.
Definition: bytestream.h:93
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
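For orientation, the selector follows the standard JPEG lossless predictors (ITU-T T.81, Table H.1); the helper below is an illustrative restatement of that table, not a copy of the macro body:

static int jpeg_lossless_predict(int topleft, int top, int left, int predictor)
{
    switch (predictor) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top  + ((left - topleft) >> 1);
    default: return (left + top) >> 1;              /* predictor 7 */
    }
}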
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
av_frame_set_qp_table
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82