FFmpeg
diracdec.c
1 /*
2  * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
3  * Copyright (C) 2009 David Conrad
4  * Copyright (C) 2011 Jordi Ortiz
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Dirac Decoder
26  * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
27  */
28 
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/thread.h"
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "bytestream.h"
34 #include "internal.h"
35 #include "golomb.h"
36 #include "dirac_arith.h"
37 #include "dirac_vlc.h"
38 #include "mpeg12data.h"
39 #include "libavcodec/mpegvideo.h"
40 #include "mpegvideoencdsp.h"
41 #include "dirac_dwt.h"
42 #include "dirac.h"
43 #include "diractab.h"
44 #include "diracdsp.h"
45 #include "videodsp.h"
46 
47 /**
48  * The spec limits this to 3 for frame coding, but in practice it can be as high as 6
49  */
50 #define MAX_REFERENCE_FRAMES 8
51 #define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
52 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
53 #define MAX_QUANT 255 /* max quant for VC-2 */
54 #define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
55 
56 /**
57  * DiracBlock->ref flags: if set, the block does MC from the given ref
58  */
59 #define DIRAC_REF_MASK_REF1 1
60 #define DIRAC_REF_MASK_REF2 2
61 #define DIRAC_REF_MASK_GLOBAL 4
62 
63 /**
64  * Value of Picture.reference when Picture is not a reference picture, but
65  * is held for delayed output.
66  */
67 #define DELAYED_PIC_REF 4
68 
69 #define CALC_PADDING(size, depth) \
70  (((size + (1 << depth) - 1) >> depth) << depth)
71 
72 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
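
/* CALC_PADDING(size, depth) rounds size up to a multiple of 1<<depth
 * (e.g. CALC_PADDING(100, 4) == 112); DIVRNDUP is a plain ceiling division
 * (e.g. DIVRNDUP(100, 16) == 7). */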
73 
74 typedef struct {
75  AVFrame *avframe;
76  int interpolated[3]; /* 1 if hpel[] is valid */
77  uint8_t *hpel[3][4];
78  uint8_t *hpel_base[3][4];
79  int reference;
80 } DiracFrame;
81 
82 typedef struct {
83  union {
84  int16_t mv[2][2];
85  int16_t dc[3];
86  } u; /* anonymous unions aren't in C99 :( */
87  uint8_t ref;
88 } DiracBlock;
89 
90 typedef struct SubBand {
91  int level;
92  int orientation;
93  int stride; /* in bytes */
94  int width;
95  int height;
96  int pshift;
97  int quant;
98  uint8_t *ibuf;
99  struct SubBand *parent;
100 
101  /* for low delay */
102  unsigned length;
103  const uint8_t *coeff_data;
104 } SubBand;
105 
106 typedef struct Plane {
107  DWTPlane idwt;
108 
109  int width;
110  int height;
111  ptrdiff_t stride;
112 
113  /* block length */
114  uint8_t xblen;
115  uint8_t yblen;
116  /* block separation (block n+1 starts after this many pixels in block n) */
117  uint8_t xbsep;
118  uint8_t ybsep;
119  /* amount of overspill on each edge (half of the overlap between blocks) */
120  uint8_t xoffset;
121  uint8_t yoffset;
122 
123  SubBand band[MAX_DWT_LEVELS][4];
124 } Plane;
125 
126 /* Used by Low Delay and High Quality profiles */
127 typedef struct DiracSlice {
128  GetBitContext gb;
129  int slice_x;
130  int slice_y;
131  int bytes;
132 } DiracSlice;
133 
134 typedef struct DiracContext {
135  AVCodecContext *avctx;
136  MpegvideoEncDSPContext mpvencdsp;
137  VideoDSPContext vdsp;
138  DiracDSPContext diracdsp;
139  DiracVersionInfo version;
140  GetBitContext gb;
141  AVDiracSeqHeader seq;
142  int seen_sequence_header;
143  int64_t frame_number; /* number of the next frame to display */
144  Plane plane[3];
145  int chroma_x_shift;
146  int chroma_y_shift;
147 
148  int bit_depth; /* bit depth */
149  int pshift; /* pixel shift = bit_depth > 8 */
150 
151  int zero_res; /* zero residue flag */
152  int is_arith; /* whether coeffs use arith or golomb coding */
153  int core_syntax; /* use core syntax only */
154  int low_delay; /* use the low delay syntax */
155  int hq_picture; /* high quality picture, enables low_delay */
156  int ld_picture; /* use low delay picture, turns on low_delay */
157  int dc_prediction; /* has dc prediction */
158  int globalmc_flag; /* use global motion compensation */
159  int num_refs; /* number of reference pictures */
160 
161  /* wavelet decoding */
162  unsigned wavelet_depth; /* depth of the IDWT */
163  unsigned wavelet_idx;
164 
165  /**
166  * schroedinger older than 1.0.8 doesn't store
167  * quant delta if only one codebook exists in a band
168  */
169  unsigned old_delta_quant;
170  unsigned codeblock_mode;
171 
172  unsigned num_x; /* number of horizontal slices */
173  unsigned num_y; /* number of vertical slices */
174 
175  uint8_t *thread_buf; /* Per-thread buffer for coefficient storage */
176  int threads_num_buf; /* Current # of buffers allocated */
177  int thread_buf_size; /* Each thread has a buffer this size */
178 
179  DiracSlice *slice_params_buf;
180  int slice_params_num_buf;
181 
182  struct {
183  unsigned width;
184  unsigned height;
185  } codeblock[MAX_DWT_LEVELS+1];
186 
187  struct {
188  AVRational bytes; /* average bytes per slice */
189  uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
190  } lowdelay;
191 
192  struct {
193  unsigned prefix_bytes;
194  uint64_t size_scaler;
195  } highquality;
196 
197  struct {
198  int pan_tilt[2]; /* pan/tilt vector */
199  int zrs[2][2]; /* zoom/rotate/shear matrix */
200  int perspective[2]; /* perspective vector */
201  unsigned zrs_exp;
202  unsigned perspective_exp;
203  } globalmc[2];
204 
205  /* motion compensation */
206  uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
207  int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
208  unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
209 
210  int blwidth; /* number of blocks (horizontally) */
211  int blheight; /* number of blocks (vertically) */
212  int sbwidth; /* number of superblocks (horizontally) */
213  int sbheight; /* number of superblocks (vertically) */
214 
214 
215  uint8_t *sbsplit;
216  DiracBlock *blmotion;
217 
218  uint8_t *edge_emu_buffer[4];
219  uint8_t *edge_emu_buffer_base;
220 
221  uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
222  uint8_t *mcscratch;
223  int buffer_stride;
224 
225  DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
226 
227  void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
228  void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
229  void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
230  dirac_weight_func weight_func;
231  dirac_biweight_func biweight_func;
232 
233  DiracFrame *current_picture;
234  DiracFrame *ref_pics[2];
235 
236  DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];
237  DiracFrame *delay_frames[MAX_DELAY+1];
238  DiracFrame all_frames[MAX_FRAMES];
239 } DiracContext;
240 
241 enum dirac_subband {
242  subband_ll = 0,
243  subband_hl = 1,
244  subband_lh = 2,
245  subband_hh = 3,
246  subband_nb,
247 };
248 
249 /* magic number division by 3 from schroedinger */
250 static inline int divide3(int x)
251 {
252  return (int)((x+1U)*21845 + 10922) >> 16;
253 }
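
/* The constant 21845 is roughly 2^16/3 (3 * 21845 = 65535) and 10922 is
 * roughly 2^16/6, so the expression above computes an approximately rounded
 * x/3 with one multiply and a shift instead of a division. */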
254 
255 static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
256 {
257  DiracFrame *remove_pic = NULL;
258  int i, remove_idx = -1;
259 
260  for (i = 0; framelist[i]; i++)
261  if (framelist[i]->avframe->display_picture_number == picnum) {
262  remove_pic = framelist[i];
263  remove_idx = i;
264  }
265 
266  if (remove_pic)
267  for (i = remove_idx; framelist[i]; i++)
268  framelist[i] = framelist[i+1];
269 
270  return remove_pic;
271 }
272 
273 static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
274 {
275  int i;
276  for (i = 0; i < maxframes; i++)
277  if (!framelist[i]) {
278  framelist[i] = frame;
279  return 0;
280  }
281  return -1;
282 }
283 
284 static int alloc_sequence_buffers(DiracContext *s)
285 {
286  int sbwidth = DIVRNDUP(s->seq.width, 4);
287  int sbheight = DIVRNDUP(s->seq.height, 4);
288  int i, w, h, top_padding;
289 
290  /* todo: think more about this / use or set Plane here */
291  for (i = 0; i < 3; i++) {
292  int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
293  int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
294  w = s->seq.width >> (i ? s->chroma_x_shift : 0);
295  h = s->seq.height >> (i ? s->chroma_y_shift : 0);
296 
297  /* we allocate the max we support here since num decompositions can
298  * change from frame to frame. Stride is aligned to 16 for SIMD, and
299  * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding;
300  * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
301  * on each side */
302  top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
303  w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
304  h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
305 
306  s->plane[i].idwt.buf_base = av_mallocz_array((w+max_xblen), h * (2 << s->pshift));
307  s->plane[i].idwt.tmp = av_malloc_array((w+16), 2 << s->pshift);
308  s->plane[i].idwt.buf = s->plane[i].idwt.buf_base + (top_padding*w)*(2 << s->pshift);
309  if (!s->plane[i].idwt.buf_base || !s->plane[i].idwt.tmp)
310  return AVERROR(ENOMEM);
311  }
312 
313  /* fixme: allocate using real stride here */
314  s->sbsplit = av_malloc_array(sbwidth, sbheight);
315  s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
316 
317  if (!s->sbsplit || !s->blmotion)
318  return AVERROR(ENOMEM);
319  return 0;
320 }
321 
322 static int alloc_buffers(DiracContext *s, int stride)
323 {
324  int w = s->seq.width;
325  int h = s->seq.height;
326 
327  av_assert0(stride >= w);
328  stride += 64;
329 
330  if (s->buffer_stride >= stride)
331  return 0;
332  s->buffer_stride = 0;
333 
334  av_freep(&s->edge_emu_buffer_base);
335  memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
336  av_freep(&s->mctmp);
337  av_freep(&s->mcscratch);
338 
339  s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
340 
341  s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
342  s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
343 
344  if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
345  return AVERROR(ENOMEM);
346 
347  s->buffer_stride = stride;
348  return 0;
349 }
350 
351 static void free_sequence_buffers(DiracContext *s)
352 {
353  int i, j, k;
354 
355  for (i = 0; i < MAX_FRAMES; i++) {
356  if (s->all_frames[i].avframe->data[0]) {
357  av_frame_unref(s->all_frames[i].avframe);
358  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
359  }
360 
361  for (j = 0; j < 3; j++)
362  for (k = 1; k < 4; k++)
363  av_freep(&s->all_frames[i].hpel_base[j][k]);
364  }
365 
366  memset(s->ref_frames, 0, sizeof(s->ref_frames));
367  memset(s->delay_frames, 0, sizeof(s->delay_frames));
368 
369  for (i = 0; i < 3; i++) {
370  av_freep(&s->plane[i].idwt.buf_base);
371  av_freep(&s->plane[i].idwt.tmp);
372  }
373 
374  s->buffer_stride = 0;
375  av_freep(&s->sbsplit);
376  av_freep(&s->blmotion);
377  av_freep(&s->edge_emu_buffer_base);
378 
379  av_freep(&s->mctmp);
380  av_freep(&s->mcscratch);
381 }
382 
384 
385 static av_cold int dirac_decode_init(AVCodecContext *avctx)
386 {
387  DiracContext *s = avctx->priv_data;
388  int i, ret;
389 
390  s->avctx = avctx;
391  s->frame_number = -1;
392 
393  s->thread_buf = NULL;
394  s->threads_num_buf = -1;
395  s->thread_buf_size = -1;
396 
397  ff_diracdsp_init(&s->diracdsp);
398  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
399  ff_videodsp_init(&s->vdsp, 8);
400 
401  for (i = 0; i < MAX_FRAMES; i++) {
402  s->all_frames[i].avframe = av_frame_alloc();
403  if (!s->all_frames[i].avframe) {
404  while (i > 0)
405  av_frame_free(&s->all_frames[--i].avframe);
406  return AVERROR(ENOMEM);
407  }
408  }
410  if (ret != 0)
411  return AVERROR_UNKNOWN;
412 
413  return 0;
414 }
415 
416 static void dirac_decode_flush(AVCodecContext *avctx)
417 {
418  DiracContext *s = avctx->priv_data;
419  free_sequence_buffers(s);
420  s->seen_sequence_header = 0;
421  s->frame_number = -1;
422 }
423 
424 static av_cold int dirac_decode_end(AVCodecContext *avctx)
425 {
426  DiracContext *s = avctx->priv_data;
427  int i;
428 
429  dirac_decode_flush(avctx);
430  for (i = 0; i < MAX_FRAMES; i++)
431  av_frame_free(&s->all_frames[i].avframe);
432 
433  av_freep(&s->thread_buf);
434  av_freep(&s->slice_params_buf);
435 
436  return 0;
437 }
438 
439 static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
440 {
441  int coeff = dirac_get_se_golomb(gb);
442  const unsigned sign = FFSIGN(coeff);
443  if (coeff)
444  coeff = sign*((sign * coeff * qfactor + qoffset) >> 2);
445  return coeff;
446 }
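
/* Both the Golomb path above and the arithmetic path below dequantize as
 *   |coeff| = (value * qfactor + qoffset) >> 2
 * and then reapply the sign; qfactor and qoffset are looked up by the caller
 * in ff_dirac_qscale_tab and the ff_dirac_qoffset_intra/inter tables
 * (see codeblock() below). */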
447 
448 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
449 
450 #define UNPACK_ARITH(n, type) \
451  static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
452  SubBand *b, type *buf, int x, int y) \
453  { \
454  int sign, sign_pred = 0, pred_ctx = CTX_ZPZN_F1; \
455  unsigned coeff; \
456  const int mstride = -(b->stride >> (1+b->pshift)); \
457  if (b->parent) { \
458  const type *pbuf = (type *)b->parent->ibuf; \
459  const int stride = b->parent->stride >> (1+b->parent->pshift); \
460  pred_ctx += !!pbuf[stride * (y>>1) + (x>>1)] << 1; \
461  } \
462  if (b->orientation == subband_hl) \
463  sign_pred = buf[mstride]; \
464  if (x) { \
465  pred_ctx += !(buf[-1] | buf[mstride] | buf[-1 + mstride]); \
466  if (b->orientation == subband_lh) \
467  sign_pred = buf[-1]; \
468  } else { \
469  pred_ctx += !buf[mstride]; \
470  } \
471  coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA); \
472  if (coeff) { \
473  coeff = (coeff * qfactor + qoffset) >> 2; \
474  sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred)); \
475  coeff = (coeff ^ -sign) + sign; \
476  } \
477  *buf = coeff; \
478  } \
479 
480 UNPACK_ARITH(8, int16_t)
481 UNPACK_ARITH(10, int32_t)
482 
483 /**
484  * Decode the coeffs in the rectangle defined by left, right, top, bottom
485  * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
486  */
487 static inline int codeblock(DiracContext *s, SubBand *b,
488  GetBitContext *gb, DiracArith *c,
489  int left, int right, int top, int bottom,
490  int blockcnt_one, int is_arith)
491 {
492  int x, y, zero_block;
493  int qoffset, qfactor;
494  uint8_t *buf;
495 
496  /* check for any coded coefficients in this codeblock */
497  if (!blockcnt_one) {
498  if (is_arith)
499  zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
500  else
501  zero_block = get_bits1(gb);
502 
503  if (zero_block)
504  return 0;
505  }
506 
507  if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
508  int quant;
509  if (is_arith)
510  quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
511  else
512  quant = dirac_get_se_golomb(gb);
513  if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
514  av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
515  return AVERROR_INVALIDDATA;
516  }
517  b->quant += quant;
518  }
519 
520  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
521  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
522  b->quant = 0;
523  return AVERROR_INVALIDDATA;
524  }
525 
526  qfactor = ff_dirac_qscale_tab[b->quant];
527  /* TODO: context pointer? */
528  if (!s->num_refs)
529  qoffset = ff_dirac_qoffset_intra_tab[b->quant] + 2;
530  else
531  qoffset = ff_dirac_qoffset_inter_tab[b->quant] + 2;
532 
533  buf = b->ibuf + top * b->stride;
534  if (is_arith) {
535  for (y = top; y < bottom; y++) {
536  if (c->error)
537  return c->error;
538  for (x = left; x < right; x++) {
539  if (b->pshift) {
540  coeff_unpack_arith_10(c, qfactor, qoffset, b, (int32_t*)(buf)+x, x, y);
541  } else {
542  coeff_unpack_arith_8(c, qfactor, qoffset, b, (int16_t*)(buf)+x, x, y);
543  }
544  }
545  buf += b->stride;
546  }
547  } else {
548  for (y = top; y < bottom; y++) {
549  if (get_bits_left(gb) < 1)
550  return AVERROR_INVALIDDATA;
551  for (x = left; x < right; x++) {
552  int val = coeff_unpack_golomb(gb, qfactor, qoffset);
553  if (b->pshift) {
554  AV_WN32(&buf[4*x], val);
555  } else {
556  AV_WN16(&buf[2*x], val);
557  }
558  }
559  buf += b->stride;
560  }
561  }
562  return 0;
563 }
564 
565 /**
566  * Dirac Specification ->
567  * 13.3 intra_dc_prediction(band)
568  */
569 #define INTRA_DC_PRED(n, type) \
570  static inline void intra_dc_prediction_##n(SubBand *b) \
571  { \
572  type *buf = (type*)b->ibuf; \
573  int x, y; \
574  \
575  for (x = 1; x < b->width; x++) \
576  buf[x] += buf[x-1]; \
577  buf += (b->stride >> (1+b->pshift)); \
578  \
579  for (y = 1; y < b->height; y++) { \
580  buf[0] += buf[-(b->stride >> (1+b->pshift))]; \
581  \
582  for (x = 1; x < b->width; x++) { \
583  int pred = buf[x - 1] + buf[x - (b->stride >> (1+b->pshift))] + buf[x - (b->stride >> (1+b->pshift))-1]; \
584  buf[x] += divide3(pred); \
585  } \
586  buf += (b->stride >> (1+b->pshift)); \
587  } \
588  } \
589 
590 INTRA_DC_PRED(8, int16_t)
591 INTRA_DC_PRED(10, uint32_t)
592 
593 /**
594  * Dirac Specification ->
595  * 13.4.2 Non-skipped subbands. subband_coeffs()
596  */
597 static av_always_inline int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
598 {
599  int cb_x, cb_y, left, right, top, bottom;
600  DiracArith c;
601  GetBitContext gb;
602  int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
603  int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
604  int blockcnt_one = (cb_width + cb_height) == 2;
605  int ret;
606 
607  if (!b->length)
608  return 0;
609 
610  init_get_bits8(&gb, b->coeff_data, b->length);
611 
612  if (is_arith)
613  ff_dirac_init_arith_decoder(&c, &gb, b->length);
614 
615  top = 0;
616  for (cb_y = 0; cb_y < cb_height; cb_y++) {
617  bottom = (b->height * (cb_y+1LL)) / cb_height;
618  left = 0;
619  for (cb_x = 0; cb_x < cb_width; cb_x++) {
620  right = (b->width * (cb_x+1LL)) / cb_width;
621  ret = codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
622  if (ret < 0)
623  return ret;
624  left = right;
625  }
626  top = bottom;
627  }
628 
629  if (b->orientation == subband_ll && s->num_refs == 0) {
630  if (s->pshift) {
631  intra_dc_prediction_10(b);
632  } else {
633  intra_dc_prediction_8(b);
634  }
635  }
636  return 0;
637 }
638 
639 static int decode_subband_arith(AVCodecContext *avctx, void *b)
640 {
641  DiracContext *s = avctx->priv_data;
642  return decode_subband_internal(s, b, 1);
643 }
644 
645 static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
646 {
647  DiracContext *s = avctx->priv_data;
648  SubBand **b = arg;
649  return decode_subband_internal(s, *b, 0);
650 }
651 
652 /**
653  * Dirac Specification ->
654  * [DIRAC_STD] 13.4.1 core_transform_data()
655  */
656 static int decode_component(DiracContext *s, int comp)
657 {
658  AVCodecContext *avctx = s->avctx;
659  SubBand *bands[3*MAX_DWT_LEVELS+1];
660  enum dirac_subband orientation;
661  int level, num_bands = 0;
662  int ret[3*MAX_DWT_LEVELS+1];
663  int i;
664  int damaged_count = 0;
665 
666  /* Unpack all subbands at all levels. */
667  for (level = 0; level < s->wavelet_depth; level++) {
668  for (orientation = !!level; orientation < 4; orientation++) {
669  SubBand *b = &s->plane[comp].band[level][orientation];
670  bands[num_bands++] = b;
671 
672  align_get_bits(&s->gb);
673  /* [DIRAC_STD] 13.4.2 subband() */
674  b->length = get_interleaved_ue_golomb(&s->gb);
675  if (b->length) {
676  b->quant = get_interleaved_ue_golomb(&s->gb);
677  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
678  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
679  b->quant = 0;
680  return AVERROR_INVALIDDATA;
681  }
682  align_get_bits(&s->gb);
683  b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
684  if (b->length > FFMAX(get_bits_left(&s->gb)/8, 0)) {
685  b->length = FFMAX(get_bits_left(&s->gb)/8, 0);
686  damaged_count ++;
687  }
688  skip_bits_long(&s->gb, b->length*8);
689  }
690  }
691  /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
692  if (s->is_arith)
693  avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
694  ret + 3*level + !!level, 4-!!level, sizeof(SubBand));
695  }
696  /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
697  if (!s->is_arith)
698  avctx->execute(avctx, decode_subband_golomb, bands, ret, num_bands, sizeof(SubBand*));
699 
700  for (i = 0; i < s->wavelet_depth * 3 + 1; i++) {
701  if (ret[i] < 0)
702  damaged_count++;
703  }
704  if (damaged_count > (s->wavelet_depth * 3 + 1) /2)
705  return AVERROR_INVALIDDATA;
706 
707  return 0;
708 }
709 
710 #define PARSE_VALUES(type, x, gb, ebits, buf1, buf2) \
711  type *buf = (type *)buf1; \
712  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
713  if (get_bits_count(gb) >= ebits) \
714  return; \
715  if (buf2) { \
716  buf = (type *)buf2; \
717  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
718  if (get_bits_count(gb) >= ebits) \
719  return; \
720  } \
721 
722 static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
723  int slice_x, int slice_y, int bits_end,
724  SubBand *b1, SubBand *b2)
725 {
726  int left = b1->width * slice_x / s->num_x;
727  int right = b1->width *(slice_x+1) / s->num_x;
728  int top = b1->height * slice_y / s->num_y;
729  int bottom = b1->height *(slice_y+1) / s->num_y;
730 
731  int qfactor, qoffset;
732 
733  uint8_t *buf1 = b1->ibuf + top * b1->stride;
734  uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
735  int x, y;
736 
737  if (quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
738  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", quant);
739  return;
740  }
741  qfactor = ff_dirac_qscale_tab[quant];
742  qoffset = ff_dirac_qoffset_intra_tab[quant] + 2;
743  /* we have to constantly check for overread since the spec explicitly
744  requires this; once we overread, all remaining coeffs are left as 0 */
745  if (get_bits_count(gb) >= bits_end)
746  return;
747 
748  if (s->pshift) {
749  for (y = top; y < bottom; y++) {
750  for (x = left; x < right; x++) {
751  PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2);
752  }
753  buf1 += b1->stride;
754  if (buf2)
755  buf2 += b2->stride;
756  }
757  }
758  else {
759  for (y = top; y < bottom; y++) {
760  for (x = left; x < right; x++) {
761  PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2);
762  }
763  buf1 += b1->stride;
764  if (buf2)
765  buf2 += b2->stride;
766  }
767  }
768 }
769 
770 /**
771  * Dirac Specification ->
772  * 13.5.2 Slices. slice(sx,sy)
773  */
774 static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
775 {
776  DiracContext *s = avctx->priv_data;
777  DiracSlice *slice = arg;
778  GetBitContext *gb = &slice->gb;
779  enum dirac_subband orientation;
780  int level, quant, chroma_bits, chroma_end;
781 
782  int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
783  int length_bits = av_log2(8 * slice->bytes)+1;
784  int luma_bits = get_bits_long(gb, length_bits);
785  int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
786 
787  /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
788  for (level = 0; level < s->wavelet_depth; level++)
789  for (orientation = !!level; orientation < 4; orientation++) {
790  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
791  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
792  &s->plane[0].band[level][orientation], NULL);
793  }
794 
795  /* consume any unused bits from luma */
796  skip_bits_long(gb, get_bits_count(gb) - luma_end);
797 
798  chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
799  chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
800  /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
801  for (level = 0; level < s->wavelet_depth; level++)
802  for (orientation = !!level; orientation < 4; orientation++) {
803  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
804  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
805  &s->plane[1].band[level][orientation],
806  &s->plane[2].band[level][orientation]);
807  }
808 
809  return 0;
810 }
811 
812 typedef struct SliceCoeffs {
813  int left;
814  int top;
815  int tot_h;
816  int tot_v;
817  int tot;
818 } SliceCoeffs;
819 
820 static int subband_coeffs(DiracContext *s, int x, int y, int p,
821  SliceCoeffs c[MAX_DWT_LEVELS])
822 {
823  int level, coef = 0;
824  for (level = 0; level < s->wavelet_depth; level++) {
825  SliceCoeffs *o = &c[level];
826  SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
827  o->top = b->height * y / s->num_y;
828  o->left = b->width * x / s->num_x;
829  o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;
830  o->tot_v = ((b->height * (y + 1)) / s->num_y) - o->top;
831  o->tot = o->tot_h*o->tot_v;
832  coef += o->tot * (4 - !!level);
833  }
834  return coef;
835 }
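
/* Every orientation at a level covers the same slice rectangle, so the count
 * above multiplies the per-band total by the number of bands in the level:
 * 4 at level 0 (which also carries the LL band) and 3 at each higher level. */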
836 
837 /**
838  * VC-2 Specification ->
839  * 13.5.3 hq_slice(sx,sy)
840  */
841 static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
842 {
843  int i, level, orientation, quant_idx;
844  int qfactor[MAX_DWT_LEVELS][4], qoffset[MAX_DWT_LEVELS][4];
845  GetBitContext *gb = &slice->gb;
846  SliceCoeffs coeffs_num[MAX_DWT_LEVELS];
847 
848  skip_bits_long(gb, 8*s->highquality.prefix_bytes);
849  quant_idx = get_bits(gb, 8);
850 
851  if (quant_idx > DIRAC_MAX_QUANT_INDEX - 1) {
852  av_log(s->avctx, AV_LOG_ERROR, "Invalid quantization index - %i\n", quant_idx);
853  return AVERROR_INVALIDDATA;
854  }
855 
856  /* Slice quantization (slice_quantizers() in the specs) */
857  for (level = 0; level < s->wavelet_depth; level++) {
858  for (orientation = !!level; orientation < 4; orientation++) {
859  const int quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
860  qfactor[level][orientation] = ff_dirac_qscale_tab[quant];
861  qoffset[level][orientation] = ff_dirac_qoffset_intra_tab[quant] + 2;
862  }
863  }
864 
865  /* Luma + 2 Chroma planes */
866  for (i = 0; i < 3; i++) {
867  int coef_num, coef_par, off = 0;
868  int64_t length = s->highquality.size_scaler*get_bits(gb, 8);
869  int64_t bits_end = get_bits_count(gb) + 8*length;
870  const uint8_t *addr = align_get_bits(gb);
871 
872  if (length*8 > get_bits_left(gb)) {
873  av_log(s->avctx, AV_LOG_ERROR, "end too far away\n");
874  return AVERROR_INVALIDDATA;
875  }
876 
877  coef_num = subband_coeffs(s, slice->slice_x, slice->slice_y, i, coeffs_num);
878 
879  if (s->pshift)
880  coef_par = ff_dirac_golomb_read_32bit(addr, length,
881  tmp_buf, coef_num);
882  else
883  coef_par = ff_dirac_golomb_read_16bit(addr, length,
884  tmp_buf, coef_num);
885 
886  if (coef_num > coef_par) {
887  const int start_b = coef_par * (1 << (s->pshift + 1));
888  const int end_b = coef_num * (1 << (s->pshift + 1));
889  memset(&tmp_buf[start_b], 0, end_b - start_b);
890  }
891 
892  for (level = 0; level < s->wavelet_depth; level++) {
893  const SliceCoeffs *c = &coeffs_num[level];
894  for (orientation = !!level; orientation < 4; orientation++) {
895  const SubBand *b1 = &s->plane[i].band[level][orientation];
896  uint8_t *buf = b1->ibuf + c->top * b1->stride + (c->left << (s->pshift + 1));
897 
898  /* Change to c->tot_h <= 4 for AVX2 dequantization */
899  const int qfunc = s->pshift + 2*(c->tot_h <= 2);
900  s->diracdsp.dequant_subband[qfunc](&tmp_buf[off], buf, b1->stride,
901  qfactor[level][orientation],
902  qoffset[level][orientation],
903  c->tot_v, c->tot_h);
904 
905  off += c->tot << (s->pshift + 1);
906  }
907  }
908 
909  skip_bits_long(gb, bits_end - get_bits_count(gb));
910  }
911 
912  return 0;
913 }
914 
915 static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
916 {
917  int i;
918  DiracContext *s = avctx->priv_data;
919  DiracSlice *slices = ((DiracSlice *)arg) + s->num_x*jobnr;
920  uint8_t *thread_buf = &s->thread_buf[s->thread_buf_size*threadnr];
921  for (i = 0; i < s->num_x; i++)
922  decode_hq_slice(s, &slices[i], thread_buf);
923  return 0;
924 }
925 
926 /**
927  * Dirac Specification ->
928  * 13.5.1 low_delay_transform_data()
929  */
930 static int decode_lowdelay(DiracContext *s)
931 {
932  AVCodecContext *avctx = s->avctx;
933  int slice_x, slice_y, bufsize;
934  int64_t coef_buf_size, bytes = 0;
935  const uint8_t *buf;
936  DiracSlice *slices;
937  SliceCoeffs tmp[MAX_DWT_LEVELS];
938  int slice_num = 0;
939 
940  if (s->slice_params_num_buf != (s->num_x * s->num_y)) {
941  s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice));
942  if (!s->slice_params_buf) {
943  av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n");
944  s->slice_params_num_buf = 0;
945  return AVERROR(ENOMEM);
946  }
947  s->slice_params_num_buf = s->num_x * s->num_y;
948  }
949  slices = s->slice_params_buf;
950 
951  /* 8 because that's how much the golomb reader could overread junk data
952  * from another plane/slice at most, and 512 because SIMD */
953  coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8;
954  coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512;
955 
956  if (s->threads_num_buf != avctx->thread_count ||
957  s->thread_buf_size != coef_buf_size) {
958  s->threads_num_buf = avctx->thread_count;
959  s->thread_buf_size = coef_buf_size;
960  s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size);
961  if (!s->thread_buf) {
962  av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n");
963  return AVERROR(ENOMEM);
964  }
965  }
966 
967  align_get_bits(&s->gb);
968  /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
969  buf = s->gb.buffer + get_bits_count(&s->gb)/8;
970  bufsize = get_bits_left(&s->gb);
971 
972  if (s->hq_picture) {
973  int i;
974 
975  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
976  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
977  bytes = s->highquality.prefix_bytes + 1;
978  for (i = 0; i < 3; i++) {
979  if (bytes <= bufsize/8)
980  bytes += buf[bytes] * s->highquality.size_scaler + 1;
981  }
982  if (bytes >= INT_MAX || bytes*8 > bufsize) {
983  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
984  return AVERROR_INVALIDDATA;
985  }
986 
987  slices[slice_num].bytes = bytes;
988  slices[slice_num].slice_x = slice_x;
989  slices[slice_num].slice_y = slice_y;
990  init_get_bits(&slices[slice_num].gb, buf, bufsize);
991  slice_num++;
992 
993  buf += bytes;
994  if (bufsize/8 >= bytes)
995  bufsize -= bytes*8;
996  else
997  bufsize = 0;
998  }
999  }
1000 
1001  if (s->num_x*s->num_y != slice_num) {
1002  av_log(s->avctx, AV_LOG_ERROR, "too few slices\n");
1003  return AVERROR_INVALIDDATA;
1004  }
1005 
1006  avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y);
1007  } else {
1008  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
1009  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
1010  bytes = (slice_num+1) * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den
1011  - slice_num * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den;
1012  if (bytes >= INT_MAX || bytes*8 > bufsize) {
1013  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
1014  return AVERROR_INVALIDDATA;
1015  }
1016  slices[slice_num].bytes = bytes;
1017  slices[slice_num].slice_x = slice_x;
1018  slices[slice_num].slice_y = slice_y;
1019  init_get_bits(&slices[slice_num].gb, buf, bufsize);
1020  slice_num++;
1021 
1022  buf += bytes;
1023  if (bufsize/8 >= bytes)
1024  bufsize -= bytes*8;
1025  else
1026  bufsize = 0;
1027  }
1028  }
1029  avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
1030  sizeof(DiracSlice)); /* [DIRAC_STD] 13.5.2 Slices */
1031  }
1032 
1033  if (s->dc_prediction) {
1034  if (s->pshift) {
1035  intra_dc_prediction_10(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1036  intra_dc_prediction_10(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1037  intra_dc_prediction_10(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1038  } else {
1039  intra_dc_prediction_8(&s->plane[0].band[0][0]);
1040  intra_dc_prediction_8(&s->plane[1].band[0][0]);
1041  intra_dc_prediction_8(&s->plane[2].band[0][0]);
1042  }
1043  }
1044 
1045  return 0;
1046 }
1047 
1048 static void init_planes(DiracContext *s)
1049 {
1050  int i, w, h, level, orientation;
1051 
1052  for (i = 0; i < 3; i++) {
1053  Plane *p = &s->plane[i];
1054 
1055  p->width = s->seq.width >> (i ? s->chroma_x_shift : 0);
1056  p->height = s->seq.height >> (i ? s->chroma_y_shift : 0);
1057  p->idwt.width = w = CALC_PADDING(p->width , s->wavelet_depth);
1058  p->idwt.height = h = CALC_PADDING(p->height, s->wavelet_depth);
1059  p->idwt.stride = FFALIGN(p->idwt.width, 8) << (1 + s->pshift);
1060 
1061  for (level = s->wavelet_depth-1; level >= 0; level--) {
1062  w = w>>1;
1063  h = h>>1;
1064  for (orientation = !!level; orientation < 4; orientation++) {
1065  SubBand *b = &p->band[level][orientation];
1066 
1067  b->pshift = s->pshift;
1068  b->ibuf = p->idwt.buf;
1069  b->level = level;
1070  b->stride = p->idwt.stride << (s->wavelet_depth - level);
1071  b->width = w;
1072  b->height = h;
1073  b->orientation = orientation;
1074 
1075  if (orientation & 1)
1076  b->ibuf += w << (1+b->pshift);
1077  if (orientation > 1)
1078  b->ibuf += (b->stride>>1);
1079 
1080  if (level)
1081  b->parent = &p->band[level-1][orientation];
1082  }
1083  }
1084 
1085  if (i > 0) {
1086  p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
1087  p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
1088  p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
1089  p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
1090  }
1091 
1092  p->xoffset = (p->xblen - p->xbsep)/2;
1093  p->yoffset = (p->yblen - p->ybsep)/2;
1094  }
1095 }
1096 
1097 /**
1098  * Unpack the motion compensation parameters
1099  * Dirac Specification ->
1100  * 11.2 Picture prediction data. picture_prediction()
1101  */
1102 static int dirac_unpack_prediction_parameters(DiracContext *s)
1103 {
1104  static const uint8_t default_blen[] = { 4, 12, 16, 24 };
1105 
1106  GetBitContext *gb = &s->gb;
1107  unsigned idx, ref;
1108 
1109  align_get_bits(gb);
1110  /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
1111  /* Luma and Chroma are equal. 11.2.3 */
1112  idx = get_interleaved_ue_golomb(gb); /* [DIRAC_STD] index */
1113 
1114  if (idx > 4) {
1115  av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
1116  return AVERROR_INVALIDDATA;
1117  }
1118 
1119  if (idx == 0) {
1120  s->plane[0].xblen = get_interleaved_ue_golomb(gb);
1121  s->plane[0].yblen = get_interleaved_ue_golomb(gb);
1122  s->plane[0].xbsep = get_interleaved_ue_golomb(gb);
1123  s->plane[0].ybsep = get_interleaved_ue_golomb(gb);
1124  } else {
1125  /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
1126  s->plane[0].xblen = default_blen[idx-1];
1127  s->plane[0].yblen = default_blen[idx-1];
1128  s->plane[0].xbsep = 4 * idx;
1129  s->plane[0].ybsep = 4 * idx;
1130  }
1131  /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
1132  Calculated in function dirac_unpack_block_motion_data */
1133 
1134  if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
1135  s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
1136  !s->plane[0].xblen || !s->plane[0].yblen) {
1137  av_log(s->avctx, AV_LOG_ERROR,
1138  "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
1139  s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
1140  return AVERROR_INVALIDDATA;
1141  }
1142  if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
1143  av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
1144  return AVERROR_INVALIDDATA;
1145  }
1146  if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
1147  av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
1148  return AVERROR_INVALIDDATA;
1149  }
1150  if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
1151  av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
1152  return AVERROR_PATCHWELCOME;
1153  }
1154 
1155  /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
1156  Read motion vector precision */
1157  s->mv_precision = get_interleaved_ue_golomb(gb);
1158  if (s->mv_precision > 3) {
1159  av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
1160  return AVERROR_INVALIDDATA;
1161  }
1162 
1163  /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
1164  Read the global motion compensation parameters */
1165  s->globalmc_flag = get_bits1(gb);
1166  if (s->globalmc_flag) {
1167  memset(s->globalmc, 0, sizeof(s->globalmc));
1168  /* [DIRAC_STD] pan_tilt(gparams) */
1169  for (ref = 0; ref < s->num_refs; ref++) {
1170  if (get_bits1(gb)) {
1171  s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
1172  s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
1173  }
1174  /* [DIRAC_STD] zoom_rotate_shear(gparams)
1175  zoom/rotation/shear parameters */
1176  if (get_bits1(gb)) {
1177  s->globalmc[ref].zrs_exp = get_interleaved_ue_golomb(gb);
1178  s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
1179  s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
1180  s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
1181  s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
1182  } else {
1183  s->globalmc[ref].zrs[0][0] = 1;
1184  s->globalmc[ref].zrs[1][1] = 1;
1185  }
1186  /* [DIRAC_STD] perspective(gparams) */
1187  if (get_bits1(gb)) {
1188  s->globalmc[ref].perspective_exp = get_interleaved_ue_golomb(gb);
1189  s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
1190  s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
1191  }
1192  if (s->globalmc[ref].perspective_exp + (uint64_t)s->globalmc[ref].zrs_exp > 30) {
1193  return AVERROR_INVALIDDATA;
1194  }
1195 
1196  }
1197  }
1198 
1199  /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
1200  Picture prediction mode, not currently used. */
1201  if (get_interleaved_ue_golomb(gb)) {
1202  av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
1203  return AVERROR_INVALIDDATA;
1204  }
1205 
1206  /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
1207  just data read, weight calculation will be done later on. */
1208  s->weight_log2denom = 1;
1209  s->weight[0] = 1;
1210  s->weight[1] = 1;
1211 
1212  if (get_bits1(gb)) {
1213  s->weight_log2denom = get_interleaved_ue_golomb(gb);
1214  if (s->weight_log2denom < 1 || s->weight_log2denom > 8) {
1215  av_log(s->avctx, AV_LOG_ERROR, "weight_log2denom unsupported or invalid\n");
1216  s->weight_log2denom = 1;
1217  return AVERROR_INVALIDDATA;
1218  }
1219  s->weight[0] = dirac_get_se_golomb(gb);
1220  if (s->num_refs == 2)
1221  s->weight[1] = dirac_get_se_golomb(gb);
1222  }
1223  return 0;
1224 }
1225 
1226 /**
1227  * Dirac Specification ->
1228  * 11.3 Wavelet transform data. wavelet_transform()
1229  */
1230 static int dirac_unpack_idwt_params(DiracContext *s)
1231 {
1232  GetBitContext *gb = &s->gb;
1233  int i, level;
1234  unsigned tmp;
1235 
1236 #define CHECKEDREAD(dst, cond, errmsg) \
1237  tmp = get_interleaved_ue_golomb(gb); \
1238  if (cond) { \
1239  av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1240  return AVERROR_INVALIDDATA; \
1241  }\
1242  dst = tmp;
1243 
1244  align_get_bits(gb);
1245 
1246  s->zero_res = s->num_refs ? get_bits1(gb) : 0;
1247  if (s->zero_res)
1248  return 0;
1249 
1250  /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
1251  CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
1252 
1253  CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
1254 
1255  if (!s->low_delay) {
1256  /* Codeblock parameters (core syntax only) */
1257  if (get_bits1(gb)) {
1258  for (i = 0; i <= s->wavelet_depth; i++) {
1259  CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
1260  CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
1261  }
1262 
1263  CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
1264  }
1265  else {
1266  for (i = 0; i <= s->wavelet_depth; i++)
1267  s->codeblock[i].width = s->codeblock[i].height = 1;
1268  }
1269  }
1270  else {
1271  s->num_x = get_interleaved_ue_golomb(gb);
1272  s->num_y = get_interleaved_ue_golomb(gb);
1273  if (s->num_x * s->num_y == 0 || s->num_x * (uint64_t)s->num_y > INT_MAX ||
1274  s->num_x * (uint64_t)s->avctx->width > INT_MAX ||
1275  s->num_y * (uint64_t)s->avctx->height > INT_MAX ||
1276  s->num_x > s->avctx->width ||
1277  s->num_y > s->avctx->height
1278  ) {
1279  av_log(s->avctx,AV_LOG_ERROR,"Invalid numx/y\n");
1280  s->num_x = s->num_y = 0;
1281  return AVERROR_INVALIDDATA;
1282  }
1283  if (s->ld_picture) {
1284  s->lowdelay.bytes.num = get_interleaved_ue_golomb(gb);
1285  s->lowdelay.bytes.den = get_interleaved_ue_golomb(gb);
1286  if (s->lowdelay.bytes.den <= 0) {
1287  av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
1288  return AVERROR_INVALIDDATA;
1289  }
1290  } else if (s->hq_picture) {
1291  s->highquality.prefix_bytes = get_interleaved_ue_golomb(gb);
1292  s->highquality.size_scaler = get_interleaved_ue_golomb(gb);
1293  if (s->highquality.prefix_bytes >= INT_MAX / 8) {
1294  av_log(s->avctx,AV_LOG_ERROR,"too many prefix bytes\n");
1295  return AVERROR_INVALIDDATA;
1296  }
1297  }
1298 
1299  /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
1300  if (get_bits1(gb)) {
1301  av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
1302  /* custom quantization matrix */
1303  for (level = 0; level < s->wavelet_depth; level++) {
1304  for (i = !!level; i < 4; i++) {
1305  s->lowdelay.quant[level][i] = get_interleaved_ue_golomb(gb);
1306  }
1307  }
1308  } else {
1309  if (s->wavelet_depth > 4) {
1310  av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
1311  return AVERROR_INVALIDDATA;
1312  }
1313  /* default quantization matrix */
1314  for (level = 0; level < s->wavelet_depth; level++)
1315  for (i = 0; i < 4; i++) {
1316  s->lowdelay.quant[level][i] = ff_dirac_default_qmat[s->wavelet_idx][level][i];
1317  /* haar with no shift differs for different depths */
1318  if (s->wavelet_idx == 3)
1319  s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
1320  }
1321  }
1322  }
1323  return 0;
1324 }
1325 
1326 static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
1327 {
1328  static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1329 
1330  if (!(x|y))
1331  return 0;
1332  else if (!y)
1333  return sbsplit[-1];
1334  else if (!x)
1335  return sbsplit[-stride];
1336 
1337  return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
1338 }
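
/* avgsplit[] maps the sum of the three neighbouring split modes (each 0..2,
 * so the sum is 0..6) to their mean rounded to nearest, i.e. the predictor is
 * the rounded average of the left, top and top-left superblock splits. */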
1339 
1340 static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
1341 {
1342  int pred;
1343 
1344  if (!(x|y))
1345  return 0;
1346  else if (!y)
1347  return block[-1].ref & refmask;
1348  else if (!x)
1349  return block[-stride].ref & refmask;
1350 
1351  /* return the majority */
1352  pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1353  return (pred >> 1) & refmask;
1354 }
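
/* pred sums the refmask bit over the three neighbours, so shifting it right
 * by one keeps the bit only when at least two neighbours use that reference,
 * which is the majority vote mentioned above. */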
1355 
1356 static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
1357 {
1358  int i, n = 0;
1359 
1360  memset(block->u.dc, 0, sizeof(block->u.dc));
1361 
1362  if (x && !(block[-1].ref & 3)) {
1363  for (i = 0; i < 3; i++)
1364  block->u.dc[i] += block[-1].u.dc[i];
1365  n++;
1366  }
1367 
1368  if (y && !(block[-stride].ref & 3)) {
1369  for (i = 0; i < 3; i++)
1370  block->u.dc[i] += block[-stride].u.dc[i];
1371  n++;
1372  }
1373 
1374  if (x && y && !(block[-1-stride].ref & 3)) {
1375  for (i = 0; i < 3; i++)
1376  block->u.dc[i] += block[-1-stride].u.dc[i];
1377  n++;
1378  }
1379 
1380  if (n == 2) {
1381  for (i = 0; i < 3; i++)
1382  block->u.dc[i] = (block->u.dc[i]+1)>>1;
1383  } else if (n == 3) {
1384  for (i = 0; i < 3; i++)
1385  block->u.dc[i] = divide3(block->u.dc[i]);
1386  }
1387 }
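
/* The DC prediction is the mean of the intra-coded (ref == 0) neighbours that
 * exist: a single neighbour is copied as-is, two are averaged with rounding,
 * and three are divided by 3 via divide3(). */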
1388 
1389 static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
1390 {
1391  int16_t *pred[3];
1392  int refmask = ref+1;
1393  int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
1394  int n = 0;
1395 
1396  if (x && (block[-1].ref & mask) == refmask)
1397  pred[n++] = block[-1].u.mv[ref];
1398 
1399  if (y && (block[-stride].ref & mask) == refmask)
1400  pred[n++] = block[-stride].u.mv[ref];
1401 
1402  if (x && y && (block[-stride-1].ref & mask) == refmask)
1403  pred[n++] = block[-stride-1].u.mv[ref];
1404 
1405  switch (n) {
1406  case 0:
1407  block->u.mv[ref][0] = 0;
1408  block->u.mv[ref][1] = 0;
1409  break;
1410  case 1:
1411  block->u.mv[ref][0] = pred[0][0];
1412  block->u.mv[ref][1] = pred[0][1];
1413  break;
1414  case 2:
1415  block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1416  block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1417  break;
1418  case 3:
1419  block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1420  block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1421  break;
1422  }
1423 }
1424 
1425 static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
1426 {
1427  int ez = s->globalmc[ref].zrs_exp;
1428  int ep = s->globalmc[ref].perspective_exp;
1429  int (*A)[2] = s->globalmc[ref].zrs;
1430  int *b = s->globalmc[ref].pan_tilt;
1431  int *c = s->globalmc[ref].perspective;
1432 
1433  int64_t m = (1<<ep) - (c[0]*(int64_t)x + c[1]*(int64_t)y);
1434  int64_t mx = m * (uint64_t)((A[0][0] * (int64_t)x + A[0][1]*(int64_t)y) + (1LL<<ez) * b[0]);
1435  int64_t my = m * (uint64_t)((A[1][0] * (int64_t)x + A[1][1]*(int64_t)y) + (1LL<<ez) * b[1]);
1436 
1437  block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1438  block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1439 }
1440 
1441 static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
1442  int stride, int x, int y)
1443 {
1444  int i;
1445 
1446  block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
1447  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
1448 
1449  if (s->num_refs == 2) {
1450  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
1451  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
1452  }
1453 
1454  if (!block->ref) {
1455  pred_block_dc(block, stride, x, y);
1456  for (i = 0; i < 3; i++)
1457  block->u.dc[i] += (unsigned)dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
1458  return;
1459  }
1460 
1461  if (s->globalmc_flag) {
1462  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
1463  block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
1464  }
1465 
1466  for (i = 0; i < s->num_refs; i++)
1467  if (block->ref & (i+1)) {
1468  if (block->ref & DIRAC_REF_MASK_GLOBAL) {
1469  global_mv(s, block, x, y, i);
1470  } else {
1471  pred_mv(block, stride, x, y, i);
1472  block->u.mv[i][0] += (unsigned)dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1473  block->u.mv[i][1] += (unsigned)dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1474  }
1475  }
1476 }
1477 
1478 /**
1479  * Copies the current block to the other blocks covered by the current superblock split mode
1480  */
1481 static void propagate_block_data(DiracBlock *block, int stride, int size)
1482 {
1483  int x, y;
1484  DiracBlock *dst = block;
1485 
1486  for (x = 1; x < size; x++)
1487  dst[x] = *block;
1488 
1489  for (y = 1; y < size; y++) {
1490  dst += stride;
1491  for (x = 0; x < size; x++)
1492  dst[x] = *block;
1493  }
1494 }
1495 
1496 /**
1497  * Dirac Specification ->
1498  * 12. Block motion data syntax
1499  */
1500 static int dirac_unpack_block_motion_data(DiracContext *s)
1501 {
1502  GetBitContext *gb = &s->gb;
1503  uint8_t *sbsplit = s->sbsplit;
1504  int i, x, y, q, p;
1505  DiracArith arith[8];
1506 
1507  align_get_bits(gb);
1508 
1509  /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
1510  s->sbwidth = DIVRNDUP(s->seq.width, 4*s->plane[0].xbsep);
1511  s->sbheight = DIVRNDUP(s->seq.height, 4*s->plane[0].ybsep);
1512  s->blwidth = 4 * s->sbwidth;
1513  s->blheight = 4 * s->sbheight;
1514 
1515  /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
1516  decode superblock split modes */
1517  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb)); /* get_interleaved_ue_golomb(gb) is the length */
1518  for (y = 0; y < s->sbheight; y++) {
1519  for (x = 0; x < s->sbwidth; x++) {
1520  unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
1521  if (split > 2)
1522  return AVERROR_INVALIDDATA;
1523  sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
1524  }
1525  sbsplit += s->sbwidth;
1526  }
1527 
1528  /* setup arith decoding */
1529  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb));
1530  for (i = 0; i < s->num_refs; i++) {
1531  ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1532  ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1533  }
1534  for (i = 0; i < 3; i++)
1535  ff_dirac_init_arith_decoder(arith+1+i, gb, get_interleaved_ue_golomb(gb));
1536 
1537  for (y = 0; y < s->sbheight; y++)
1538  for (x = 0; x < s->sbwidth; x++) {
1539  int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
1540  int step = 4 >> s->sbsplit[y * s->sbwidth + x];
1541 
1542  for (q = 0; q < blkcnt; q++)
1543  for (p = 0; p < blkcnt; p++) {
1544  int bx = 4 * x + p*step;
1545  int by = 4 * y + q*step;
1546  DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
1547  decode_block_params(s, arith, block, s->blwidth, bx, by);
1548  propagate_block_data(block, s->blwidth, step);
1549  }
1550  }
1551 
1552  for (i = 0; i < 4 + 2*s->num_refs; i++) {
1553  if (arith[i].error)
1554  return arith[i].error;
1555  }
1556 
1557  return 0;
1558 }
1559 
1560 static int weight(int i, int blen, int offset)
1561 {
1562 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1563  (1 + (6*(i) + offset - 1) / (2*offset - 1))
1564 
1565  if (i < 2*offset)
1566  return ROLLOFF(i);
1567  else if (i > blen-1 - 2*offset)
1568  return ROLLOFF(blen-1 - i);
1569  return 8;
1570 }
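
/* weight() is the 1-D OBMC ramp: 8 across the non-overlapping middle of a
 * block and a roll-off over the 2*offset overlapped pixels at each edge.
 * init_obmc_weight_row() below multiplies the vertical ramp value wy by the
 * horizontal ramp to build the 2-D weight table used by add_obmc(). */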
1571 
1572 static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
1573  int left, int right, int wy)
1574 {
1575  int x;
1576  for (x = 0; left && x < p->xblen >> 1; x++)
1577  obmc_weight[x] = wy*8;
1578  for (; x < p->xblen >> right; x++)
1579  obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
1580  for (; x < p->xblen; x++)
1581  obmc_weight[x] = wy*8;
1582  for (; x < stride; x++)
1583  obmc_weight[x] = 0;
1584 }
1585 
1586 static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
1587  int left, int right, int top, int bottom)
1588 {
1589  int y;
1590  for (y = 0; top && y < p->yblen >> 1; y++) {
1591  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1592  obmc_weight += stride;
1593  }
1594  for (; y < p->yblen >> bottom; y++) {
1595  int wy = weight(y, p->yblen, p->yoffset);
1596  init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
1597  obmc_weight += stride;
1598  }
1599  for (; y < p->yblen; y++) {
1600  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1601  obmc_weight += stride;
1602  }
1603 }
1604 
1605 static void init_obmc_weights(DiracContext *s, Plane *p, int by)
1606 {
1607  int top = !by;
1608  int bottom = by == s->blheight-1;
1609 
1610  /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
1611  if (top || bottom || by == 1) {
1612  init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
1613  init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
1614  init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
1615  }
1616 }
1617 
1618 static const uint8_t epel_weights[4][4][4] = {
1619  {{ 16, 0, 0, 0 },
1620  { 12, 4, 0, 0 },
1621  { 8, 8, 0, 0 },
1622  { 4, 12, 0, 0 }},
1623  {{ 12, 0, 4, 0 },
1624  { 9, 3, 3, 1 },
1625  { 6, 6, 2, 2 },
1626  { 3, 9, 1, 3 }},
1627  {{ 8, 0, 8, 0 },
1628  { 6, 2, 6, 2 },
1629  { 4, 4, 4, 4 },
1630  { 2, 6, 2, 6 }},
1631  {{ 4, 0, 12, 0 },
1632  { 3, 1, 9, 3 },
1633  { 2, 2, 6, 6 },
1634  { 1, 3, 3, 9 }}
1635 };
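
/* epel_weights[my&3][mx&3] gives the bilinear blend of the four half-pel
 * planes (F, H, V, C) for an eighth-pel position; each row of four weights
 * sums to 16, e.g. position (1,1) blends them as 9:3:3:1. */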
1636 
1637 /**
1638  * For block x,y, determine which of the hpel planes to do bilinear
1639  * interpolation from and set src[] to the location in each hpel plane
1640  * to MC from.
1641  *
1642  * @return the index of the put_dirac_pixels_tab function to use
1643  * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
1644  */
1645 static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t **src,
1646  int x, int y, int ref, int plane)
1647 {
1648  Plane *p = &s->plane[plane];
1649  uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
1650  int motion_x = block->u.mv[ref][0];
1651  int motion_y = block->u.mv[ref][1];
1652  int mx, my, i, epel, nplanes = 0;
1653 
1654  if (plane) {
1655  motion_x >>= s->chroma_x_shift;
1656  motion_y >>= s->chroma_y_shift;
1657  }
1658 
1659  mx = motion_x & ~(-1U << s->mv_precision);
1660  my = motion_y & ~(-1U << s->mv_precision);
1661  motion_x >>= s->mv_precision;
1662  motion_y >>= s->mv_precision;
1663  /* normalize subpel coordinates to epel */
1664  /* TODO: template this function? */
1665  mx <<= 3 - s->mv_precision;
1666  my <<= 3 - s->mv_precision;
1667 
1668  x += motion_x;
1669  y += motion_y;
1670  epel = (mx|my)&1;
1671 
1672  /* hpel position */
1673  if (!((mx|my)&3)) {
1674  nplanes = 1;
1675  src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1676  } else {
1677  /* qpel or epel */
1678  nplanes = 4;
1679  for (i = 0; i < 4; i++)
1680  src[i] = ref_hpel[i] + y*p->stride + x;
1681 
1682  /* if we're interpolating in the right/bottom halves, adjust the planes as needed
1683  we increment x/y because the edge changes for half of the pixels */
1684  if (mx > 4) {
1685  src[0] += 1;
1686  src[2] += 1;
1687  x++;
1688  }
1689  if (my > 4) {
1690  src[0] += p->stride;
1691  src[1] += p->stride;
1692  y++;
1693  }
1694 
1695  /* hpel planes are:
1696  [0]: F [1]: H
1697  [2]: V [3]: C */
1698  if (!epel) {
1699  /* check if we really only need 2 planes since either mx or my is
1700  a hpel position. (epel weights of 0 handle this there) */
1701  if (!(mx&3)) {
1702  /* mx == 0: average [0] and [2]
1703  mx == 4: average [1] and [3] */
1704  src[!mx] = src[2 + !!mx];
1705  nplanes = 2;
1706  } else if (!(my&3)) {
1707  src[0] = src[(my>>1) ];
1708  src[1] = src[(my>>1)+1];
1709  nplanes = 2;
1710  }
1711  } else {
1712  /* adjust the ordering if needed so the weights work */
1713  if (mx > 4) {
1714  FFSWAP(const uint8_t *, src[0], src[1]);
1715  FFSWAP(const uint8_t *, src[2], src[3]);
1716  }
1717  if (my > 4) {
1718  FFSWAP(const uint8_t *, src[0], src[2]);
1719  FFSWAP(const uint8_t *, src[1], src[3]);
1720  }
1721  src[4] = epel_weights[my&3][mx&3];
1722  }
1723  }
1724 
1725  /* fixme: v/h _edge_pos */
1726  if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
1727  y + p->yblen > p->height+EDGE_WIDTH/2 ||
1728  x < 0 || y < 0) {
1729  for (i = 0; i < nplanes; i++) {
1730  s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
1731  p->stride, p->stride,
1732  p->xblen, p->yblen, x, y,
1733  p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
1734  src[i] = s->edge_emu_buffer[i];
1735  }
1736  }
1737  return (nplanes>>1) + epel;
1738 }
1739 
1740 static void add_dc(uint16_t *dst, int dc, int stride,
1741  uint8_t *obmc_weight, int xblen, int yblen)
1742 {
1743  int x, y;
1744  dc += 128;
1745 
1746  for (y = 0; y < yblen; y++) {
1747  for (x = 0; x < xblen; x += 2) {
1748  dst[x ] += dc * obmc_weight[x ];
1749  dst[x+1] += dc * obmc_weight[x+1];
1750  }
1751  dst += stride;
1752  obmc_weight += MAX_BLOCKSIZE;
1753  }
1754 }
1755 
1756 static void block_mc(DiracContext *s, DiracBlock *block,
1757  uint16_t *mctmp, uint8_t *obmc_weight,
1758  int plane, int dstx, int dsty)
1759 {
1760  Plane *p = &s->plane[plane];
1761  const uint8_t *src[5];
1762  int idx;
1763 
1764  switch (block->ref&3) {
1765  case 0: /* DC */
1766  add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
1767  return;
1768  case 1:
1769  case 2:
1770  idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1771  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1772  if (s->weight_func)
1773  s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
1774  s->weight[0] + s->weight[1], p->yblen);
1775  break;
1776  case 3:
1777  idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1778  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1779  idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1780  if (s->biweight_func) {
1781  /* fixme: +32 is a quick hack */
1782  s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
1783  s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
1784  s->weight[0], s->weight[1], p->yblen);
1785  } else
1786  s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1787  break;
1788  }
1789  s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
1790 }
1791 
1792 static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
1793 {
1794  Plane *p = &s->plane[plane];
1795  int x, dstx = p->xbsep - p->xoffset;
1796 
1797  block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
1798  mctmp += p->xbsep;
1799 
1800  for (x = 1; x < s->blwidth-1; x++) {
1801  block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
1802  dstx += p->xbsep;
1803  mctmp += p->xbsep;
1804  }
1805  block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
1806 }
1807 
1808 static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
1809 {
1810  int idx = 0;
1811  if (xblen > 8)
1812  idx = 1;
1813  if (xblen > 16)
1814  idx = 2;
1815 
1816  memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
1817  memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
1818  s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
1819  if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
1820  s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
1821  s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
1822  } else {
1823  s->weight_func = NULL;
1824  s->biweight_func = NULL;
1825  }
1826 }
1827 
1828 static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
1829 {
1830  /* chroma allocates an edge of 8 when subsampled
1831  which for 4:2:2 means an h edge of 16 and v edge of 8
1832  just use 8 for everything for the moment */
1833  int i, edge = EDGE_WIDTH/2;
1834 
1835  ref->hpel[plane][0] = ref->avframe->data[plane];
1836  s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
1837 
1838  /* no need for hpel if we only have fpel vectors */
1839  if (!s->mv_precision)
1840  return 0;
1841 
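 /* hpel[0] aliases the decoded plane; hpel[1..3] are allocated here and then
  * filled with the three half-pel interpolated planes */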
1842  for (i = 1; i < 4; i++) {
1843  if (!ref->hpel_base[plane][i])
1844  ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
1845  if (!ref->hpel_base[plane][i]) {
1846  return AVERROR(ENOMEM);
1847  }
1848  /* we need to be 16-byte aligned even for chroma */
1849  ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
1850  }
1851 
1852  if (!ref->interpolated[plane]) {
1853  s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
1854  ref->hpel[plane][3], ref->hpel[plane][0],
1855  ref->avframe->linesize[plane], width, height);
1856  s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1857  s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1858  s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1859  }
1860  ref->interpolated[plane] = 1;
1861 
1862  return 0;
1863 }
1864 
1865 /**
1866  * Dirac Specification ->
1867  * 13.0 Transform data syntax. transform_data()
1868  */
1869 static int dirac_decode_frame_internal(DiracContext *s)
1870 {
1871  DWTContext d;
1872  int y, i, comp, dsty;
1873  int ret;
1874 
1875  if (s->low_delay) {
1876  /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
1877  if (!s->hq_picture) {
1878  for (comp = 0; comp < 3; comp++) {
1879  Plane *p = &s->plane[comp];
1880  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1881  }
1882  }
1883  if (!s->zero_res) {
1884  if ((ret = decode_lowdelay(s)) < 0)
1885  return ret;
1886  }
1887  }
1888 
1889  for (comp = 0; comp < 3; comp++) {
1890  Plane *p = &s->plane[comp];
1891  uint8_t *frame = s->current_picture->avframe->data[comp];
1892 
1893  /* FIXME: small resolutions */
1894  for (i = 0; i < 4; i++)
1895  s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
1896 
1897  if (!s->zero_res && !s->low_delay)
1898  {
1899  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1900  ret = decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
1901  if (ret < 0)
1902  return ret;
1903  }
1904  ret = ff_spatial_idwt_init(&d, &p->idwt, s->wavelet_idx+2,
1905  s->wavelet_depth, s->bit_depth);
1906  if (ret < 0)
1907  return ret;
1908 
1909  if (!s->num_refs) { /* intra */
1910  for (y = 0; y < p->height; y += 16) {
1911  int idx = (s->bit_depth - 8) >> 1;
1912  ff_spatial_idwt_slice2(&d, y+16); /* decode */
1913  s->diracdsp.put_signed_rect_clamped[idx](frame + y*p->stride,
1914  p->stride,
1915  p->idwt.buf + y*p->idwt.stride,
1916  p->idwt.stride, p->width, 16);
1917  }
1918  } else { /* inter */
1919  int rowheight = p->ybsep*p->stride;
1920 
1921  select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
1922 
1923  for (i = 0; i < s->num_refs; i++) {
1924  int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
1925  if (ret < 0)
1926  return ret;
1927  }
1928 
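 /* s->mctmp accumulates the OBMC-weighted prediction at 16-bit precision;
  * add_rect_clamped below adds the IDWT residual and clamps into the frame */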
1929  memset(s->mctmp, 0, 4*p->yoffset*p->stride);
1930 
1931  dsty = -p->yoffset;
1932  for (y = 0; y < s->blheight; y++) {
1933  int h = 0,
1934  start = FFMAX(dsty, 0);
1935  uint16_t *mctmp = s->mctmp + y*rowheight;
1936  DiracBlock *blocks = s->blmotion + y*s->blwidth;
1937 
1938  init_obmc_weights(s, p, y);
1939 
1940  if (y == s->blheight-1 || start+p->ybsep > p->height)
1941  h = p->height - start;
1942  else
1943  h = p->ybsep - (start - dsty);
1944  if (h < 0)
1945  break;
1946 
1947  memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
1948  mc_row(s, blocks, mctmp, comp, dsty);
1949 
1950  mctmp += (start - dsty)*p->stride + p->xoffset;
1951  ff_spatial_idwt_slice2(&d, start + h); /* decode */
1952  /* NOTE: add_rect_clamped hasn't been templated hence the shifts.
1953  * idwt.stride is passed as pixels, not in bytes as in the rest of the decoder */
1954  s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
1955  (int16_t*)(p->idwt.buf) + start*(p->idwt.stride >> 1), (p->idwt.stride >> 1), p->width, h);
1956 
1957  dsty += p->ybsep;
1958  }
1959  }
1960  }
1961 
1962 
1963  return 0;
1964 }
1965 
1966 static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
1967 {
1968  int ret, i;
1969  int chroma_x_shift, chroma_y_shift;
1970  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift,
1971  &chroma_y_shift);
1972  if (ret < 0)
1973  return ret;
1974 
1975  f->width = avctx->width + 2 * EDGE_WIDTH;
1976  f->height = avctx->height + 2 * EDGE_WIDTH + 2;
1977  ret = ff_get_buffer(avctx, f, flags);
1978  if (ret < 0)
1979  return ret;
1980 
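 /* advance data[] past the allocated padding so the edges sit above and to the
  * left of the visible picture */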
1981  for (i = 0; f->data[i]; i++) {
1982  int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
1983  f->linesize[i] + 32;
1984  f->data[i] += offset;
1985  }
1986  f->width = avctx->width;
1987  f->height = avctx->height;
1988 
1989  return 0;
1990 }
1991 
1992 /**
1993  * Dirac Specification ->
1994  * 11.1.1 Picture Header. picture_header()
1995  */
1996 static int dirac_decode_picture_header(DiracContext *s)
1997 {
1998  unsigned retire, picnum;
1999  int i, j, ret;
2000  int64_t refdist, refnum;
2001  GetBitContext *gb = &s->gb;
2002 
2003  /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
2004  picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);
2005 
2006 
2007  av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
2008 
2009  /* if this is the first keyframe after a sequence header, start our
2010  reordering from here */
2011  if (s->frame_number < 0)
2012  s->frame_number = picnum;
2013 
2014  s->ref_pics[0] = s->ref_pics[1] = NULL;
2015  for (i = 0; i < s->num_refs; i++) {
2016  refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2017  refdist = INT64_MAX;
2018 
2019  /* find the closest reference to the one we want */
2020  /* Jordi: this is needed if the referenced picture hasn't yet arrived */
2021  for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
2022  if (s->ref_frames[j]
2023  && FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
2024  s->ref_pics[i] = s->ref_frames[j];
2025  refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
2026  }
2027 
2028  if (!s->ref_pics[i] || refdist)
2029  av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
2030 
2031  /* if there were no references at all, allocate one */
2032  if (!s->ref_pics[i])
2033  for (j = 0; j < MAX_FRAMES; j++)
2034  if (!s->all_frames[j].avframe->data[0]) {
2035  s->ref_pics[i] = &s->all_frames[j];
2036  ret = get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
2037  if (ret < 0)
2038  return ret;
2039  break;
2040  }
2041 
2042  if (!s->ref_pics[i]) {
2043  av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
2044  return AVERROR_INVALIDDATA;
2045  }
2046 
2047  }
2048 
2049  /* retire the reference frames that are not used anymore */
2050  if (s->current_picture->reference) {
2051  retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2052  if (retire != picnum) {
2053  DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
2054 
2055  if (retire_pic)
2056  retire_pic->reference &= DELAYED_PIC_REF;
2057  else
2058  av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
2059  }
2060 
2061  /* if reference array is full, remove the oldest as per the spec */
2062  while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
2063  av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
2064  remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->reference &= DELAYED_PIC_REF;
2065  }
2066  }
2067 
2068  if (s->num_refs) {
2069  ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
2070  if (ret < 0)
2071  return ret;
2072  ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
2073  if (ret < 0)
2074  return ret;
2075  }
2076  ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
2077  if (ret < 0)
2078  return ret;
2079 
2080  init_planes(s);
2081  return 0;
2082 }
2083 
2084 static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
2085 {
2086  DiracFrame *out = s->delay_frames[0];
2087  int i, out_idx = 0;
2088  int ret;
2089 
2090  /* find frame with lowest picture number */
2091  for (i = 1; s->delay_frames[i]; i++)
2092  if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
2093  out = s->delay_frames[i];
2094  out_idx = i;
2095  }
2096 
2097  for (i = out_idx; s->delay_frames[i]; i++)
2098  s->delay_frames[i] = s->delay_frames[i+1];
2099 
2100  if (out) {
2101  out->reference ^= DELAYED_PIC_REF;
2102  if((ret = av_frame_ref(picture, out->avframe)) < 0)
2103  return ret;
2104  *got_frame = 1;
2105  }
2106 
2107  return 0;
2108 }
2109 
2110 /**
2111  * Dirac Specification ->
2112  * 9.6 Parse Info Header Syntax. parse_info()
2113  * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
2114  */
2115 #define DATA_UNIT_HEADER_SIZE 13
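 /* 4 (start code "BBCD") + 1 (parse code) + 4 (next offset) + 4 (previous offset) = 13 bytes */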
2116 
2117 /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3
2118  inside the function parse_sequence() */
2119 static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
2120 {
2121  DiracContext *s = avctx->priv_data;
2122  DiracFrame *pic = NULL;
2123  AVDiracSeqHeader *dsh;
2124  int ret, i;
2125  uint8_t parse_code;
2126  unsigned tmp;
2127 
2128  if (size < DATA_UNIT_HEADER_SIZE)
2129  return AVERROR_INVALIDDATA;
2130 
2131  parse_code = buf[4];
2132 
2133  init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
2134 
2135  if (parse_code == DIRAC_PCODE_SEQ_HEADER) {
2136  if (s->seen_sequence_header)
2137  return 0;
2138 
2139  /* [DIRAC_STD] 10. Sequence header */
2140  ret = av_dirac_parse_sequence_header(&dsh, buf + DATA_UNIT_HEADER_SIZE, size - DATA_UNIT_HEADER_SIZE, avctx);
2141  if (ret < 0) {
2142  av_log(avctx, AV_LOG_ERROR, "error parsing sequence header");
2143  return ret;
2144  }
2145 
2146  if (CALC_PADDING((int64_t)dsh->width, MAX_DWT_LEVELS) * CALC_PADDING((int64_t)dsh->height, MAX_DWT_LEVELS) * 5LL > avctx->max_pixels)
2147  ret = AVERROR(ERANGE);
2148  if (ret >= 0)
2149  ret = ff_set_dimensions(avctx, dsh->width, dsh->height);
2150  if (ret < 0) {
2151  av_freep(&dsh);
2152  return ret;
2153  }
2154 
2155  ff_set_sar(avctx, dsh->sample_aspect_ratio);
2156  avctx->pix_fmt = dsh->pix_fmt;
2157  avctx->color_range = dsh->color_range;
2158  avctx->color_trc = dsh->color_trc;
2159  avctx->color_primaries = dsh->color_primaries;
2160  avctx->colorspace = dsh->colorspace;
2161  avctx->profile = dsh->profile;
2162  avctx->level = dsh->level;
2163  avctx->framerate = dsh->framerate;
2164  s->bit_depth = dsh->bit_depth;
2165  s->version.major = dsh->version.major;
2166  s->version.minor = dsh->version.minor;
2167  s->seq = *dsh;
2168  av_freep(&dsh);
2169 
2170  s->pshift = s->bit_depth > 8;
2171 
2172  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
2173  &s->chroma_x_shift,
2174  &s->chroma_y_shift);
2175  if (ret < 0)
2176  return ret;
2177 
2178  ret = alloc_sequence_buffers(s);
2179  if (ret < 0)
2180  return ret;
2181 
2182  s->seen_sequence_header = 1;
2183  } else if (parse_code == DIRAC_PCODE_END_SEQ) { /* [DIRAC_STD] End of Sequence */
2184  free_sequence_buffers(s);
2185  s->seen_sequence_header = 0;
2186  } else if (parse_code == DIRAC_PCODE_AUX) {
2187  if (buf[13] == 1) { /* encoder implementation/version */
2188  int ver[3];
2189  /* versions older than 1.0.8 don't store quant delta for
2190  subbands with only one codeblock */
2191  if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
2192  if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
2193  s->old_delta_quant = 1;
2194  }
2195  } else if (parse_code & 0x8) { /* picture data unit */
2196  if (!s->seen_sequence_header) {
2197  av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
2198  return AVERROR_INVALIDDATA;
2199  }
2200 
2201  /* find an unused frame */
2202  for (i = 0; i < MAX_FRAMES; i++)
2203  if (s->all_frames[i].avframe->data[0] == NULL)
2204  pic = &s->all_frames[i];
2205  if (!pic) {
2206  av_log(avctx, AV_LOG_ERROR, "framelist full\n");
2207  return AVERROR_INVALIDDATA;
2208  }
2209 
2210  av_frame_unref(pic->avframe);
2211 
2212  /* [DIRAC_STD] Defined in 9.6.1 ... */
2213  tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
2214  if (tmp > 2) {
2215  av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
2216  return AVERROR_INVALIDDATA;
2217  }
2218  s->num_refs = tmp;
2219  s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
2220  s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
2221  s->core_syntax = (parse_code & 0x88) == 0x08; /* [DIRAC_STD] is_core_syntax() */
2222  s->ld_picture = (parse_code & 0xF8) == 0xC8; /* [DIRAC_STD] is_ld_picture() */
2223  s->hq_picture = (parse_code & 0xF8) == 0xE8; /* [DIRAC_STD] is_hq_picture() */
2224  s->dc_prediction = (parse_code & 0x28) == 0x08; /* [DIRAC_STD] using_dc_prediction() */
2225  pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
2226  pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
2227  pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
2228 
2229  /* VC-2 Low Delay has a different parse code than the Dirac Low Delay */
2230  if (s->version.minor == 2 && parse_code == 0x88)
2231  s->ld_picture = 1;
2232 
2233  if (s->low_delay && !(s->ld_picture || s->hq_picture) ) {
2234  av_log(avctx, AV_LOG_ERROR, "Invalid low delay flag\n");
2235  return AVERROR_INVALIDDATA;
2236  }
2237 
2238  if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
2239  return ret;
2240  s->current_picture = pic;
2241  s->plane[0].stride = pic->avframe->linesize[0];
2242  s->plane[1].stride = pic->avframe->linesize[1];
2243  s->plane[2].stride = pic->avframe->linesize[2];
2244 
2245  if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
2246  return AVERROR(ENOMEM);
2247 
2248  /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
2249  ret = dirac_decode_picture_header(s);
2250  if (ret < 0)
2251  return ret;
2252 
2253  /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
2254  ret = dirac_decode_frame_internal(s);
2255  if (ret < 0)
2256  return ret;
2257  }
2258  return 0;
2259 }
2260 
2261 static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
2262 {
2263  DiracContext *s = avctx->priv_data;
2264  AVFrame *picture = data;
2265  uint8_t *buf = pkt->data;
2266  int buf_size = pkt->size;
2267  int i, buf_idx = 0;
2268  int ret;
2269  unsigned data_unit_size;
2270 
2271  /* release unused frames */
2272  for (i = 0; i < MAX_FRAMES; i++)
2273  if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].reference) {
2274  av_frame_unref(s->all_frames[i].avframe);
2275  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
2276  }
2277 
2278  s->current_picture = NULL;
2279  *got_frame = 0;
2280 
2281  /* end of stream, so flush delayed pics */
2282  if (buf_size == 0)
2283  return get_delayed_pic(s, (AVFrame *)data, got_frame);
2284 
2285  for (;;) {
2286  /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
2287  [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
2288  BBCD start code search */
2289  for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
2290  if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
2291  buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
2292  break;
2293  }
2294  /* BBCD found or end of data */
2295  if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
2296  break;
2297 
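 /* bytes 5..8 after the "BBCD" prefix hold the offset to the next parse info header */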
2298  data_unit_size = AV_RB32(buf+buf_idx+5);
2299  if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
2300  if(data_unit_size > buf_size - buf_idx)
2301  av_log(s->avctx, AV_LOG_ERROR,
2302  "Data unit with size %d is larger than input buffer, discarding\n",
2303  data_unit_size);
2304  buf_idx += 4;
2305  continue;
2306  }
2307  /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3 inside the function parse_sequence() */
2308  ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
2309  if (ret < 0)
2310  {
2311  av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
2312  return ret;
2313  }
2314  buf_idx += data_unit_size;
2315  }
2316 
2317  if (!s->current_picture)
2318  return buf_size;
2319 
2320  if (s->current_picture->avframe->display_picture_number > s->frame_number) {
2321  DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
2322 
2323  s->current_picture->reference |= DELAYED_PIC_REF;
2324 
2325  if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
2326  int min_num = s->delay_frames[0]->avframe->display_picture_number;
2327  /* Too many delayed frames, so we display the frame with the lowest pts */
2328  av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
2329 
2330  for (i = 1; s->delay_frames[i]; i++)
2331  if (s->delay_frames[i]->avframe->display_picture_number < min_num)
2332  min_num = s->delay_frames[i]->avframe->display_picture_number;
2333 
2334  delayed_frame = remove_frame(s->delay_frames, min_num);
2335  add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
2336  }
2337 
2338  if (delayed_frame) {
2339  delayed_frame->reference ^= DELAYED_PIC_REF;
2340  if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
2341  return ret;
2342  *got_frame = 1;
2343  }
2344  } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
2345  /* The right frame at the right time :-) */
2346  if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
2347  return ret;
2348  *got_frame = 1;
2349  }
2350 
2351  if (*got_frame)
2352  s->frame_number = picture->display_picture_number + 1LL;
2353 
2354  return buf_idx;
2355 }
2356 
2357 AVCodec ff_dirac_decoder = {
2358  .name = "dirac",
2359  .long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
2360  .type = AVMEDIA_TYPE_VIDEO,
2361  .id = AV_CODEC_ID_DIRAC,
2362  .priv_data_size = sizeof(DiracContext),
2363  .init = dirac_decode_init,
2364  .close = dirac_decode_end,
2365  .decode = dirac_decode_frame,
2366  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2367  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2368  .flush = dirac_decode_flush,
2369 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
DWTPlane::buf
uint8_t * buf
Definition: dirac_dwt.h:41
DATA_UNIT_HEADER_SIZE
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
Definition: diracdec.c:2115
AVCodec
AVCodec.
Definition: codec.h:190
DiracContext::put_pixels_tab
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:227
DiracContext::blmotion
DiracBlock * blmotion
Definition: diracdec.c:216
av_dirac_parse_sequence_header
int av_dirac_parse_sequence_header(AVDiracSeqHeader **pdsh, const uint8_t *buf, size_t buf_size, void *log_ctx)
Parse a Dirac sequence header.
Definition: dirac.c:402
stride
int stride
Definition: mace.c:144
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
DiracContext::num_y
unsigned num_y
Definition: diracdec.c:173
level
uint8_t level
Definition: svq3.c:210
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
DiracContext::blwidth
int blwidth
Definition: diracdec.c:210
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DiracVersionInfo
Definition: dirac.h:76
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
SliceCoeffs::left
int left
Definition: diracdec.c:813
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
comp
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:83
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
thread.h
DiracBlock::ref
uint8_t ref
Definition: diracdec.c:87
subband_hh
@ subband_hh
Definition: diracdec.c:245
CTX_MV_DATA
#define CTX_MV_DATA
Definition: dirac_arith.h:71
MAX_DWT_LEVELS
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
Definition: dirac.h:45
free_sequence_buffers
static void free_sequence_buffers(DiracContext *s)
Definition: diracdec.c:351
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
epel_weights
static const uint8_t epel_weights[4][4][4]
Definition: diracdec.c:1618
AV_CODEC_ID_DIRAC
@ AV_CODEC_ID_DIRAC
Definition: codec_id.h:165
dirac_decode_picture_header
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
Definition: diracdec.c:1996
SliceCoeffs::tot
int tot
Definition: diracdec.c:817
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
DiracContext::wavelet_idx
unsigned wavelet_idx
Definition: diracdec.c:163
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
dirac_unpack_prediction_parameters
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data.
Definition: diracdec.c:1102
DIRAC_REF_MASK_REF1
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
Definition: diracdec.c:59
DiracContext::avctx
AVCodecContext * avctx
Definition: diracdec.c:135
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVDiracSeqHeader::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: dirac.h:109
DiracVersionInfo::major
int major
Definition: dirac.h:77
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
pixdesc.h
SubBand::stride
int stride
Definition: diracdec.c:93
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
DWTPlane
Definition: dirac_dwt.h:37
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
SubBand::width
int width
Definition: cfhd.h:49
SubBand::level
int level
Definition: cfhd.h:45
DiracContext::biweight_func
dirac_biweight_func biweight_func
Definition: diracdec.c:231
CTX_SB_F1
#define CTX_SB_F1
Definition: dirac_arith.h:65
CTX_ZERO_BLOCK
@ CTX_ZERO_BLOCK
Definition: dirac_arith.h:54
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:91
DiracContext::perspective_exp
unsigned perspective_exp
Definition: diracdec.c:202
DiracContext::bit_depth
int bit_depth
Definition: diracdec.c:148
ff_dirac_decoder
AVCodec ff_dirac_decoder
Definition: diracdec.c:2357
decode_lowdelay_slice
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
Definition: diracdec.c:774
DiracContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: diracdec.c:136
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
dirac_biweight_func
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
Definition: diracdsp.h:28
init_planes
static void init_planes(DiracContext *s)
Definition: diracdec.c:1048
dirac_dwt.h
mpegvideo.h
DIRAC_REF_MASK_GLOBAL
#define DIRAC_REF_MASK_GLOBAL
Definition: diracdec.c:61
dirac_arith_init
static AVOnce dirac_arith_init
Definition: diracdec.c:383
DiracContext::delay_frames
DiracFrame * delay_frames[MAX_DELAY+1]
Definition: diracdec.c:237
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
ff_dirac_qscale_tab
const int32_t ff_dirac_qscale_tab[116]
Definition: diractab.c:34
AVDiracSeqHeader::color_range
enum AVColorRange color_range
Definition: dirac.h:107
MAX_DELAY
#define MAX_DELAY
Definition: diracdec.c:51
DiracArith
Definition: dirac_arith.h:75
dirac_get_arith_int
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:191
codeblock
static int codeblock(DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13....
Definition: diracdec.c:487
CHECKEDREAD
#define CHECKEDREAD(dst, cond, errmsg)
DiracContext::mcscratch
uint8_t * mcscratch
Definition: diracdec.c:222
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
AVDiracSeqHeader::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: dirac.h:104
DiracContext::current_picture
DiracFrame * current_picture
Definition: diracdec.c:233
alloc_buffers
static int alloc_buffers(DiracContext *s, int stride)
Definition: diracdec.c:322
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
DiracContext::zrs
int zrs[2][2]
Definition: diracdec.c:199
diractab.h
ff_dirac_default_qmat
const uint8_t ff_dirac_default_qmat[7][4][4]
Definition: diractab.c:24
A
#define A(x)
Definition: vp56_arith.h:28
DiracFrame
Definition: diracdec.c:74
CTX_DC_F1
#define CTX_DC_F1
Definition: dirac_arith.h:72
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:2069
golomb.h
exp golomb vlc stuff
decode_subband_arith
static int decode_subband_arith(AVCodecContext *avctx, void *b)
Definition: diracdec.c:639
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AVDiracSeqHeader::level
int level
Definition: dirac.h:101
SubBand::parent
struct SubBand * parent
Definition: diracdec.c:99
subband_lh
@ subband_lh
Definition: diracdec.c:244
DiracContext::ld_picture
int ld_picture
Definition: diracdec.c:156
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:1332
DiracContext::edge_emu_buffer
uint8_t * edge_emu_buffer[4]
Definition: diracdec.c:218
AVDiracSeqHeader::version
DiracVersionInfo version
Definition: dirac.h:112
DiracContext::num_refs
int num_refs
Definition: diracdec.c:159
ff_spatial_idwt_init
int ff_spatial_idwt_init(DWTContext *d, DWTPlane *p, enum dwt_type type, int decomposition_count, int bit_depth)
Definition: dirac_dwt.c:36
U
#define U(x)
Definition: vp56_arith.h:37
DiracContext::sbheight
int sbheight
Definition: diracdec.c:213
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1785
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
GetBitContext
Definition: get_bits.h:61
DiracContext::blheight
int blheight
Definition: diracdec.c:211
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
Plane::band
SubBand band[DWT_LEVELS][4]
Definition: cfhd.h:69
val
static double val(void *priv, double ch)
Definition: aeval.c:76
dirac_unpack_block_motion_data
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
Definition: diracdec.c:1500
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
decode_component
static int decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
Definition: diracdec.c:656
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
DiracSlice::gb
GetBitContext gb
Definition: diracdec.c:128
pred_sbsplit
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
Definition: diracdec.c:1326
pred_block_dc
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1356
dirac.h
select_dsp_funcs
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
Definition: diracdec.c:1808
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
diracdsp.h
DiracContext::hq_picture
int hq_picture
Definition: diracdec.c:155
DiracSlice::bytes
int bytes
Definition: diracdec.c:131
dirac_weight_func
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
Definition: diracdsp.h:27
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
DiracContext::perspective
int perspective[2]
Definition: diracdec.c:200
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
AVDiracSeqHeader::bit_depth
int bit_depth
Definition: dirac.h:113
av_cold
#define av_cold
Definition: attributes.h:90
DiracContext::sbwidth
int sbwidth
Definition: diracdec.c:212
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
coeff_unpack_golomb
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
Definition: diracdec.c:439
DiracContext::chroma_y_shift
int chroma_y_shift
Definition: diracdec.c:146
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ROLLOFF
#define ROLLOFF(i)
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
width
#define width
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
DiracSlice::slice_x
int slice_x
Definition: diracdec.c:129
DiracContext::zero_res
int zero_res
Definition: diracdec.c:151
DiracContext::mctmp
uint16_t * mctmp
Definition: diracdec.c:221
Plane::xbsep
uint8_t xbsep
Definition: diracdec.c:117
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
MAX_REFERENCE_FRAMES
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
Definition: diracdec.c:50
ff_dirac_init_arith_decoder
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
Definition: dirac_arith.c:96
decode_subband_internal
static av_always_inline int decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
Definition: diracdec.c:597
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVDiracSeqHeader::profile
int profile
Definition: dirac.h:100
CTX_DELTA_Q_F
@ CTX_DELTA_Q_F
Definition: dirac_arith.h:55
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
DiracContext::version
DiracVersionInfo version
Definition: diracdec.c:139
get_bits.h
DiracContext::size_scaler
uint64_t size_scaler
Definition: diracdec.c:194
DWTPlane::stride
int stride
Definition: dirac_dwt.h:40
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2256
SubBand::ibuf
uint8_t * ibuf
Definition: cfhd.h:54
Plane::idwt
DWTPlane idwt
Definition: diracdec.c:107
bands
static const float bands[]
Definition: af_superequalizer.c:56
DiracContext::seen_sequence_header
int seen_sequence_header
Definition: diracdec.c:142
DiracContext::diracdsp
DiracDSPContext diracdsp
Definition: diracdec.c:138
f
#define f(width, name)
Definition: cbs_vp9.c:255
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
DiracContext::weight
int16_t weight[2]
Definition: diracdec.c:207
AVDiracSeqHeader::framerate
AVRational framerate
Definition: dirac.h:103
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
DiracContext::slice_params_buf
DiracSlice * slice_params_buf
Definition: diracdec.c:179
DiracContext::edge_emu_buffer_base
uint8_t * edge_emu_buffer_base
Definition: diracdec.c:219
dirac_get_arith_bit
static int dirac_get_arith_bit(DiracArith *c, int ctx)
Definition: dirac_arith.h:134
mc_row
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
Definition: diracdec.c:1792
ff_dirac_qoffset_inter_tab
const int ff_dirac_qoffset_inter_tab[122]
Definition: diractab.c:72
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:173
DIVRNDUP
#define DIVRNDUP(a, b)
Definition: diracdec.c:72
decode_hq_slice_row
static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
Definition: diracdec.c:915
DiracContext::weight_func
dirac_weight_func weight_func
Definition: diracdec.c:230
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
dirac_decode_frame_internal
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
Definition: diracdec.c:1869
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
SliceCoeffs::tot_v
int tot_v
Definition: diracdec.c:816
decode_lowdelay
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
Definition: diracdec.c:930
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
DiracContext::dc_prediction
int dc_prediction
Definition: diracdec.c:157
DiracContext::wavelet_depth
unsigned wavelet_depth
Definition: diracdec.c:162
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVDiracSeqHeader::colorspace
enum AVColorSpace colorspace
Definition: dirac.h:110
DiracSlice::slice_y
int slice_y
Definition: diracdec.c:130
DiracContext::ref_frames
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
Definition: diracdec.c:236
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
DiracContext::old_delta_quant
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band
Definition: diracdec.c:169
src
#define src
Definition: vp8dsp.c:254
dirac_get_arith_uint
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:175
DiracContext::lowdelay
struct DiracContext::@55 lowdelay
CTX_MV_F1
#define CTX_MV_F1
Definition: dirac_arith.h:70
dirac_decode_frame
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
Definition: diracdec.c:2261
DIRAC_MAX_QUANT_INDEX
#define DIRAC_MAX_QUANT_INDEX
Definition: diractab.h:41
DIRAC_PCODE_AUX
@ DIRAC_PCODE_AUX
Definition: dirac.h:60
DiracContext::highquality
struct DiracContext::@56 highquality
AVCodecContext::level
int level
level
Definition: avcodec.h:1982
DiracContext::thread_buf_size
int thread_buf_size
Definition: diracdec.c:177
subband_ll
@ subband_ll
Definition: diracdec.c:242
AVOnce
#define AVOnce
Definition: thread.h:172
DiracContext::is_arith
int is_arith
Definition: diracdec.c:152
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
DiracContext::width
unsigned width
Definition: diracdec.c:183
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1560
ff_spatial_idwt_slice2
void ff_spatial_idwt_slice2(DWTContext *d, int y)
Definition: dirac_dwt.c:67
dirac_subband
dirac_subband
Definition: diracdec.c:241
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
add_frame
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
Definition: diracdec.c:273
INTRA_DC_PRED
#define INTRA_DC_PRED(n, type)
Dirac Specification -> 13.3 intra_dc_prediction(band)
Definition: diracdec.c:569
DiracContext::globalmc
struct DiracContext::@57 globalmc[2]
dirac_decode_end
static av_cold int dirac_decode_end(AVCodecContext *avctx)
Definition: diracdec.c:424
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
Plane::yoffset
uint8_t yoffset
Definition: diracdec.c:121
Plane::yblen
uint8_t yblen
Definition: diracdec.c:115
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
Plane::height
int height
Definition: cfhd.h:59
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
DiracContext::gb
GetBitContext gb
Definition: diracdec.c:140
init_obmc_weight_row
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Definition: diracdec.c:1572
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
DiracContext::codeblock_mode
unsigned codeblock_mode
Definition: diracdec.c:170
size
int size
Definition: twinvq_data.h:11134
DiracContext::chroma_x_shift
int chroma_x_shift
Definition: diracdec.c:145
SubBand::length
unsigned length
Definition: diracdec.c:102
DiracContext::seq
AVDiracSeqHeader seq
Definition: diracdec.c:141
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:92
DiracContext::bytes
AVRational bytes
Definition: diracdec.c:188
dirac_vlc.h
DiracContext::weight_log2denom
unsigned weight_log2denom
Definition: diracdec.c:208
SubBand
Definition: cfhd.h:44
DiracContext::thread_buf
uint8_t * thread_buf
Definition: diracdec.c:175
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
height
#define height
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1333
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
Plane::width
int width
Definition: cfhd.h:58
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
dirac_get_se_golomb
static int dirac_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:359
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
add_dc
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
Definition: diracdec.c:1740
DIRAC_PCODE_SEQ_HEADER
@ DIRAC_PCODE_SEQ_HEADER
Definition: dirac.h:58
get_buffer_with_edge
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
Definition: diracdec.c:1966
pred_block_mode
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
Definition: diracdec.c:1340
decode_hq_slice
static int decode_hq_slice(DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
VC-2 Specification -> 13.5.3 hq_slice(sx,sy)
Definition: diracdec.c:841
subband_coeffs
static int subband_coeffs(DiracContext *s, int x, int y, int p, SliceCoeffs c[MAX_DWT_LEVELS])
Definition: diracdec.c:820
subband_nb
@ subband_nb
Definition: diracdec.c:246
init_obmc_weights
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
Definition: diracdec.c:1605
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
Plane::stride
ptrdiff_t stride
Definition: cfhd.h:60
DiracContext::num_x
unsigned num_x
Definition: diracdec.c:172
DiracContext::prefix_bytes
unsigned prefix_bytes
Definition: diracdec.c:193
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
DiracBlock
Definition: diracdec.c:82
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
DiracContext::low_delay
int low_delay
Definition: diracdec.c:154
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1389
UNPACK_ARITH
#define UNPACK_ARITH(n, type)
Definition: diracdec.c:450
alloc_sequence_buffers
static int alloc_sequence_buffers(DiracContext *s)
Definition: diracdec.c:284
DiracDSPContext
Definition: diracdsp.h:30
DIRAC_REF_MASK_REF2
#define DIRAC_REF_MASK_REF2
Definition: diracdec.c:60
PARSE_VALUES
#define PARSE_VALUES(type, x, gb, ebits, buf1, buf2)
Definition: diracdec.c:710
decode_block_params
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1441
ff_dirac_golomb_read_16bit
int ff_dirac_golomb_read_16bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1093
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
DiracContext::globalmc_flag
int globalmc_flag
Definition: diracdec.c:158
CTX_PMODE_REF2
#define CTX_PMODE_REF2
Definition: dirac_arith.h:68
av_always_inline
#define av_always_inline
Definition: attributes.h:49
DiracFrame::avframe
AVFrame * avframe
Definition: diracdec.c:75
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
dirac_arith.h
ff_dirac_init_arith_tables
av_cold void ff_dirac_init_arith_tables(void)
Definition: dirac_arith.c:86
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
MAX_BLOCKSIZE
#define MAX_BLOCKSIZE
Definition: diracdec.c:54
mc_subpel
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
Definition: diracdec.c:1645
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
DiracSlice
Definition: diracdec.c:127
interpolate_refplane
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
Definition: diracdec.c:1828
DWTContext
Definition: dirac_dwt.h:54
SliceCoeffs::tot_h
int tot_h
Definition: diracdec.c:815
DiracContext::core_syntax
int core_syntax
Definition: diracdec.c:153
DiracVersionInfo::minor
int minor
Definition: dirac.h:78
avcodec.h
DiracContext::mv_precision
uint8_t mv_precision
Definition: diracdec.c:206
DiracContext::height
unsigned height
Definition: diracdec.c:184
SubBand::coeff_data
const uint8_t * coeff_data
Definition: diracdec.c:103
AVDiracSeqHeader
Definition: dirac.h:81
mid_pred
#define mid_pred
Definition: mathops.h:97
SliceCoeffs::top
int top
Definition: diracdec.c:814
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
Plane::xoffset
uint8_t xoffset
Definition: diracdec.c:120
SliceCoeffs
Definition: diracdec.c:812
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
SubBand::pshift
int pshift
Definition: cfhd.h:52
DiracContext::pan_tilt
int pan_tilt[2]
Definition: diracdec.c:198
SubBand::quant
int quant
Definition: cfhd.h:53
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
remove_frame
static DiracFrame * remove_frame(DiracFrame *framelist[], int picnum)
Definition: diracdec.c:255
left
Neighbouring block in the Snow codec documentation: left and top are the corresponding neighbours unless they fall outside the image, in which case the Null block is used; the scaled left, top and top-right vectors feed Snow's median motion vector prediction (the full description, including the half-pel filter coefficients and the range coder state table, is in snow.txt).
Definition: snow.txt:386
mpeg12data.h
ff_dirac_golomb_read_32bit
int ff_dirac_golomb_read_32bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1113
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:59
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
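A minimal usage sketch for this helper inside a decoder; avctx is assumed to be the decoder's AVCodecContext and the 4:3 ratio merely stands in for whatever the bitstream actually signals:

    /* Sketch: apply a sample aspect ratio parsed from the bitstream (4:3 is illustrative). */
    AVRational sar = (AVRational){ 4, 3 };
    int ret = ff_set_sar(avctx, sar);
    if (ret < 0)
        return ret;    /* an invalid aspect ratio is rejected */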
AVDiracSeqHeader::pix_fmt
enum AVPixelFormat pix_fmt
Definition: dirac.h:106
decode_subband_golomb
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
Definition: diracdec.c:645
DiracContext
Definition: diracdec.c:134
ff_dirac_qoffset_intra_tab
const int32_t ff_dirac_qoffset_intra_tab[120]
Definition: diractab.c:53
CTX_SB_DATA
#define CTX_SB_DATA
Definition: dirac_arith.h:66
DiracContext::add_obmc
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
Definition: diracdec.c:229
AVCodecContext
main external API structure.
Definition: avcodec.h:526
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1825
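A hedged sketch of the calling convention: the job struct and the names RowJob, decode_rows, run_jobs are illustrative, not part of this decoder. The point is that execute() calls func once per element of the arg2 array, with elements size bytes apart:

    #include "avcodec.h"

    /* Illustrative job description; not a type from diracdec.c. */
    typedef struct RowJob { int first_row, last_row; } RowJob;

    static int decode_rows(AVCodecContext *avctx, void *arg)
    {
        RowJob *job = arg;
        /* ... process rows job->first_row .. job->last_row ... */
        return 0;
    }

    static int run_jobs(AVCodecContext *avctx, RowJob *jobs, int num_jobs)
    {
        /* One call of decode_rows() per array element, possibly spread over several threads. */
        return avctx->execute(avctx, decode_rows, jobs, NULL, num_jobs, sizeof(*jobs));
    }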
subband_hl
@ subband_hl
Definition: diracdec.c:243
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
DiracContext::all_frames
DiracFrame all_frames[MAX_FRAMES]
Definition: diracdec.c:238
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1859
mpegvideoencdsp.h
CALC_PADDING
#define CALC_PADDING(size, depth)
Definition: diracdec.c:69
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
DiracContext::threads_num_buf
int threads_num_buf
Definition: diracdec.c:176
DiracContext::vdsp
VideoDSPContext vdsp
Definition: diracdec.c:137
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:75
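For a decoder that sets this capability, the caller has to drain delayed pictures at end of stream. A minimal sketch using the public API, assuming dec_ctx is an opened decoder context and frame an allocated AVFrame:

    #include <libavcodec/avcodec.h>

    /* Drain delayed frames: a NULL packet enters draining mode, then pull until EOF. */
    avcodec_send_packet(dec_ctx, NULL);
    while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
        /* ... consume the delayed frame ... */
        av_frame_unref(frame);
    }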
CTX_GLOBAL_BLOCK
#define CTX_GLOBAL_BLOCK
Definition: dirac_arith.h:69
Plane
Definition: cfhd.h:57
DiracFrame::reference
int reference
Definition: diracdec.c:79
divide3
static int divide3(int x)
Definition: diracdec.c:250
VideoDSPContext
Definition: videodsp.h:41
Plane::xblen
uint8_t xblen
Definition: diracdec.c:114
DiracContext::quant
uint8_t quant[MAX_DWT_LEVELS][4]
Definition: diracdec.c:189
DiracContext::plane
Plane plane[3]
Definition: diracdec.c:144
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
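A short sketch of the usual pattern in a decoder's header parsing; seq_width and seq_height stand for values read from the bitstream:

    /* Validate and apply dimensions parsed from the sequence header (names illustrative). */
    int ret = ff_set_dimensions(avctx, seq_width, seq_height);
    if (ret < 0)
        return ret;    /* out-of-range dimensions are rejected */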
propagate_block_data
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode.
Definition: diracdec.c:1481
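The brief describes a fan-out of one decoded block over all block positions its superblock covers. A rough sketch under the assumption of a row-major block array with the given stride (illustrative only, not necessarily the decoder's exact code):

    /* Copy the block at the top-left of the covered area to every other position. */
    static void fan_out_block(DiracBlock *block, int stride, int size)
    {
        for (int y = 0; y < size; y++)
            for (int x = 0; x < size; x++)
                if (x || y)
                    block[y * stride + x] = *block;
    }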
DiracContext::buffer_stride
int buffer_stride
Definition: diracdec.c:223
DiracContext::slice_params_num_buf
int slice_params_num_buf
Definition: diracdec.c:180
quant
const uint8_t * quant
Definition: vorbis_enc_data.h:458
Plane::ybsep
uint8_t ybsep
Definition: diracdec.c:118
AVDiracSeqHeader::width
unsigned width
Definition: dirac.h:82
CTX_PMODE_REF1
#define CTX_PMODE_REF1
Definition: dirac_arith.h:67
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
global_mv
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
Definition: diracdec.c:1425
decode_subband
static void decode_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, SubBand *b2)
Definition: diracdec.c:722
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DiracContext::codeblock
struct DiracContext::@54 codeblock[MAX_DWT_LEVELS+1]
DiracContext::avg_pixels_tab
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:228
videodsp.h
DiracContext::ref_pics
DiracFrame * ref_pics[2]
Definition: diracdec.c:234
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
bytestream.h
DiracContext::sbsplit
uint8_t * sbsplit
Definition: diracdec.c:215
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
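Because linesize is a byte stride that may exceed the visible width (alignment padding), per-row access should always go through it. A small sketch for an 8-bit luma plane:

    /* Walk plane 0 row by row; for an 8-bit format only the first frame->width bytes
     * of each row are image data, anything beyond that is alignment padding. */
    for (int y = 0; y < frame->height; y++) {
        const uint8_t *row = frame->data[0] + y * frame->linesize[0];
        /* ... read frame->width samples from row ... */
    }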
AVDiracSeqHeader::height
unsigned height
Definition: dirac.h:83
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CTX_DELTA_Q_DATA
@ CTX_DELTA_Q_DATA
Definition: dirac_arith.h:56
dirac_decode_data_unit
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
Definition: diracdec.c:2119
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
dirac_decode_init
static av_cold int dirac_decode_init(AVCodecContext *avctx)
Definition: diracdec.c:385
h
h
Definition: vp9dsp_template.c:2038
ff_diracdsp_init
av_cold void ff_diracdsp_init(DiracDSPContext *c)
Definition: diracdsp.c:219
dirac_unpack_idwt_params
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
Definition: diracdec.c:1230
DiracContext::zrs_exp
unsigned zrs_exp
Definition: diracdec.c:201
DiracContext::obmc_weight
uint8_t obmc_weight[3][MAX_BLOCKSIZE *MAX_BLOCKSIZE]
Definition: diracdec.c:225
DWTPlane::width
int width
Definition: dirac_dwt.h:38
dirac_decode_flush
static void dirac_decode_flush(AVCodecContext *avctx)
Definition: diracdec.c:416
int
int
Definition: ffmpeg_filter.c:192
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:418
DiracContext::frame_number
int64_t frame_number
Definition: diracdec.c:143
DiracArith::error
int error
Definition: dirac_arith.h:84
AVDiracSeqHeader::color_primaries
enum AVColorPrimaries color_primaries
Definition: dirac.h:108
AVCodecContext::execute2
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1845
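Same idea as the execute() sketch earlier, but the worker also receives a job number and a thread number, so arg2 can stay a single shared context. The slice names below are illustrative:

    /* Hypothetical worker: jobnr picks the slice, threadnr can index per-thread scratch space. */
    static int decode_one_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
    {
        SliceArray *slices = arg;                            /* illustrative type */
        return decode_slice(&slices->s[jobnr], threadnr);    /* illustrative helper */
    }

    ret = avctx->execute2(avctx, decode_one_slice, slices, NULL, num_slices);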
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
CTX_DC_DATA
#define CTX_DC_DATA
Definition: dirac_arith.h:73
get_delayed_pic
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
Definition: diracdec.c:2084
SubBand::height
int height
Definition: cfhd.h:51
DiracContext::pshift
int pshift
Definition: diracdec.c:149
MAX_FRAMES
#define MAX_FRAMES
Definition: diracdec.c:52
SubBand::orientation
int orientation
Definition: cfhd.h:46
init_obmc_weight
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
Definition: diracdec.c:1586
block_mc
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
Definition: diracdec.c:1756
DWTPlane::height
int height
Definition: dirac_dwt.h:39
AV_WN16
#define AV_WN16(p, v)
Definition: intreadwrite.h:372