FFmpeg
snowdec.c
/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/emms.h"
#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "snow_dwt.h"
#include "snow.h"

#include "rangecoder.h"
#include "mathops.h"

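/* Motion-compensated prediction for one row of macroblocks, working on the
 * slice buffer rather than on a whole plane. For keyframes (or with debug&512)
 * no prediction is done: with add set, the slice is converted from IDWT
 * coefficients to clipped 8-bit output; otherwise the DC offset is removed.
 * For inter frames, OBMC prediction is added via add_yblock(), and motion
 * vectors are exported as AVMotionVector entries when s->avmv is allocated. */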
static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
//                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
                    line[x] -= 128 << FRAC_BITS;
//                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 1, sb, old_buffer, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 0, plane_index);
    }

    if(s->avmv && mb_y < mb_h && plane_index == 0)
        for(mb_x=0; mb_x<mb_w; mb_x++){
            AVMotionVector *avmv = s->avmv + s->avmv_index;
            const int b_width = s->b_width  << s->block_max_depth;
            const int b_stride= b_width;
            BlockNode *bn= &s->block[mb_x + mb_y*b_stride];

            if (bn->type)
                continue;

            s->avmv_index++;

            avmv->w = block_w;
            avmv->h = block_h;
            avmv->dst_x = block_w*mb_x - block_w/2;
            avmv->dst_y = block_h*mb_y - block_h/2;
            avmv->motion_scale = 8;
            avmv->motion_x = bn->mx * s->mv_scale;
            avmv->motion_y = bn->my * s->mv_scale;
            avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
            avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
            avmv->source= -1 - bn->ref;
            avmv->flags = 0;
        }
}

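/* Reconstruct one horizontal slice [start_y, h) of a subband from its
 * run-coded coefficient list (b->x_coeff): each line is zeroed, then the
 * stored nonzero coefficients are dequantized (qmul/qadd) and written at
 * their x positions. save_state[0] carries the coefficient index from one
 * slice to the next. */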
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

    if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
        qadd= 0;
        qmul= 1<<QEXPSHIFT;
    }

    /* If we are on the second or later slice, restore our index. */
    if (start_y != 0)
        new_index = save_state[0];


    for(y=start_y; y<h; y++){
        int x = 0;
        int v;
        IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
        memset(line, 0, b->width*sizeof(IDWTELEM));
        v = b->x_coeff[new_index].coeff;
        x = b->x_coeff[new_index++].x;
        while(x < w){
            register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
            register int u= -(v&1);
            line[x] = (t^u) - u;

            v = b->x_coeff[new_index].coeff;
            x = b->x_coeff[new_index++].x;
        }
    }

    /* Save our variables for the next slice. */
    save_state[0] = new_index;

    return;
}

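/* Recursively decode the block tree for node (x, y) at the given level.
 * A leaf decodes either intra colour deltas or a reference index plus a
 * motion-vector residual, both predicted from the left/top/top-right
 * neighbours; otherwise the node is split and its four children are decoded. */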
static int decode_q_branch(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int res;

    if(s->keyframe){
        set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
        return 0;
    }

    if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
        int type, mx, my;
        int l = left->color[0];
        int cb= left->color[1];
        int cr= left->color[2];
        unsigned ref = 0;
        int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
        int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
        int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));

        type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
        if(type){
            int ld, cbd, crd;
            pred_mv(s, &mx, &my, 0, left, top, tr);
            ld = get_symbol(&s->c, &s->block_state[32], 1);
            if (ld < -255 || ld > 255) {
                return AVERROR_INVALIDDATA;
            }
            l += ld;
            if (s->nb_planes > 2) {
                cbd = get_symbol(&s->c, &s->block_state[64], 1);
                crd = get_symbol(&s->c, &s->block_state[96], 1);
                if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
                    return AVERROR_INVALIDDATA;
                }
                cb += cbd;
                cr += crd;
            }
        }else{
            if(s->ref_frames > 1)
                ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
            if (ref >= s->ref_frames) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
                return AVERROR_INVALIDDATA;
            }
            pred_mv(s, &mx, &my, ref, left, top, tr);
            mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
            my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
        }
        set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
    }else{
        if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
            return res;
    }
    return 0;
}

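/* Inverse quantization, applied in place to one slice of a subband held in
 * the slice buffer. Used for the LL band, whose coefficients are first DC
 * de-correlated and only then dequantized. Skipped entirely for lossless. */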
static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=start_y; y<end_y; y++){
//        DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            int i= line[x];
            if(i<0){
                line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                line[x]= (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
}

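/* Undo the DC (LL band) prediction for one slice: each sample is predicted
 * from its left, top and top-left/top-right neighbours (median or gradient
 * predictor) and the stored residual is turned back into an absolute value. */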
static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
    const int w= b->width;
    int x,y;

    IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
    IDWTELEM * prev;

    if (start_y != 0)
        line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

    for(y=start_y; y<end_y; y++){
        prev = line;
//        line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            if(x){
                if(use_median){
                    if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
                    else  line[x] += line[x - 1];
                }else{
                    if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
                    else  line[x] += line[x - 1];
                }
            }else{
                if(y) line[x] += prev[x];
            }
        }
    }
}

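/* Read the per-subband quantizer logs from the header. The second chroma
 * plane reuses the values of the first, and orientation 2 of each level
 * reuses the value coded for orientation 1. */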
static void decode_qlogs(SnowContext *s){
    int plane_index, level, orientation;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                int q;
                if     (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
                else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
                else                    q= get_symbol(&s->c, s->header_state, 1);
                s->plane[plane_index].band[level][orientation].qlog= q;
            }
        }
    }
}

#define GET_S(dst, check) \
    tmp= get_symbol(&s->c, s->header_state, 0);\
    if(!(check)){\
        av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
        return AVERROR_INVALIDDATA;\
    }\
    dst= tmp;

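/* Parse the frame header: keyframe flag, global parameters (only present on
 * keyframes), optional half-pel filter coefficients and qlog updates on inter
 * frames, and the per-frame deltas of qlog, mv_scale, qbias and
 * block_max_depth, with sanity checks on all of them. */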
static int decode_header(SnowContext *s){
    int plane_index, tmp;
    uint8_t kstate[32];

    memset(kstate, MID_STATE, sizeof(kstate));

    s->keyframe= get_rac(&s->c, kstate);
    if(s->keyframe || s->always_reset){
        ff_snow_reset_contexts(s);
        s->spatial_decomposition_type=
        s->qlog=
        s->qbias=
        s->mv_scale=
        s->block_max_depth= 0;
    }
    if(s->keyframe){
        GET_S(s->version, tmp <= 0U)
        s->always_reset= get_rac(&s->c, s->header_state);
        s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
        s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
        GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
        s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
        if (s->colorspace_type == 1) {
            s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
            s->nb_planes = 1;
        } else if(s->colorspace_type == 0) {
            s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
            s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);

            if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
            }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
            } else {
                av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
                s->chroma_h_shift = s->chroma_v_shift = 1;
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
                return AVERROR_INVALIDDATA;
            }
            s->nb_planes = 3;
        } else {
            av_log(s, AV_LOG_ERROR, "unsupported color space\n");
            s->chroma_h_shift = s->chroma_v_shift = 1;
            s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            return AVERROR_INVALIDDATA;
        }


        s->spatial_scalability= get_rac(&s->c, s->header_state);
//        s->rate_scalability= get_rac(&s->c, s->header_state);
        GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
        s->max_ref_frames++;

        decode_qlogs(s);
    }

    if(!s->keyframe){
        if(get_rac(&s->c, s->header_state)){
            for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
                int htaps, i, sum=0;
                Plane *p= &s->plane[plane_index];
                p->diag_mc= get_rac(&s->c, s->header_state);
                htaps= get_symbol(&s->c, s->header_state, 0);
                if((unsigned)htaps >= HTAPS_MAX/2 - 1)
                    return AVERROR_INVALIDDATA;
                htaps = htaps*2 + 2;
                p->htaps= htaps;
                for(i= htaps/2; i; i--){
                    unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
                    if (hcoeff > 127)
                        return AVERROR_INVALIDDATA;
                    p->hcoeff[i]= hcoeff * (1-2*(i&1));
                    sum += p->hcoeff[i];
                }
                p->hcoeff[0]= 32-sum;
            }
            s->plane[2].diag_mc= s->plane[1].diag_mc;
            s->plane[2].htaps = s->plane[1].htaps;
            memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
        }
        if(get_rac(&s->c, s->header_state)){
            GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
            decode_qlogs(s);
        }
    }

    s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->spatial_decomposition_type > 1U){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
        return AVERROR_INVALIDDATA;
    }
    if(FFMIN(s->avctx->width>>s->chroma_h_shift,
             s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
        return AVERROR_INVALIDDATA;
    }
    if (s->avctx->width > 65536-4) {
        av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
        return AVERROR_INVALIDDATA;
    }


    s->qlog           += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->mv_scale       += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->qbias          += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
        av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
        s->block_max_depth= 0;
        s->mv_scale = 0;
        return AVERROR_INVALIDDATA;
    }
    if (FFABS(s->qbias) > 127) {
        av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
        s->qbias = 0;
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

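/* Decode the block tree for every macroblock of the frame, bailing out if the
 * range coder has run past the end of the input buffer. */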
static int decode_blocks(SnowContext *s){
    int x, y;
    int w= s->b_width;
    int h= s->b_height;
    int res;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            if (s->c.bytestream >= s->c.bytestream_end)
                return AVERROR_INVALIDDATA;
            if ((res = decode_q_branch(s, 0, x, y)) < 0)
                return res;
        }
    }
    return 0;
}

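/* Main decode entry point: initialize the range decoder on the packet, parse
 * the header, decode the block tree, then, per plane, unpack the subband
 * coefficients and run the slice-based pipeline (subband decode, DC
 * de-correlation, dequantization, buffered inverse DWT and OBMC prediction)
 * one macroblock row at a time, releasing slice-buffer lines as soon as they
 * have been written to the output picture. Motion vectors are exported as
 * frame side data when requested. */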
static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SnowContext *s = avctx->priv_data;
    RangeCoder * const c= &s->c;
    int bytes_read;
    int level, orientation, plane_index;
    int res;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
    if ((res = decode_header(s)) < 0)
        return res;
    if ((res=ff_snow_common_init_after_header(avctx)) < 0)
        return res;

    // realloc slice buffer for the case that spatial_decomposition_count changed
    ff_slice_buffer_destroy(&s->sb);
    if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
                                    (MB_SIZE >> s->block_max_depth) +
                                    s->spatial_decomposition_count * 11 + 1,
                                    s->plane[0].width,
                                    s->spatial_idwt_buffer)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
                                              && p->hcoeff[1]==-10
                                              && p->hcoeff[2]==2;
    }

    ff_snow_alloc_blocks(s);

    if((res = ff_snow_frame_start(s)) < 0)
        return res;

    s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    //keyframe flag duplication mess FIXME
    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR,
               "keyframe:%d qlog:%d qbias: %d mvscale: %d "
               "decomposition_type:%d decomposition_count:%d\n",
               s->keyframe, s->qlog, s->qbias, s->mv_scale,
               s->spatial_decomposition_type,
               s->spatial_decomposition_count
              );

    if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
        size_t size;
        res = av_size_mult(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2), &size);
        if (res)
            return res;
        av_fast_malloc(&s->avmv, &s->avmv_size, size);
        if (!s->avmv)
            return AVERROR(ENOMEM);
    } else {
        s->avmv_size = 0;
        av_freep(&s->avmv);
    }
    s->avmv_index = 0;

    if ((res = decode_blocks(s)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        int w= p->width;
        int h= p->height;
        int x, y;
        int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */

        if(s->avctx->debug&2048){
            memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
            predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);

            for(y=0; y<h; y++){
                for(x=0; x<w; x++){
                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                }
            }
        }

        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1 : 0; orientation<4; orientation++){
                SubBand *b= &p->band[level][orientation];
                unpack_coeffs(s, b, b->parent, orientation);
            }
        }

        {
            const int mb_h= s->b_height << s->block_max_depth;
            const int block_size = MB_SIZE >> s->block_max_depth;
            const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
            int mb_y;
            DWTCompose cs[MAX_DECOMPOSITIONS];
            int yd=0, yq=0;
            int y;
            int end_y;

            ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
            for(mb_y=0; mb_y<=mb_h; mb_y++){

                int slice_starty = block_h*mb_y;
                int slice_h = block_h*(mb_y+1);

                if (!(s->keyframe || s->avctx->debug&512)){
                    slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
                    slice_h -= (block_h >> 1);
                }

                for(level=0; level<s->spatial_decomposition_count; level++){
                    for(orientation=level ? 1 : 0; orientation<4; orientation++){
                        SubBand *b= &p->band[level][orientation];
                        int start_y;
                        int end_y;
                        int our_mb_start = mb_y;
                        int our_mb_end = (mb_y + 1);
                        const int extra= 3;
                        start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
                        end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
                        if (!(s->keyframe || s->avctx->debug&512)){
                            start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                            end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                        }
                        start_y = FFMIN(b->height, start_y);
                        end_y = FFMIN(b->height, end_y);

                        if (start_y != end_y){
                            if (orientation == 0){
                                SubBand * correlate_band = &p->band[0][0];
                                int correlate_end_y = FFMIN(b->height, end_y + 1);
                                int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
                                decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
                                correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
                                dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
                            }
                            else
                                decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
                        }
                    }
                }

                for(; yd<slice_h; yd+=4){
                    ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
                }

                if(s->qlog == LOSSLESS_QLOG){
                    for(; yq<slice_h && yq<h; yq++){
                        IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
                        for(x=0; x<w; x++){
                            line[x] *= 1<<FRAC_BITS;
                        }
                    }
                }

                predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);

                y = FFMIN(p->height, slice_starty);
                end_y = FFMIN(p->height, slice_h);
                while(y < end_y)
                    ff_slice_buffer_release(&s->sb, y++);
            }

            ff_slice_buffer_flush(&s->sb);
        }

    }

    emms_c();

    ff_snow_release_buffer(avctx);

    if(!(s->avctx->debug&2048))
        res = av_frame_ref(picture, s->current_picture);
    else
        res = av_frame_ref(picture, s->mconly_picture);
    if (res >= 0 && s->avmv_index) {
        AVFrameSideData *sd;

        sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
        if (!sd)
            return AVERROR(ENOMEM);
        memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
    }

    if (res < 0)
        return res;

    *got_frame = 1;

    bytes_read= c->bytestream - c->bytestream_start;
    if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME

    return bytes_read;
}

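/* Free everything the decoder allocated: the slice buffer, the common Snow
 * state and the exported motion-vector array. */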
static av_cold int decode_end(AVCodecContext *avctx)
{
    SnowContext *s = avctx->priv_data;

    ff_slice_buffer_destroy(&s->sb);

    ff_snow_common_end(s);

    s->avmv_size = 0;
    av_freep(&s->avmv);

    return 0;
}

const FFCodec ff_snow_decoder = {
    .p.name         = "snow",
    CODEC_LONG_NAME("Snow"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_SNOW,
    .priv_data_size = sizeof(SnowContext),
    .init           = ff_snow_common_init,
    .close          = decode_end,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};