FFmpeg
snowdec.c
/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "snow_dwt.h"
#include "internal.h"
#include "snow.h"

#include "rangecoder.h"
#include "mathops.h"

#include "mpegvideo.h"
#include "h263.h"
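
/*
 * Reconstruct one row of macroblocks of a plane from the slice buffer.
 * For keyframes (or with the motion-compensation-only debug flag set) the
 * wavelet output is simply biased, clipped and written to dst8 (add), or the
 * bias is subtracted again (!add). Otherwise every block of the row is motion
 * compensated with OBMC via add_yblock(). When motion vector export is
 * enabled, the vectors of non-intra luma blocks are appended to s->avmv.
 */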
static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
//                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
                    line[x] -= 128 << FRAC_BITS;
//                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 1, sb, old_buffer, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 0, plane_index);
    }

    if(s->avmv && mb_y < mb_h && plane_index == 0)
        for(mb_x=0; mb_x<mb_w; mb_x++){
            AVMotionVector *avmv = s->avmv + s->avmv_index;
            const int b_width = s->b_width  << s->block_max_depth;
            const int b_stride= b_width;
            BlockNode *bn= &s->block[mb_x + mb_y*b_stride];

            if (bn->type)
                continue;

            s->avmv_index++;

            avmv->w = block_w;
            avmv->h = block_h;
            avmv->dst_x = block_w*mb_x - block_w/2;
            avmv->dst_y = block_h*mb_y - block_h/2;
            avmv->motion_scale = 8;
            avmv->motion_x = bn->mx * s->mv_scale;
            avmv->motion_y = bn->my * s->mv_scale;
            avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
            avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
            avmv->source= -1 - bn->ref;
            avmv->flags = 0;
        }
}

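/*
 * Expand the (x, coeff) list produced by unpack_coeffs() for one subband into
 * the slice buffer for the rows [start_y, h), applying the quantizer step and
 * bias. save_state[0] carries the read index across slices.
 */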
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

    if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
        qadd= 0;
        qmul= 1<<QEXPSHIFT;
    }

    /* If we are on the second or later slice, restore our index. */
    if (start_y != 0)
        new_index = save_state[0];


    for(y=start_y; y<h; y++){
        int x = 0;
        int v;
        IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
        memset(line, 0, b->width*sizeof(IDWTELEM));
        v = b->x_coeff[new_index].coeff;
        x = b->x_coeff[new_index++].x;
        while(x < w){
            register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
            register int u= -(v&1);
            line[x] = (t^u) - u;

            v = b->x_coeff[new_index].coeff;
            x = b->x_coeff[new_index++].x;
        }
    }

    /* Save our variables for the next slice. */
    save_state[0] = new_index;

    return;
}

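/*
 * Recursively decode the quadtree node at (x, y) on the given level.
 * Leaf nodes carry either intra colour deltas or a reference index and a
 * motion vector residual, both predicted from the left/top/top-right
 * neighbours; non-leaf nodes recurse into their four children.
 * Returns 0 on success or a negative AVERROR code on invalid data.
 */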
static int decode_q_branch(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int res;

    if(s->keyframe){
        set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
        return 0;
    }

    if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
        int type, mx, my;
        int l = left->color[0];
        int cb= left->color[1];
        int cr= left->color[2];
        unsigned ref = 0;
        int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
        int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
        int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));

        type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
        if(type){
            int ld, cbd, crd;
            pred_mv(s, &mx, &my, 0, left, top, tr);
            ld = get_symbol(&s->c, &s->block_state[32], 1);
            if (ld < -255 || ld > 255) {
                return AVERROR_INVALIDDATA;
            }
            l += ld;
            if (s->nb_planes > 2) {
                cbd = get_symbol(&s->c, &s->block_state[64], 1);
                crd = get_symbol(&s->c, &s->block_state[96], 1);
                if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
                    return AVERROR_INVALIDDATA;
                }
                cb += cbd;
                cr += crd;
            }
        }else{
            if(s->ref_frames > 1)
                ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
            if (ref >= s->ref_frames) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
                return AVERROR_INVALIDDATA;
            }
            pred_mv(s, &mx, &my, ref, left, top, tr);
            mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
            my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
        }
        set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
    }else{
        if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
            return res;
    }
    return 0;
}

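/*
 * Scale the coefficients of one subband in the slice buffer by the quantizer
 * derived from s->qlog and b->qlog (with rounding bias qadd) for the rows
 * [start_y, end_y). A no-op in lossless mode.
 */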
static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=start_y; y<end_y; y++){
//        DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            int i= line[x];
            if(i<0){
                line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                line[x]= (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
}

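/*
 * Undo the intra-subband decorrelation of a band inside the slice buffer for
 * the rows [start_y, end_y): each sample is predicted from its left and upper
 * neighbours (median or gradient predictor, chosen by use_median) and the
 * stored residual is added back. The decoder uses this only for the LL band.
 */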
static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
    const int w= b->width;
    int x,y;

    IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
    IDWTELEM * prev;

    if (start_y != 0)
        line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

    for(y=start_y; y<end_y; y++){
        prev = line;
//        line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            if(x){
                if(use_median){
                    if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
                    else           line[x] += line[x - 1];
                }else{
                    if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
                    else  line[x] += line[x - 1];
                }
            }else{
                if(y) line[x] += prev[x];
            }
        }
    }
}

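/*
 * Read the per-subband quantizer logs from the header. The second chroma
 * plane reuses the qlogs of the first, and orientation 2 reuses the value
 * decoded for orientation 1 of the same level; all other values are read
 * from the bitstream as signed symbols.
 */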
static void decode_qlogs(SnowContext *s){
    int plane_index, level, orientation;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                int q;
                if     (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
                else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
                else                    q= get_symbol(&s->c, s->header_state, 1);
                s->plane[plane_index].band[level][orientation].qlog= q;
            }
        }
    }
}

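/*
 * Read an unsigned header symbol into dst, validating it with the given
 * check; on failure, log the offending value and return AVERROR_INVALIDDATA.
 */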
#define GET_S(dst, check) \
    tmp= get_symbol(&s->c, s->header_state, 0);\
    if(!(check)){\
        av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
        return AVERROR_INVALIDDATA;\
    }\
    dst= tmp;

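/*
 * Parse the frame header: keyframe flag, keyframe-only global parameters
 * (version, decomposition counts, colorspace and chroma subsampling,
 * reference frame count, qlogs), optional per-plane half-pel filter
 * coefficients on inter frames, and the per-frame deltas of the spatial
 * decomposition type, qlog, mv_scale, qbias and block_max_depth.
 * Returns 0 on success or a negative AVERROR code.
 */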
301 
303  int plane_index, tmp;
304  uint8_t kstate[32];
305 
306  memset(kstate, MID_STATE, sizeof(kstate));
307 
308  s->keyframe= get_rac(&s->c, kstate);
309  if(s->keyframe || s->always_reset){
311  s->spatial_decomposition_type=
312  s->qlog=
313  s->qbias=
314  s->mv_scale=
315  s->block_max_depth= 0;
316  }
317  if(s->keyframe){
318  GET_S(s->version, tmp <= 0U)
319  s->always_reset= get_rac(&s->c, s->header_state);
320  s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
321  s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
322  GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
323  s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
324  if (s->colorspace_type == 1) {
325  s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
326  s->nb_planes = 1;
327  } else if(s->colorspace_type == 0) {
328  s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
329  s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);
330 
331  if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
332  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
333  }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
334  s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
335  }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
336  s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
337  } else {
338  av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
339  s->chroma_h_shift = s->chroma_v_shift = 1;
340  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
341  return AVERROR_INVALIDDATA;
342  }
343  s->nb_planes = 3;
344  } else {
345  av_log(s, AV_LOG_ERROR, "unsupported color space\n");
346  s->chroma_h_shift = s->chroma_v_shift = 1;
347  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
348  return AVERROR_INVALIDDATA;
349  }
350 
351 
352  s->spatial_scalability= get_rac(&s->c, s->header_state);
353 // s->rate_scalability= get_rac(&s->c, s->header_state);
354  GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
355  s->max_ref_frames++;
356 
357  decode_qlogs(s);
358  }
359 
360  if(!s->keyframe){
361  if(get_rac(&s->c, s->header_state)){
362  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
363  int htaps, i, sum=0;
364  Plane *p= &s->plane[plane_index];
365  p->diag_mc= get_rac(&s->c, s->header_state);
366  htaps= get_symbol(&s->c, s->header_state, 0);
367  if((unsigned)htaps >= HTAPS_MAX/2 - 1)
368  return AVERROR_INVALIDDATA;
369  htaps = htaps*2 + 2;
370  p->htaps= htaps;
371  for(i= htaps/2; i; i--){
372  unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
373  if (hcoeff > 127)
374  return AVERROR_INVALIDDATA;
375  p->hcoeff[i]= hcoeff * (1-2*(i&1));
376  sum += p->hcoeff[i];
377  }
378  p->hcoeff[0]= 32-sum;
379  }
380  s->plane[2].diag_mc= s->plane[1].diag_mc;
381  s->plane[2].htaps = s->plane[1].htaps;
382  memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
383  }
384  if(get_rac(&s->c, s->header_state)){
385  GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
386  decode_qlogs(s);
387  }
388  }
389 
390  s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
391  if(s->spatial_decomposition_type > 1U){
392  av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
393  return AVERROR_INVALIDDATA;
394  }
395  if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
396  s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
397  av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
398  return AVERROR_INVALIDDATA;
399  }
400  if (s->avctx->width > 65536-4) {
401  av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
402  return AVERROR_INVALIDDATA;
403  }
404 
405 
406  s->qlog += (unsigned)get_symbol(&s->c, s->header_state, 1);
407  s->mv_scale += (unsigned)get_symbol(&s->c, s->header_state, 1);
408  s->qbias += (unsigned)get_symbol(&s->c, s->header_state, 1);
409  s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
410  if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
411  av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
412  s->block_max_depth= 0;
413  s->mv_scale = 0;
414  return AVERROR_INVALIDDATA;
415  }
416  if (FFABS(s->qbias) > 127) {
417  av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
418  s->qbias = 0;
419  return AVERROR_INVALIDDATA;
420  }
421 
422  return 0;
423 }
424 
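/*
 * Decode the block tree of every top-level block of the frame, bailing out
 * if the bitstream is exhausted or a branch is invalid.
 */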
static int decode_blocks(SnowContext *s){
    int x, y;
    int w= s->b_width;
    int h= s->b_height;
    int res;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            if (s->c.bytestream >= s->c.bytestream_end)
                return AVERROR_INVALIDDATA;
            if ((res = decode_q_branch(s, 0, x, y)) < 0)
                return res;
        }
    }
    return 0;
}

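/*
 * Decode one Snow frame: set up the range decoder over the packet, parse the
 * header, (re)allocate the slice buffer, decode the block tree, then for each
 * plane unpack the subband coefficients and run the buffered inverse DWT
 * slice by slice, interleaved with motion compensation and output through
 * predict_slice_buffered(). Returns the number of bytes consumed or a
 * negative AVERROR code.
 */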
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SnowContext *s = avctx->priv_data;
    RangeCoder * const c= &s->c;
    int bytes_read;
    AVFrame *picture = data;
    int level, orientation, plane_index;
    int res;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
    if ((res = decode_header(s)) < 0)
        return res;
    if ((res=ff_snow_common_init_after_header(avctx)) < 0)
        return res;

    // realloc slice buffer for the case that spatial_decomposition_count changed
    ff_slice_buffer_destroy(&s->sb);
    if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
                                    (MB_SIZE >> s->block_max_depth) +
                                    s->spatial_decomposition_count * 11 + 1,
                                    s->plane[0].width,
                                    s->spatial_idwt_buffer)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
                                              && p->hcoeff[1]==-10
                                              && p->hcoeff[2]==2;
    }

    ff_snow_alloc_blocks(s);

    if((res = ff_snow_frame_start(s)) < 0)
        return res;

    s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    //keyframe flag duplication mess FIXME
    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR,
               "keyframe:%d qlog:%d qbias: %d mvscale: %d "
               "decomposition_type:%d decomposition_count:%d\n",
               s->keyframe, s->qlog, s->qbias, s->mv_scale,
               s->spatial_decomposition_type,
               s->spatial_decomposition_count
              );

    if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
        size_t size;
        res = av_size_mult(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2), &size);
        if (res)
            return res;
        av_fast_malloc(&s->avmv, &s->avmv_size, size);
        if (!s->avmv)
            return AVERROR(ENOMEM);
    } else {
        s->avmv_size = 0;
        av_freep(&s->avmv);
    }
    s->avmv_index = 0;

    if ((res = decode_blocks(s)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        int w= p->width;
        int h= p->height;
        int x, y;
        int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */

        if(s->avctx->debug&2048){
            memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
            predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);

            for(y=0; y<h; y++){
                for(x=0; x<w; x++){
                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                }
            }
        }

        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1 : 0; orientation<4; orientation++){
                SubBand *b= &p->band[level][orientation];
                unpack_coeffs(s, b, b->parent, orientation);
            }
        }

        {
            const int mb_h= s->b_height << s->block_max_depth;
            const int block_size = MB_SIZE >> s->block_max_depth;
            const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
            int mb_y;
            DWTCompose cs[MAX_DECOMPOSITIONS];
            int yd=0, yq=0;
            int y;
            int end_y;

            ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
            for(mb_y=0; mb_y<=mb_h; mb_y++){

                int slice_starty = block_h*mb_y;
                int slice_h = block_h*(mb_y+1);

                if (!(s->keyframe || s->avctx->debug&512)){
                    slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
                    slice_h -= (block_h >> 1);
                }

                for(level=0; level<s->spatial_decomposition_count; level++){
                    for(orientation=level ? 1 : 0; orientation<4; orientation++){
                        SubBand *b= &p->band[level][orientation];
                        int start_y;
                        int end_y;
                        int our_mb_start = mb_y;
                        int our_mb_end = (mb_y + 1);
                        const int extra= 3;
                        start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
                        end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
                        if (!(s->keyframe || s->avctx->debug&512)){
                            start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                            end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                        }
                        start_y = FFMIN(b->height, start_y);
                        end_y = FFMIN(b->height, end_y);

                        if (start_y != end_y){
                            if (orientation == 0){
                                SubBand * correlate_band = &p->band[0][0];
                                int correlate_end_y = FFMIN(b->height, end_y + 1);
                                int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
                                decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
                                correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
                                dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
                            }
                            else
                                decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
                        }
                    }
                }

                for(; yd<slice_h; yd+=4){
                    ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
                }

                if(s->qlog == LOSSLESS_QLOG){
                    for(; yq<slice_h && yq<h; yq++){
                        IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
                        for(x=0; x<w; x++){
                            line[x] *= 1<<FRAC_BITS;
                        }
                    }
                }

                predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);

                y = FFMIN(p->height, slice_starty);
                end_y = FFMIN(p->height, slice_h);
                while(y < end_y)
                    ff_slice_buffer_release(&s->sb, y++);
            }

            ff_slice_buffer_flush(&s->sb);
        }

    }

    emms_c();

    ff_snow_release_buffer(avctx);

    if(!(s->avctx->debug&2048))
        res = av_frame_ref(picture, s->current_picture);
    else
        res = av_frame_ref(picture, s->mconly_picture);
    if (res >= 0 && s->avmv_index) {
        AVFrameSideData *sd;

        sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
        if (!sd)
            return AVERROR(ENOMEM);
        memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
    }

    if (res < 0)
        return res;

    *got_frame = 1;

    bytes_read= c->bytestream - c->bytestream_start;
    if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME

    return bytes_read;
}

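/*
 * Free the decoder: release the slice buffer, the shared Snow context state
 * and the exported motion vector array.
 */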
static av_cold int decode_end(AVCodecContext *avctx)
{
    SnowContext *s = avctx->priv_data;

    ff_slice_buffer_destroy(&s->sb);

    ff_snow_common_end(s);

    s->avmv_size = 0;
    av_freep(&s->avmv);

    return 0;
}

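/*
 * Decoder registration for the Snow codec; frame decoding and teardown are
 * wired to the functions defined above.
 */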
AVCodec ff_snow_decoder = {
    .name           = "snow",
    .long_name      = NULL_IF_CONFIG_SMALL("Snow"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SNOW,
    .priv_data_size = sizeof(SnowContext),
    .init           = ff_snow_common_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};