FFmpeg — snowenc.c (Snow video encoder, source listing)
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "qpeldsp.h"
34 #include "snow_dwt.h"
35 #include "snow.h"
36 
37 #include "rangecoder.h"
38 #include "mathops.h"
39 
40 #include "mpegvideo.h"
41 #include "h263enc.h"
42 
43 #define FF_ME_ITER 3
44 
45 typedef struct SnowEncContext {
49 
50  int lambda;
51  int lambda2;
52  int pass1_rc;
53 
54  int pred;
55  int memc_only;
61 
63  MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MPVEncContext, so this will be removed then (FIXME/XXX)
65 #define ME_CACHE_SIZE 1024
68 
70 
71  uint8_t *emu_edge_buffer;
72 
75 
/* Add a byte offset to ptr, propagating NULL unchanged (used for chroma
 * planes that may be absent, e.g. with grayscale input). */
#define PTR_ADD(ptr, off) ((ptr) ? (ptr) + (off) : NULL)
77 
78 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
79  uint8_t *const ref[3], uint8_t *const ref2[3],
80  int x, int y, int ref_index)
81 {
82  SnowContext *s = c->avctx->priv_data;
83  const int offset[3] = {
84  y*c-> stride + x,
85  ((y*c->uvstride + x) >> s->chroma_h_shift),
86  ((y*c->uvstride + x) >> s->chroma_h_shift),
87  };
88  for (int i = 0; i < 3; i++) {
89  c->src[0][i] = src [i];
90  c->ref[0][i] = PTR_ADD(ref[i], offset[i]);
91  }
92  av_assert2(!ref_index);
93 }
94 
/**
 * Encode the integer v with the range coder using an adaptive
 * exponential-Golomb-like code.
 *
 * Context layout within state[] (see the inline index comments):
 *   state[0]      zero flag,
 *   state[1..10]  unary exponent bits (context capped at 10 via el),
 *   state[11..21] sign bit, indexed by the capped exponent,
 *   state[22..31] mantissa bits.
 *
 * @param c         range coder to write into
 * @param state     base of the adaptive context array for this symbol
 * @param v         value to encode
 * @param is_signed if nonzero, a sign bit is coded after the mantissa
 */
static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
{
    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);
        const int el = FFMIN(e, 10); // cap the context index, not the value

        int i;

        put_rac(c, state + 0, 0); // "not zero" flag

        /* Unary-code the exponent; contexts beyond 10 reuse index 9+1. */
        for (i = 0; i < el; i++)
            put_rac(c, state + 1 + i, 1); //1..10
        for (; i < e; i++)
            put_rac(c, state + 1 + 9, 1); //1..10
        put_rac(c, state + 1 + FFMIN(i, 9), 0);

        /* Mantissa bits, MSB first; high bits share context 22+9. */
        for (i = e - 1; i >= el; i--)
            put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
        for (; i >= 0; i--)
            put_rac(c, state + 22 + i, (a >> i) & 1); //22..31

        if (is_signed)
            put_rac(c, state + 11 + el, v < 0); //11..21
    } else {
        put_rac(c, state + 0, 1); // v == 0: single zero flag
    }
}
122 
/**
 * Encode the non-negative value v with an adaptive Golomb/Rice-style code
 * whose initial divisor parameter is log2 (allowed down to -4).
 *
 * A run of 1 bits subtracts successive chunks of size r = 2^max(log2,0),
 * with log2 (and thus r) growing as the run continues; the remaining
 * log2 low bits of v are then coded directly.
 */
static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
{
    int r = log2 >= 0 ? 1<<log2 : 1;

    av_assert2(v >= 0);
    av_assert2(log2 >= -4);

    while (v >= r) {
        put_rac(c, state + 4 + log2, 1); // "continue" flag
        v -= r;
        log2++;
        if (log2 > 0) r += r; // chunk size doubles once log2 is positive
    }
    put_rac(c, state + 4 + log2, 0); // terminator

    /* Remaining low bits of v, MSB first. */
    for (int i = log2 - 1; i >= 0; i--)
        put_rac(c, state + 31 - i, (v >> i) & 1);
}
141 
143 {
144  int ret;
145 
146  frame->width = s->avctx->width + 2 * EDGE_WIDTH;
147  frame->height = s->avctx->height + 2 * EDGE_WIDTH;
148 
149  ret = ff_encode_alloc_frame(s->avctx, frame);
150  if (ret < 0)
151  return ret;
152  for (int i = 0; frame->data[i]; i++) {
153  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
154  frame->linesize[i] +
155  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
156  frame->data[i] += offset;
157  }
158  frame->width = s->avctx->width;
159  frame->height = s->avctx->height;
160 
161  return 0;
162 }
163 
165 {
166  SnowEncContext *const enc = avctx->priv_data;
167  SnowContext *const s = &enc->com;
168  MPVEncContext *const mpv = &enc->m.s;
169  int plane_index, ret;
170  int i;
171 
172  if (enc->pred == DWT_97
173  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
174  && avctx->global_quality == 0){
175  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
176  return AVERROR(EINVAL);
177  }
178 
179  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type r transform_type
180 
181  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
182  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
183 
184  for(plane_index=0; plane_index<3; plane_index++){
185  s->plane[plane_index].diag_mc= 1;
186  s->plane[plane_index].htaps= 6;
187  s->plane[plane_index].hcoeff[0]= 40;
188  s->plane[plane_index].hcoeff[1]= -10;
189  s->plane[plane_index].hcoeff[2]= 2;
190  s->plane[plane_index].fast_mc= 1;
191  }
192 
193  // Must be before ff_snow_common_init()
194  ff_hpeldsp_init(&s->hdsp, avctx->flags);
195  if ((ret = ff_snow_common_init(avctx)) < 0) {
196  return ret;
197  }
198 
199 #define mcf(dx,dy)\
200  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
201  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
202  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
203  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
204  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
205  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
206 
207  mcf( 0, 0)
208  mcf( 4, 0)
209  mcf( 8, 0)
210  mcf(12, 0)
211  mcf( 0, 4)
212  mcf( 4, 4)
213  mcf( 8, 4)
214  mcf(12, 4)
215  mcf( 0, 8)
216  mcf( 4, 8)
217  mcf( 8, 8)
218  mcf(12, 8)
219  mcf( 0,12)
220  mcf( 4,12)
221  mcf( 8,12)
222  mcf(12,12)
223 
224  ff_me_cmp_init(&enc->mecc, avctx);
225  ret = ff_me_init(&mpv->me, avctx, &enc->mecc, 0);
226  if (ret < 0)
227  return ret;
228  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
229 
231 
232  s->version=0;
233 
234  mpv->c.avctx = avctx;
235  enc->m.bit_rate = avctx->bit_rate;
236  enc->m.lmin = avctx->mb_lmin;
237  enc->m.lmax = avctx->mb_lmax;
238  mpv->c.mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
239 
240  mpv->me.temp =
241  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
242  if (!mpv->me.scratchpad)
243  return AVERROR(ENOMEM);
244 
246 
247  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
248 
249  if(avctx->flags&AV_CODEC_FLAG_PASS1){
250  if(!avctx->stats_out)
251  avctx->stats_out = av_mallocz(256);
252 
253  if (!avctx->stats_out)
254  return AVERROR(ENOMEM);
255  }
256  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
257  ret = ff_rate_control_init(&enc->m);
258  if(ret < 0)
259  return ret;
260  }
262 
263  switch(avctx->pix_fmt){
264  case AV_PIX_FMT_YUV444P:
265 // case AV_PIX_FMT_YUV422P:
266  case AV_PIX_FMT_YUV420P:
267 // case AV_PIX_FMT_YUV411P:
268  case AV_PIX_FMT_YUV410P:
269  s->nb_planes = 3;
270  s->colorspace_type= 0;
271  break;
272  case AV_PIX_FMT_GRAY8:
273  s->nb_planes = 1;
274  s->colorspace_type = 1;
275  break;
276 /* case AV_PIX_FMT_RGB32:
277  s->colorspace= 1;
278  break;*/
279  }
280 
281  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
282  &s->chroma_v_shift);
283  if (ret)
284  return ret;
285 
286  s->input_picture = av_frame_alloc();
287  if (!s->input_picture)
288  return AVERROR(ENOMEM);
289 
290  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
291  return ret;
292 
293  enc->emu_edge_buffer = av_calloc(avctx->width + 128, 2 * (2 * MB_SIZE + HTAPS_MAX - 1));
294  if (!enc->emu_edge_buffer)
295  return AVERROR(ENOMEM);
296 
297  if (enc->motion_est == FF_ME_ITER) {
298  int size= s->b_width * s->b_height << 2*s->block_max_depth;
299  for(i=0; i<s->max_ref_frames; i++){
300  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
301  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
302  if (!s->ref_mvs[i] || !s->ref_scores[i])
303  return AVERROR(ENOMEM);
304  }
305  }
306 
307  return 0;
308 }
309 
/**
 * Sum of the pixels in a w x h rectangle.
 *
 * @param pix       top-left pixel of the rectangle
 * @param line_size stride in bytes between successive rows
 * @param w         rectangle width in pixels
 * @param h         rectangle height in pixels
 * @return sum of all w*h pixel values
 */
static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
{
    int total = 0;

    for (int row = 0; row < h; row++) {
        const uint8_t *line = pix + row * line_size;
        for (int col = 0; col < w; col++)
            total += line[col];
    }
    return total;
}
325 
/**
 * Sum of squared pixel values over a w x w square.
 *
 * The squares are computed directly; this yields exactly the same values
 * as the ff_square_tab lookup the original used (sq[p] == p*p for
 * p in 0..255).
 *
 * @param pix       top-left pixel of the square
 * @param line_size stride in bytes between successive rows
 * @param w         side length of the square in pixels
 * @return sum of the squares of all w*w pixel values
 */
static int pix_norm1(const uint8_t * pix, int line_size, int w)
{
    int total = 0;

    for (int row = 0; row < w; row++) {
        for (int col = 0; col < w; col++) {
            const int v = pix[col];
            total += v * v;
        }
        pix += line_size;
    }
    return total;
}
342 
343 static inline int get_penalty_factor(int lambda, int lambda2, int type){
344  switch(type&0xFF){
345  default:
346  case FF_CMP_SAD:
347  return lambda>>FF_LAMBDA_SHIFT;
348  case FF_CMP_DCT:
349  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
350  case FF_CMP_W53:
351  return (4*lambda)>>(FF_LAMBDA_SHIFT);
352  case FF_CMP_W97:
353  return (2*lambda)>>(FF_LAMBDA_SHIFT);
354  case FF_CMP_SATD:
355  case FF_CMP_DCT264:
356  return (2*lambda)>>FF_LAMBDA_SHIFT;
357  case FF_CMP_RD:
358  case FF_CMP_PSNR:
359  case FF_CMP_SSE:
360  case FF_CMP_NSSE:
361  return lambda2>>FF_LAMBDA_SHIFT;
362  case FF_CMP_BIT:
363  return 1;
364  }
365 }
366 
//FIXME copy&paste
/* Shorthand accessors into the motion-vector predictor array P[10][2]
 * declared in encode_q_branch(); the indices follow the mpegvideo
 * motion-estimation convention. */
#define P_LEFT P[1]
#define P_TOP P[2]
#define P_TOPRIGHT P[3]
#define P_MEDIAN P[4]
#define P_MV1 P[9]
#define FLAG_QPEL 1 //must be 1
374 
/**
 * Recursively choose and encode the best coding mode for the block-tree
 * node at (x, y) on the given level: inter coding with the best reference
 * frame and motion vector, intra flat-colour coding, or a 4-way split.
 *
 * Trial encodings are written to the local range coders pc (inter) and
 * ic (intra) with copies of the coder state; only the winning candidate
 * is copied back into s->c and s->block_state.
 *
 * @return the rate-distortion score of the chosen mode
 */
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
{
    SnowContext *const s = &enc->com;
    MotionEstContext *const c = &enc->m.s.me;
    uint8_t p_buffer[1024];
    uint8_t i_buffer[1024];
    uint8_t p_state[sizeof(s->block_state)];
    uint8_t i_state[sizeof(s->block_state)];
    RangeCoder pc, ic;
    uint8_t *pbbak= s->c.bytestream;
    uint8_t *pbbak_start= s->c.bytestream_start;
    int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
    const int w= s->b_width << s->block_max_depth;
    const int h= s->b_height << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<(LOG2_MB_SIZE - level);
    int trx= (x+1)<<rem_depth;
    int try= (y+1)<<rem_depth;
    /* Neighbouring block nodes; null_block stands in at picture edges. */
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
    const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    int mx=0, my=0;
    int l,cr,cb;
    const int stride= s->current_picture->linesize[0];
    const int uvstride= s->current_picture->linesize[1];
    const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y* stride)*block_w,
                                             PTR_ADD(s->input_picture->data[1], ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)),
                                             PTR_ADD(s->input_picture->data[2], ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift))};
    int P[10][2];
    int16_t last_mv[3][2];
    int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
    const int shift= 1+qpel;
    /* Adaptive coder contexts derived from the neighbours. */
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context= av_log2(2*FFABS(left->mx - top->mx));
    int my_context= av_log2(2*FFABS(left->my - top->my));
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int ref, best_ref, ref_score, ref_mx, ref_my;
    int range = MAX_MV >> (1 + qpel);

    av_assert0(sizeof(s->block_state) >= 256);
    if(s->keyframe){
        /* Keyframes carry no block tree; everything is intra. */
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return 0;
    }

// clip predictors / edge ?

    P_LEFT[0]= left->mx;
    P_LEFT[1]= left->my;
    P_TOP [0]= top->mx;
    P_TOP [1]= top->my;
    P_TOPRIGHT[0]= tr->mx;
    P_TOPRIGHT[1]= tr->my;

    last_mv[0][0]= s->block[index].mx;
    last_mv[0][1]= s->block[index].my;
    last_mv[1][0]= right->mx;
    last_mv[1][1]= right->my;
    last_mv[2][0]= bottom->mx;
    last_mv[2][1]= bottom->my;

    enc->m.s.c.mb_stride = 2;
    enc->m.s.c.mb_x =
    enc->m.s.c.mb_y = 0;
    c->skip= 0;

    av_assert1(c->stride == stride);
    av_assert1(c->uvstride == uvstride);

    c->penalty_factor    = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
    c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
    c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
    c->current_mv_penalty= c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV;

    /* Search window limits in fullpel units, then clipped to +-range. */
    c->xmin = - x*block_w - 16+3;
    c->ymin = - y*block_w - 16+3;
    c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
    c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;

    c->xmin = FFMAX(c->xmin,-range);
    c->xmax = FFMIN(c->xmax, range);
    c->ymin = FFMAX(c->ymin,-range);
    c->ymax = FFMIN(c->ymax, range);

    /* Clip the subpel predictors into the search window. */
    if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
    if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
    if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
    if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
    if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
    if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
    if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);

    P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
    P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);

    if (!y) {
        c->pred_x= P_LEFT[0];
        c->pred_y= P_LEFT[1];
    } else {
        c->pred_x = P_MEDIAN[0];
        c->pred_y = P_MEDIAN[1];
    }

    /* Motion search over all available reference frames; keep the best. */
    score= INT_MAX;
    best_ref= 0;
    for(ref=0; ref<s->ref_frames; ref++){
        init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);

        ref_score = ff_epzs_motion_search(&enc->m.s, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
                                          (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);

        av_assert2(ref_mx >= c->xmin);
        av_assert2(ref_mx <= c->xmax);
        av_assert2(ref_my >= c->ymin);
        av_assert2(ref_my <= c->ymax);

        ref_score = c->sub_motion_search(&enc->m.s, &ref_mx, &ref_my, ref_score,
                                         0, 0, level-LOG2_MB_SIZE+4, block_w);
        ref_score = ff_get_mb_score(&enc->m.s, ref_mx, ref_my, 0, 0,
                                    level-LOG2_MB_SIZE+4, block_w, 0);
        ref_score+= 2*av_log2(2*ref)*c->penalty_factor; // bias towards low ref indices
        if(s->ref_mvs[ref]){
            s->ref_mvs[ref][index][0]= ref_mx;
            s->ref_mvs[ref][index][1]= ref_my;
            s->ref_scores[ref][index]= ref_score;
        }
        if(score > ref_score){
            score= ref_score;
            best_ref= ref;
            mx= ref_mx;
            my= ref_my;
        }
    }
    //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2

    // subpel search
    /* Trial-encode the inter candidate into the local coder pc. */
    base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
    pc= s->c;
    pc.bytestream_start=
    pc.bytestream= p_buffer; //FIXME end/start? and at the other stoo
    memcpy(p_state, s->block_state, sizeof(s->block_state));

    if(level!=s->block_max_depth)
        put_rac(&pc, &p_state[4 + s_context], 1);
    put_rac(&pc, &p_state[1 + left->type + top->type], 0);
    if(s->ref_frames > 1)
        put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
    pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
    put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
    put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
    p_len= pc.bytestream - pc.bytestream_start;
    score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;

    /* Intra candidate: flat colour per plane; iscore is the SSE of the
     * luma against its mean plus the coding cost. */
    block_s= block_w*block_w;
    sum = pix_sum(current_data[0], stride, block_w, block_w);
    l= (sum + block_s/2)/block_s;
    iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;

    if (s->nb_planes > 2) {
        block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
        sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cb= (sum + block_s/2)/block_s;
//      iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
        sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cr= (sum + block_s/2)/block_s;
//      iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
    }else
        cb = cr = 0;

    /* Trial-encode the intra candidate into the local coder ic. */
    ic= s->c;
    ic.bytestream_start=
    ic.bytestream= i_buffer; //FIXME end/start? and at the other stoo
    memcpy(i_state, s->block_state, sizeof(s->block_state));
    if(level!=s->block_max_depth)
        put_rac(&ic, &i_state[4 + s_context], 1);
    put_rac(&ic, &i_state[1 + left->type + top->type], 1);
    put_symbol(&ic, &i_state[32], l-pl , 1);
    if (s->nb_planes > 2) {
        put_symbol(&ic, &i_state[64], cb-pcb, 1);
        put_symbol(&ic, &i_state[96], cr-pcr, 1);
    }
    i_len= ic.bytestream - ic.bytestream_start;
    iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;

    av_assert1(iscore < 255*255*256 + enc->lambda2*10);
    av_assert1(iscore >= 0);
    av_assert1(l>=0 && l<=255);
    av_assert1(pl>=0 && pl<=255);

    if(level==0){
        /* Feed the scene-change detector from the intra/inter balance. */
        int varc= iscore >> 8;
        int vard= score >> 8;
        if (vard <= 64 || vard < varc)
            c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
        else
            c->scene_change_score+= enc->m.s.c.qscale;
    }

    if(level!=s->block_max_depth){
        /* Try splitting into four children and keep that if cheaper. */
        put_rac(&s->c, &s->block_state[4 + s_context], 0);
        score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
        score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead

        if(score2 < score && score2 < iscore)
            return score2;
    }

    /* Commit the cheaper of intra/inter: copy the trial bytes and coder
     * state back into the main coder. */
    if(iscore < score){
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        memcpy(pbbak, i_buffer, i_len);
        s->c= ic;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + i_len;
        set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
        memcpy(s->block_state, i_state, sizeof(s->block_state));
        return iscore;
    }else{
        memcpy(pbbak, p_buffer, p_len);
        s->c= pc;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + p_len;
        set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
        memcpy(s->block_state, p_state, sizeof(s->block_state));
        return score;
    }
}
612 
/**
 * Re-encode the already-decided block tree at (x, y) on the given level
 * into the main range coder s->c (second pass; no mode decisions are made
 * here, the BlockNode contents chosen earlier are simply serialized).
 */
static void encode_q_branch2(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    BlockNode *b= &s->block[index];
    const BlockNode *left = x ? &s->block[index-1] : &null_block;
    const BlockNode *top = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    /* Adaptive contexts, mirroring those used during the decision pass. */
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
    int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;

    if(s->keyframe){
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return;
    }

    if(level!=s->block_max_depth){
        /* Merged if all four children carry identical data; otherwise
         * signal a split and recurse. */
        if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
            put_rac(&s->c, &s->block_state[4 + s_context], 1);
        }else{
            put_rac(&s->c, &s->block_state[4 + s_context], 0);
            encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
            return;
        }
    }
    if(b->type & BLOCK_INTRA){
        /* Intra: colour deltas against the left neighbour. */
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
        put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
        if (s->nb_planes > 2) {
            put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
            put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
        }
        set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
    }else{
        /* Inter: reference index (if several) and MV residual. */
        pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
        if(s->ref_frames > 1)
            put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
        put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
        put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
        set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
    }
}
668 
/**
 * Compute the DC (flat colour) value for one plane of the block at
 * (mb_x, mb_y) as the OBMC-weighted least-squares fit against the input
 * picture: the block is temporarily marked intra with colour 0, the
 * neighbours' OBMC contributions are accumulated, and the value
 * minimizing the weighted residual (ab/aa) is returned, clipped to 0..255.
 */
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, x2, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    IDWTELEM *dst = enc->obmc_scratchpad + plane_index * block_size * block_size * 4; //FIXME change to unsigned
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int index= mb_x + mb_y*b_stride;
    BlockNode *b= &s->block[index];
    BlockNode backup= *b;
    int ab=0; // weighted residual accumulator
    int aa=0; // weight-energy accumulator

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above

    /* Temporarily treat this block as intra/black so only the
     * neighbours' OBMC contributions land in dst. */
    b->type|= BLOCK_INTRA;
    b->color[plane_index]= 0;
    memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));

    for(i=0; i<4; i++){
        int mb_x2= mb_x + (i &1) - 1;
        int mb_y2= mb_y + (i>>1) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
                   x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);

        for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
            for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
                int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
                int obmc_v= obmc[index];
                int d;
                /* Fold the mirrored edge weights in at picture borders. */
                if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
                if(x<0) obmc_v += obmc[index + block_w];
                if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
                if(x+block_w>w) obmc_v += obmc[index - block_w];
                //FIXME precalculate this or simplify it somehow else

                d = -dst[index] + (1<<(FRAC_BITS-1));
                dst[index] = d;
                ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
                aa += obmc_v * obmc_v; //FIXME precalculate this
            }
        }
    }
    *b= backup;

    return av_clip_uint8( ROUNDED_DIV((int64_t)ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
}
728 
729 static inline int get_block_bits(SnowContext *s, int x, int y, int w){
730  const int b_stride = s->b_width << s->block_max_depth;
731  const int b_height = s->b_height<< s->block_max_depth;
732  int index= x + y*b_stride;
733  const BlockNode *b = &s->block[index];
734  const BlockNode *left = x ? &s->block[index-1] : &null_block;
735  const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
736  const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
737  const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
738  int dmx, dmy;
739 // int mx_context= av_log2(2*FFABS(left->mx - top->mx));
740 // int my_context= av_log2(2*FFABS(left->my - top->my));
741 
742  if(x<0 || x>=b_stride || y>=b_height)
743  return 0;
744 /*
745 1 0 0
746 01X 1-2 1
747 001XX 3-6 2-3
748 0001XXX 7-14 4-7
749 00001XXXX 15-30 8-15
750 */
751 //FIXME try accurate rate
752 //FIXME intra and inter predictors if surrounding blocks are not the same type
753  if(b->type & BLOCK_INTRA){
754  return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
755  + av_log2(2*FFABS(left->color[1] - b->color[1]))
756  + av_log2(2*FFABS(left->color[2] - b->color[2])));
757  }else{
758  pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
759  dmx-= b->mx;
760  dmy-= b->my;
761  return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
762  + av_log2(2*FFABS(dmy))
763  + av_log2(2*b->ref));
764  }
765 }
766 
/**
 * Rate-distortion cost of one block in one plane: motion-compensates the
 * block into s->current_picture, blends it with the precomputed OBMC
 * window obmc_edged, measures the distortion against the input picture
 * with the configured comparison function, and adds the estimated bit
 * cost of the overlapped neighbour blocks weighted by the penalty factor.
 */
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
                        int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
{
    SnowContext *const s = &enc->com;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    IDWTELEM *pred = enc->obmc_scratchpad + plane_index * block_size * block_size * 4;
    uint8_t *cur = s->scratchbuf;
    uint8_t *tmp = enc->emu_edge_buffer;
    const int b_stride = s->b_width << s->block_max_depth;
    const int b_height = s->b_height<< s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion;
    int rate= 0;
    const int penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
    /* OBMC window top-left corner and the clipped iteration bounds. */
    int sx= block_w*mb_x - block_w/2;
    int sy= block_h*mb_y - block_h/2;
    int x0= FFMAX(0,-sx);
    int y0= FFMAX(0,-sy);
    int x1= FFMIN(block_w*2, w-sx);
    int y1= FFMIN(block_h*2, h-sy);
    int i,x,y;

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below chckinhg only block_w

    ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);

    /* Blend the MC result with the OBMC prediction into dst. */
    for(y=y0; y<y1; y++){
        const uint8_t *obmc1= obmc_edged[y];
        const IDWTELEM *pred1 = pred + y*obmc_stride;
        uint8_t *cur1 = cur + y*ref_stride;
        uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
        for(x=x0; x<x1; x++){
#if FRAC_BITS >= LOG2_OBMC_MAX
            int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
#else
            int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
#endif
            v = (v + pred1[x]) >> FRAC_BITS;
            if(v&(~255)) v= ~(v>>31); // branchless clip to 0..255
            dst1[x] = v;
        }
    }

    /* copy the regions where obmc[] = (uint8_t)256 */
    if(LOG2_OBMC_MAX == 8
        && (mb_x == 0 || mb_x == b_stride-1)
        && (mb_y == 0 || mb_y == b_height-1)){
        if(mb_x == 0)
            x1 = block_w;
        else
            x0 = block_w;
        if(mb_y == 0)
            y1 = block_h;
        else
            y0 = block_h;
        for(y=y0; y<y1; y++)
            memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
    }

    if(block_w==16){
        /* FIXME rearrange dsputil to fit 32x32 cmp functions */
        /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
        /* FIXME cmps overlap but do not cover the wavelet's whole support.
         * So improving the score of one block is not strictly guaranteed
         * to improve the score of the whole frame, thus iterative motion
         * estimation does not always converge. */
        if(s->avctx->me_cmp == FF_CMP_W97)
            distortion = ff_w97_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else if(s->avctx->me_cmp == FF_CMP_W53)
            distortion = ff_w53_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else{
            distortion = 0;
            for(i=0; i<4; i++){
                int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
                distortion += enc->m.s.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16);
            }
        }
    }else{
        av_assert2(block_w==8);
        distortion = enc->m.s.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
    }

    if(plane_index==0){
        for(i=0; i<4; i++){
/* ..RRr
 * .RXx.
 * rxx..
 */
            rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
        }
        if(mb_x == b_stride-2)
            rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
    }
    return distortion + rate*penalty_factor;
}
870 
/**
 * Rate-distortion cost of the 2x2 group of blocks with top-left corner
 * at (mb_x, mb_y) in one plane: reconstructs the 3x3 neighbourhood via
 * OBMC into the current picture, sums the distortion per sub-block, and
 * adds the estimated bit cost of the affected blocks.
 */
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst
    // const has only been removed from zero_dst to suppress a warning
    static IDWTELEM zero_dst[4096]; //FIXME
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion= 0;
    int rate= 0;
    const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below

    /* Reconstruct each of the 9 overlapping blocks around the group. */
    for(i=0; i<9; i++){
        int mb_x2= mb_x + (i%3) - 1;
        int mb_y2= mb_y + (i/3) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        add_yblock(s, 0, NULL, zero_dst, dst, obmc,
                   x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);

        //FIXME find a cleaner/simpler way to skip the outside stuff
        /* Outside the picture, copy the source through so the comparison
         * below only measures in-picture differences. */
        for(y2= y; y2<0; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        for(y2= h; y2<y+block_h; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        if(x<0){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
        }
        if(x+block_w > w){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
        }

        av_assert1(block_w== 8 || block_w==16);
        distortion += enc->m.s.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
    }

    if(plane_index==0){
        BlockNode *b= &s->block[mb_x+mb_y*b_stride];
        int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);

/* ..RRRr
 * .RXXx.
 * .RXXx.
 * rxxx.
 */
        if(merged)
            rate = get_block_bits(s, mb_x, mb_y, 2);
        for(i=merged?4:0; i<9; i++){
            static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
            rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
        }
    }
    return distortion + rate*penalty_factor;
}
941 
/**
 * Entropy-code one wavelet subband using context-zero run-length coding.
 *
 * Two passes over the coefficients:
 *  1. collect the lengths of runs of zero coefficients whose causal
 *     neighbourhood (left, top-left, top, top-right, parent) is all zero;
 *  2. code the coefficients, using the precomputed run lengths to skip the
 *     significance flag inside such runs.
 *
 * @param s          encoder context (range coder and run buffer)
 * @param b          subband being coded
 * @param src        coefficient buffer of this subband
 * @param parent     coefficient buffer of the parent subband, or NULL
 * @param stride     line stride of src (parent is addressed at 2*stride)
 * @param orientation subband orientation (only referenced in dead code here)
 * @return 0 on success, a negative error code if the output buffer is too small
 */
static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
    const int w= b->width;
    const int h= b->height;
    int x, y;

    if(1){
        int run=0;
        int *runs = s->run_buffer;
        int run_index=0;
        int max_index;

        /* Pass 1: measure runs of zeros within all-zero neighbourhoods. */
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v, p=0;
                int /*ll=0, */l=0, lt=0, t=0, rt=0;
                v= src[x + y*stride];

                if(y){
                    t= src[x + (y-1)*stride];
                    if(x){
                        lt= src[x - 1 + (y-1)*stride];
                    }
                    if(x + 1 < w){
                        rt= src[x + 1 + (y-1)*stride];
                    }
                }
                if(x){
                    l= src[x - 1 + y*stride];
                    /*if(x > 1){
                        if(orientation==1) ll= src[y + (x-2)*stride];
                        else               ll= src[x - 2 + y*stride];
                    }*/
                }
                if(parent){
                    int px= x>>1;
                    int py= y>>1;
                    if(px<b->parent->width && py<b->parent->height)
                        p= parent[px + py*2*stride];
                }
                if(!(/*ll|*/l|lt|t|rt|p)){
                    /* zero context: either extend the current zero run or,
                     * on a nonzero coefficient, terminate and record it */
                    if(v){
                        runs[run_index++]= run;
                        run=0;
                    }else{
                        run++;
                    }
                }
            }
        }
        max_index= run_index;
        runs[run_index++]= run;
        run_index=0;
        run= runs[run_index++];

        put_symbol2(&s->c, b->state[30], max_index, 0);
        if(run_index <= max_index)
            put_symbol2(&s->c, b->state[1], run, 3);

        /* Pass 2: code the coefficients. */
        for(y=0; y<h; y++){
            /* 40 bytes per coefficient is a generous worst-case bound */
            if(s->c.bytestream_end - s->c.bytestream < w*40){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return AVERROR(ENOMEM);
            }
            for(x=0; x<w; x++){
                int v, p=0;
                int /*ll=0, */l=0, lt=0, t=0, rt=0;
                v= src[x + y*stride];

                if(y){
                    t= src[x + (y-1)*stride];
                    if(x){
                        lt= src[x - 1 + (y-1)*stride];
                    }
                    if(x + 1 < w){
                        rt= src[x + 1 + (y-1)*stride];
                    }
                }
                if(x){
                    l= src[x - 1 + y*stride];
                    /*if(x > 1){
                        if(orientation==1) ll= src[y + (x-2)*stride];
                        else               ll= src[x - 2 + y*stride];
                    }*/
                }
                if(parent){
                    int px= x>>1;
                    int py= y>>1;
                    if(px<b->parent->width && py<b->parent->height)
                        p= parent[px + py*2*stride];
                }
                if(/*ll|*/l|lt|t|rt|p){
                    /* nonzero neighbourhood: code a significance flag with a
                     * context derived from the neighbour magnitudes */
                    int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));

                    put_rac(&s->c, &b->state[0][context], !!v);
                }else{
                    /* zero neighbourhood: significance is implied by the
                     * run lengths recorded in pass 1 */
                    if(!run){
                        run= runs[run_index++];

                        if(run_index <= max_index)
                            put_symbol2(&s->c, b->state[1], run, 3);
                        av_assert2(v);
                    }else{
                        run--;
                        av_assert2(!v);
                    }
                }
                if(v){
                    /* code magnitude-1 and, with an l/t-derived context, the sign */
                    int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
                    int l2= 2*FFABS(l) + (l<0);
                    int t2= 2*FFABS(t) + (t<0);

                    put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
                    put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
                }
            }
        }
    }
    return 0;
}
1061 
1062 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1063 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1064 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1065  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1066 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1067 }
1068 
1069 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1070  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1071 {
1072  SnowContext *const s = &enc->com;
1073  const int b_stride= s->b_width << s->block_max_depth;
1074  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1075  BlockNode backup= *block;
1076  int rd;
1077 
1078  av_assert2(mb_x>=0 && mb_y>=0);
1079  av_assert2(mb_x<b_stride);
1080 
1081  block->color[0] = p[0];
1082  block->color[1] = p[1];
1083  block->color[2] = p[2];
1084  block->type |= BLOCK_INTRA;
1085 
1086  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1087 
1088 //FIXME chroma
1089  if(rd < *best_rd){
1090  *best_rd= rd;
1091  return 1;
1092  }else{
1093  *block= backup;
1094  return 0;
1095  }
1096 }
1097 
/* special case for int[2] args we discard afterwards,
 * fixes compilation problem with gcc 2.95 */
                                              int mb_x, int mb_y, int p0, int p1,
                                              uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
{
    /* Try coding the block at (mb_x, mb_y) as inter with motion vector
     * (p0, p1); keep the change only if it lowers the RD cost, otherwise
     * restore the saved block state. Returns 1 on improvement, 0 otherwise. */
    SnowContext *const s = &enc->com;
    const int b_stride = s->b_width << s->block_max_depth;
    BlockNode *block = &s->block[mb_x + mb_y * b_stride];
    BlockNode backup = *block;
    unsigned value;
    int rd, index;

    av_assert2(mb_x >= 0 && mb_y >= 0);
    av_assert2(mb_x < b_stride);

    /* Memoize already-evaluated (mv, ref) candidates in a small hash table;
     * bumping me_cache_generation elsewhere invalidates all entries without
     * clearing the table. */
    index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
    value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
    if (enc->me_cache[index] == value)
        return 0;
    enc->me_cache[index] = value;

    block->mx = p0;
    block->my = p1;
    block->type &= ~BLOCK_INTRA;

    rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);

//FIXME chroma
    if (rd < *best_rd) {
        *best_rd = rd;
        return 1;
    } else {
        *block = backup;
        return 0;
    }
}
1135 
1136 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1137  int p0, int p1, int ref, int *best_rd)
1138 {
1139  SnowContext *const s = &enc->com;
1140  const int b_stride= s->b_width << s->block_max_depth;
1141  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1142  BlockNode backup[4];
1143  unsigned value;
1144  int rd, index;
1145 
1146  /* We don't initialize backup[] during variable declaration, because
1147  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1148  * 'int16_t'". */
1149  backup[0] = block[0];
1150  backup[1] = block[1];
1151  backup[2] = block[b_stride];
1152  backup[3] = block[b_stride + 1];
1153 
1154  av_assert2(mb_x>=0 && mb_y>=0);
1155  av_assert2(mb_x<b_stride);
1156  av_assert2(((mb_x|mb_y)&1) == 0);
1157 
1158  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1159  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1160  if (enc->me_cache[index] == value)
1161  return 0;
1162  enc->me_cache[index] = value;
1163 
1164  block->mx= p0;
1165  block->my= p1;
1166  block->ref= ref;
1167  block->type &= ~BLOCK_INTRA;
1168  block[1]= block[b_stride]= block[b_stride+1]= *block;
1169 
1170  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1171 
1172 //FIXME chroma
1173  if(rd < *best_rd){
1174  *best_rd= rd;
1175  return 1;
1176  }else{
1177  block[0]= backup[0];
1178  block[1]= backup[1];
1179  block[b_stride]= backup[2];
1180  block[b_stride+1]= backup[3];
1181  return 0;
1182  }
1183 }
1184 
1185 static void iterative_me(SnowEncContext *enc)
1186 {
1187  SnowContext *const s = &enc->com;
1188  int pass, mb_x, mb_y;
1189  const int b_width = s->b_width << s->block_max_depth;
1190  const int b_height= s->b_height << s->block_max_depth;
1191  const int b_stride= b_width;
1192  int color[3];
1193 
1194  {
1195  RangeCoder r = s->c;
1196  uint8_t state[sizeof(s->block_state)];
1197  memcpy(state, s->block_state, sizeof(s->block_state));
1198  for(mb_y= 0; mb_y<s->b_height; mb_y++)
1199  for(mb_x= 0; mb_x<s->b_width; mb_x++)
1200  encode_q_branch(enc, 0, mb_x, mb_y);
1201  s->c = r;
1202  memcpy(s->block_state, state, sizeof(s->block_state));
1203  }
1204 
1205  for(pass=0; pass<25; pass++){
1206  int change= 0;
1207 
1208  for(mb_y= 0; mb_y<b_height; mb_y++){
1209  for(mb_x= 0; mb_x<b_width; mb_x++){
1210  int dia_change, i, j, ref;
1211  int best_rd= INT_MAX, ref_rd;
1212  BlockNode backup, ref_b;
1213  const int index= mb_x + mb_y * b_stride;
1214  BlockNode *block= &s->block[index];
1215  BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
1216  BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
1217  BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
1218  BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
1219  BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
1220  BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
1221  BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
1222  BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
1223  const int b_w= (MB_SIZE >> s->block_max_depth);
1224  uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
1225 
1226  if(pass && (block->type & BLOCK_OPT))
1227  continue;
1228  block->type |= BLOCK_OPT;
1229 
1230  backup= *block;
1231 
1232  if (!enc->me_cache_generation)
1233  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1234  enc->me_cache_generation += 1<<22;
1235 
1236  //FIXME precalculate
1237  {
1238  int x, y;
1239  for (y = 0; y < b_w * 2; y++)
1240  memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1241  if(mb_x==0)
1242  for(y=0; y<b_w*2; y++)
1243  memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1244  if(mb_x==b_stride-1)
1245  for(y=0; y<b_w*2; y++)
1246  memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1247  if(mb_y==0){
1248  for(x=0; x<b_w*2; x++)
1249  obmc_edged[0][x] += obmc_edged[b_w-1][x];
1250  for(y=1; y<b_w; y++)
1251  memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1252  }
1253  if(mb_y==b_height-1){
1254  for(x=0; x<b_w*2; x++)
1255  obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1256  for(y=b_w; y<b_w*2-1; y++)
1257  memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
1258  }
1259  }
1260 
1261  //skip stuff outside the picture
1262  if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1263  const uint8_t *src = s->input_picture->data[0];
1264  uint8_t *dst= s->current_picture->data[0];
1265  const int stride= s->current_picture->linesize[0];
1266  const int block_w= MB_SIZE >> s->block_max_depth;
1267  const int block_h= MB_SIZE >> s->block_max_depth;
1268  const int sx= block_w*mb_x - block_w/2;
1269  const int sy= block_h*mb_y - block_h/2;
1270  const int w= s->plane[0].width;
1271  const int h= s->plane[0].height;
1272  int y;
1273 
1274  for(y=sy; y<0; y++)
1275  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1276  for(y=h; y<sy+block_h*2; y++)
1277  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1278  if(sx<0){
1279  for(y=sy; y<sy+block_h*2; y++)
1280  memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
1281  }
1282  if(sx+block_w*2 > w){
1283  for(y=sy; y<sy+block_h*2; y++)
1284  memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
1285  }
1286  }
1287 
1288  // intra(black) = neighbors' contribution to the current block
1289  for(i=0; i < s->nb_planes; i++)
1290  color[i]= get_dc(enc, mb_x, mb_y, i);
1291 
1292  // get previous score (cannot be cached due to OBMC)
1293  if(pass > 0 && (block->type&BLOCK_INTRA)){
1294  int color0[3]= {block->color[0], block->color[1], block->color[2]};
1295  check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
1296  }else
1297  check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
1298 
1299  ref_b= *block;
1300  ref_rd= best_rd;
1301  for(ref=0; ref < s->ref_frames; ref++){
1302  int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1303  if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
1304  continue;
1305  block->ref= ref;
1306  best_rd= INT_MAX;
1307 
1308  check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
1309  check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
1310  if(tb)
1311  check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1312  if(lb)
1313  check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1314  if(rb)
1315  check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
1316  if(bb)
1317  check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1318 
1319  /* fullpel ME */
1320  //FIXME avoid subpel interpolation / round to nearest integer
1321  do{
1322  int newx = block->mx;
1323  int newy = block->my;
1324  int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
1325  dia_change=0;
1326  for(i=0; i < dia_size; i++){
1327  for(j=0; j<i; j++){
1328  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1329  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1330  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1331  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
1332  }
1333  }
1334  }while(dia_change);
1335  /* subpel ME */
1336  do{
1337  static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1338  dia_change=0;
1339  for(i=0; i<8; i++)
1340  dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
1341  }while(dia_change);
1342  //FIXME or try the standard 2 pass qpel or similar
1343 
1344  mvr[0][0]= block->mx;
1345  mvr[0][1]= block->my;
1346  if(ref_rd > best_rd){
1347  ref_rd= best_rd;
1348  ref_b= *block;
1349  }
1350  }
1351  best_rd= ref_rd;
1352  *block= ref_b;
1353  check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
1354  //FIXME RD style color selection
1355  if(!same_block(block, &backup)){
1356  if(tb ) tb ->type &= ~BLOCK_OPT;
1357  if(lb ) lb ->type &= ~BLOCK_OPT;
1358  if(rb ) rb ->type &= ~BLOCK_OPT;
1359  if(bb ) bb ->type &= ~BLOCK_OPT;
1360  if(tlb) tlb->type &= ~BLOCK_OPT;
1361  if(trb) trb->type &= ~BLOCK_OPT;
1362  if(blb) blb->type &= ~BLOCK_OPT;
1363  if(brb) brb->type &= ~BLOCK_OPT;
1364  change ++;
1365  }
1366  }
1367  }
1368  av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
1369  if(!change)
1370  break;
1371  }
1372 
1373  if(s->block_max_depth == 1){
1374  int change= 0;
1375  for(mb_y= 0; mb_y<b_height; mb_y+=2){
1376  for(mb_x= 0; mb_x<b_width; mb_x+=2){
1377  int i;
1378  int best_rd, init_rd;
1379  const int index= mb_x + mb_y * b_stride;
1380  BlockNode *b[4];
1381 
1382  b[0]= &s->block[index];
1383  b[1]= b[0]+1;
1384  b[2]= b[0]+b_stride;
1385  b[3]= b[2]+1;
1386  if(same_block(b[0], b[1]) &&
1387  same_block(b[0], b[2]) &&
1388  same_block(b[0], b[3]))
1389  continue;
1390 
1391  if (!enc->me_cache_generation)
1392  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1393  enc->me_cache_generation += 1<<22;
1394 
1395  init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);
1396 
1397  //FIXME more multiref search?
1398  check_4block_inter(enc, mb_x, mb_y,
1399  (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1400  (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1401 
1402  for(i=0; i<4; i++)
1403  if(!(b[i]->type&BLOCK_INTRA))
1404  check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
1405 
1406  if(init_rd != best_rd)
1407  change++;
1408  }
1409  }
1410  av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
1411  }
1412 }
1413 
1414 static void encode_blocks(SnowEncContext *enc, int search)
1415 {
1416  SnowContext *const s = &enc->com;
1417  int x, y;
1418  int w= s->b_width;
1419  int h= s->b_height;
1420 
1421  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1422  iterative_me(enc);
1423 
1424  for(y=0; y<h; y++){
1425  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1426  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1427  return;
1428  }
1429  for(x=0; x<w; x++){
1430  if (enc->motion_est == FF_ME_ITER || !search)
1431  encode_q_branch2(s, 0, x, y);
1432  else
1433  encode_q_branch (enc, 0, x, y);
1434  }
1435  }
1436 }
1437 
/**
 * Quantize a subband of DWT coefficients from src into dst.
 *
 * In lossless mode (qlog == LOSSLESS_QLOG) the coefficients are copied
 * unchanged. Otherwise each coefficient is divided by the quantizer step
 * qmul derived from the combined frame+subband qlog; coefficients inside
 * the dead zone [-thres1, thres1] are set to 0 directly. The bias parameter
 * selects between plain truncating division (bias set, rounding bias folded
 * into the dead zone) and division with an additive rounding bias.
 *
 * @param bias nonzero selects the no-added-bias path (bias is then reused
 *             internally as the dead-zone offset (3*qmul)>>3 when zero)
 */
static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
    const int w= b->width;
    const int h= b->height;
    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
    int x,y, thres1, thres2;

    if(s->qlog == LOSSLESS_QLOG){
        for(y=0; y<h; y++)
            for(x=0; x<w; x++)
                dst[x + y*stride]= src[x + y*stride];
        return;
    }

    /* bias is repurposed: 0 input -> (3*qmul)>>3 rounding bias, else 0 */
    bias= bias ? 0 : (3*qmul)>>3;
    thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
    thres2= 2*thres1;

    if(!bias){
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int i= src[x + y*stride];

                /* single unsigned compare tests i outside [-thres1, thres1] */
                if((unsigned)(i+thres1) > thres2){
                    if(i>=0){
                        i<<= QEXPSHIFT;
                        i/= qmul; //FIXME optimize
                        dst[x + y*stride]= i;
                    }else{
                        /* negate first so the division truncates toward zero
                         * symmetrically for negative coefficients */
                        i= -i;
                        i<<= QEXPSHIFT;
                        i/= qmul; //FIXME optimize
                        dst[x + y*stride]= -i;
                    }
                }else
                    dst[x + y*stride]= 0;
            }
        }
    }else{
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int i= src[x + y*stride];

                if((unsigned)(i+thres1) > thres2){
                    if(i>=0){
                        i<<= QEXPSHIFT;
                        i= (i + bias) / qmul; //FIXME optimize
                        dst[x + y*stride]= i;
                    }else{
                        i= -i;
                        i<<= QEXPSHIFT;
                        i= (i + bias) / qmul; //FIXME optimize
                        dst[x + y*stride]= -i;
                    }
                }else
                    dst[x + y*stride]= 0;
            }
        }
    }
}
1498 
    /* Dequantize the subband in place: multiply each quantized coefficient
     * back by the quantizer step qmul and add the sign-mirrored rounding
     * offset qadd; zero coefficients and lossless mode are left untouched. */
    const int w= b->width;
    const int h= b->height;
    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            int i= src[x + y*stride];
            if(i<0){
                src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
1520 
1521 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1522  const int w= b->width;
1523  const int h= b->height;
1524  int x,y;
1525 
1526  for(y=h-1; y>=0; y--){
1527  for(x=w-1; x>=0; x--){
1528  int i= x + y*stride;
1529 
1530  if(x){
1531  if(use_median){
1532  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1533  else src[i] -= src[i - 1];
1534  }else{
1535  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1536  else src[i] -= src[i - 1];
1537  }
1538  }else{
1539  if(y) src[i] -= src[i - stride];
1540  }
1541  }
1542  }
1543 }
1544 
1545 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1546  const int w= b->width;
1547  const int h= b->height;
1548  int x,y;
1549 
1550  for(y=0; y<h; y++){
1551  for(x=0; x<w; x++){
1552  int i= x + y*stride;
1553 
1554  if(x){
1555  if(use_median){
1556  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1557  else src[i] += src[i - 1];
1558  }else{
1559  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1560  else src[i] += src[i - 1];
1561  }
1562  }else{
1563  if(y) src[i] += src[i - stride];
1564  }
1565  }
1566  }
1567 }
1568 
    /* Write the per-subband quantizer offsets (qlog) for the first two
     * planes. Orientation 2 is skipped: its qlog always equals that of
     * orientation 1 (see calculate_visual_weight), so it need not be coded. */
    int plane_index, level, orientation;

    for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                if(orientation==2) continue;
                put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
            }
        }
    }
1581 
    /* Write the frame header: keyframe flag, then (on keyframes) the global
     * stream parameters, and finally the per-frame fields coded as deltas
     * against the values of the previous header. */
    int plane_index, i;
    uint8_t kstate[32];

    memset(kstate, MID_STATE, sizeof(kstate));

    put_rac(&s->c, kstate, s->keyframe);
    if(s->keyframe || s->always_reset){
        /* reset the "previous header" baseline so the deltas below are
         * coded against zero */
        s->last_spatial_decomposition_type=
        s->last_qlog=
        s->last_qbias=
        s->last_mv_scale=
        s->last_block_max_depth= 0;
        for(plane_index=0; plane_index<2; plane_index++){
            Plane *p= &s->plane[plane_index];
            p->last_htaps=0;
            p->last_diag_mc=0;
            memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
        }
    }
    if(s->keyframe){
        /* global stream parameters, only present on keyframes */
        put_symbol(&s->c, s->header_state, s->version, 0);
        put_rac(&s->c, s->header_state, s->always_reset);
        put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
        put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
        put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
        put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
        if (s->nb_planes > 2) {
            put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
            put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
        }
        put_rac(&s->c, s->header_state, s->spatial_scalability);
//        put_rac(&s->c, s->header_state, s->rate_scalability);
        put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);

        encode_qlogs(s);
    }

    if(!s->keyframe){
        /* signal whether the motion-compensation filter changed since the
         * previous frame, and retransmit it if so */
        int update_mc=0;
        for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
            Plane *p= &s->plane[plane_index];
            update_mc |= p->last_htaps   != p->htaps;
            update_mc |= p->last_diag_mc != p->diag_mc;
            update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
        }
        put_rac(&s->c, s->header_state, update_mc);
        if(update_mc){
            for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
                Plane *p= &s->plane[plane_index];
                put_rac(&s->c, s->header_state, p->diag_mc);
                put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
                for(i= p->htaps/2; i; i--)
                    put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
            }
        }
        if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
            put_rac(&s->c, s->header_state, 1);
            put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
            encode_qlogs(s);
        }else
            put_rac(&s->c, s->header_state, 0);
    }

    /* per-frame parameters, coded as signed deltas vs the previous header */
    put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
    put_symbol(&s->c, s->header_state, s->qlog            - s->last_qlog   , 1);
    put_symbol(&s->c, s->header_state, s->mv_scale        - s->last_mv_scale, 1);
    put_symbol(&s->c, s->header_state, s->qbias           - s->last_qbias  , 1);
    put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);

1654 
    /* After a frame has been encoded, record the header values actually
     * used so the next frame's header can be coded as deltas against them
     * (see encode_header). The MC filter baseline is only advanced on
     * non-keyframes, mirroring how encode_header resets it on keyframes. */
    int plane_index;

    if(!s->keyframe){
        for(plane_index=0; plane_index<2; plane_index++){
            Plane *p= &s->plane[plane_index];
            p->last_diag_mc= p->diag_mc;
            p->last_htaps  = p->htaps;
            memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
        }
    }

    s->last_spatial_decomposition_type  = s->spatial_decomposition_type;
    s->last_qlog                        = s->qlog;
    s->last_qbias                       = s->qbias;
    s->last_mv_scale                    = s->mv_scale;
    s->last_block_max_depth             = s->block_max_depth;
    s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1674 
1675 static int qscale2qlog(int qscale){
1676  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1677  + 61*QROOT/8; ///< 64 > 60
1678 }
1679 
{
    /* First-pass rate control: derive a frame qscale from an estimate of
     * the frame's complexity, then convert it to a qlog delta.
     * Returns the qlog delta applied to s->qlog, or INT_MIN on error. */
    SnowContext *const s = &enc->com;
    /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
     * FIXME we know exact mv bits at this point,
     * but ratecontrol isn't set up to include them. */
    uint32_t coef_sum= 0;
    int level, orientation, delta_qlog;

    for(level=0; level<s->spatial_decomposition_count; level++){
        for(orientation=level ? 1 : 0; orientation<4; orientation++){
            SubBand *b= &s->plane[0].band[level][orientation];
            IDWTELEM *buf= b->ibuf;
            const int w= b->width;
            const int h= b->height;
            const int stride= b->stride;
            const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
            const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
            const int qdiv= (1<<16)/qmul;
            int x, y;
            //FIXME this is ugly
            for(y=0; y<h; y++)
                for(x=0; x<w; x++)
                    buf[x+y*stride]= b->buf[x+y*stride];
            /* the DC band is decorrelated before weighting, like in the
             * actual coding path */
            if(orientation==0)
                decorrelate(s, b, buf, stride, 1, 0);
            /* accumulate |coef| / qmul via the fixed-point reciprocal qdiv */
            for(y=0; y<h; y++)
                for(x=0; x<w; x++)
                    coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
        }
    }
    emms_c();

    /* ugly, ratecontrol just takes a sqrt again */
    av_assert0(coef_sum < INT_MAX);
    coef_sum = (uint64_t)coef_sum * coef_sum >> 16;

    /* feed the complexity into the mpegvideo ratecontrol under the slot it
     * expects for the given picture type */
    if(pict->pict_type == AV_PICTURE_TYPE_I){
        enc->m.mb_var_sum    = coef_sum;
        enc->m.mc_mb_var_sum = 0;
    }else{
        enc->m.mc_mb_var_sum = coef_sum;
        enc->m.mb_var_sum    = 0;
    }

    pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
    if (pict->quality < 0)
        return INT_MIN;
    enc->lambda= pict->quality * 3/2;
    delta_qlog= qscale2qlog(pict->quality) - s->qlog;
    s->qlog+= delta_qlog;
    return delta_qlog;
}
1733 
    /* Derive a perceptual quantizer offset (qlog) for every subband of the
     * plane: place a unit impulse in the middle of each subband, run the
     * inverse DWT and measure the energy of the resulting basis function;
     * subbands whose basis functions carry more energy get finer
     * quantization. */
    int width = p->width;
    int height= p->height;
    int level, orientation, x, y;

    for(level=0; level<s->spatial_decomposition_count; level++){
        int64_t error=0;
        for(orientation=level ? 1 : 0; orientation<4; orientation++){
            SubBand *b= &p->band[level][orientation];
            IDWTELEM *ibuf= b->ibuf;

            memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
            /* unit impulse in the centre of this subband */
            ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
            ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
            for(y=0; y<height; y++){
                for(x=0; x<width; x++){
                    int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
                    error += d*d;
                }
            }
            if (orientation == 2)
                error /= 2;
            b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
            /* orientation 1's error is kept so orientation 2 (processed
             * next) accumulates on top of it; everything else starts fresh */
            if (orientation != 1)
                error = 0;
        }
        /* orientations 1 and 2 share a qlog; only one of them is coded
         * (see encode_qlogs) */
        p->band[level][1].qlog = p->band[level][2].qlog;
    }
1763 
1765  const AVFrame *pict, int *got_packet)
1766 {
1767  SnowEncContext *const enc = avctx->priv_data;
1768  SnowContext *const s = &enc->com;
1769  MPVEncContext *const mpv = &enc->m.s;
1770  RangeCoder * const c= &s->c;
1771  AVCodecInternal *avci = avctx->internal;
1772  AVFrame *pic;
1773  const int width= s->avctx->width;
1774  const int height= s->avctx->height;
1775  int level, orientation, plane_index, i, y, ret;
1776  uint8_t rc_header_bak[sizeof(s->header_state)];
1777  uint8_t rc_block_bak[sizeof(s->block_state)];
1778 
1779  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1780  return ret;
1781 
1783  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1784 
1785  for(i=0; i < s->nb_planes; i++){
1786  int hshift= i ? s->chroma_h_shift : 0;
1787  int vshift= i ? s->chroma_v_shift : 0;
1788  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1789  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1790  &pict->data[i][y * pict->linesize[i]],
1791  AV_CEIL_RSHIFT(width, hshift));
1792  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1793  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1794  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1795  EDGE_TOP | EDGE_BOTTOM);
1796 
1797  }
1798  pic = s->input_picture;
1799  pic->pict_type = pict->pict_type;
1800  pic->quality = pict->quality;
1801 
1802  mpv->picture_number = avctx->frame_num;
1803  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1804  mpv->c.pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type;
1805  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1806  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1807  pic->quality = ff_rate_estimate_qscale(&enc->m, 0);
1808  if (pic->quality < 0)
1809  return -1;
1810  }
1811  }else{
1812  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1813  mpv->c.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1814  }
1815 
1816  if (enc->pass1_rc && avctx->frame_num == 0)
1817  pic->quality = 2*FF_QP2LAMBDA;
1818  if (pic->quality) {
1819  s->qlog = qscale2qlog(pic->quality);
1820  enc->lambda = pic->quality * 3/2;
1821  }
1822  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1823  s->qlog= LOSSLESS_QLOG;
1824  enc->lambda = 0;
1825  }//else keep previous frame's qlog until after motion estimation
1826 
1827  if (s->current_picture->data[0]) {
1828  int w = s->avctx->width;
1829  int h = s->avctx->height;
1830 
1831  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1832  s->current_picture->linesize[0], w , h ,
1834  if (s->current_picture->data[2]) {
1835  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1836  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1837  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1838  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1839  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1840  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1841  }
1842  }
1843 
1845  ret = get_encode_buffer(s, s->current_picture);
1846  if (ret < 0)
1847  return ret;
1848 
1849  mpv->c.cur_pic.ptr = &enc->cur_pic;
1850  mpv->c.cur_pic.ptr->f = s->current_picture;
1851  mpv->c.cur_pic.ptr->f->pts = pict->pts;
1852  if(pic->pict_type == AV_PICTURE_TYPE_P){
1853  int block_width = (width +15)>>4;
1854  int block_height= (height+15)>>4;
1855  int stride= s->current_picture->linesize[0];
1856 
1857  av_assert0(s->current_picture->data[0]);
1858  av_assert0(s->last_picture[0]->data[0]);
1859 
1860  mpv->c.avctx = s->avctx;
1861  mpv->c.last_pic.ptr = &enc->last_pic;
1862  mpv->c.last_pic.ptr->f = s->last_picture[0];
1863  mpv-> new_pic = s->input_picture;
1864  mpv->c.linesize = stride;
1865  mpv->c.uvlinesize = s->current_picture->linesize[1];
1866  mpv->c.width = width;
1867  mpv->c.height = height;
1868  mpv->c.mb_width = block_width;
1869  mpv->c.mb_height = block_height;
1870  mpv->c.mb_stride = mpv->c.mb_width + 1;
1871  mpv->c.b8_stride = 2 * mpv->c.mb_width + 1;
1872  mpv->f_code = 1;
1873  mpv->c.pict_type = pic->pict_type;
1874  mpv->me.motion_est = enc->motion_est;
1875  mpv->me.dia_size = avctx->dia_size;
1876  mpv->c.quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1877  mpv->c.out_format = FMT_H263;
1878  mpv->me.unrestricted_mv = 1;
1879 
1880  mpv->lambda = enc->lambda;
1881  mpv->c.qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1882  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1883 
1884  mpv->c.qdsp = enc->qdsp; //move
1885  mpv->c.hdsp = s->hdsp;
1886  ff_me_init_pic(mpv);
1887  s->hdsp = mpv->c.hdsp;
1888  }
1889 
1890  if (enc->pass1_rc) {
1891  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1892  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1893  }
1894 
1895 redo_frame:
1896 
1897  s->spatial_decomposition_count= 5;
1898 
1899  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1900  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1901  s->spatial_decomposition_count--;
1902 
1903  if (s->spatial_decomposition_count <= 0) {
1904  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1905  return AVERROR(EINVAL);
1906  }
1907 
1908  mpv->c.pict_type = pic->pict_type;
1909  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1910 
1912 
1913  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1914  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1915  calculate_visual_weight(s, &s->plane[plane_index]);
1916  }
1917  }
1918 
1919  encode_header(s);
1920  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1921  encode_blocks(enc, 1);
1922  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1923 
1924  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1925  Plane *p= &s->plane[plane_index];
1926  int w= p->width;
1927  int h= p->height;
1928  int x, y;
1929 // int bits= put_bits_count(&s->c.pb);
1930 
1931  if (!enc->memc_only) {
1932  //FIXME optimize
1933  if(pict->data[plane_index]) //FIXME gray hack
1934  for(y=0; y<h; y++){
1935  for(x=0; x<w; x++){
1936  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1937  }
1938  }
1939  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1940 
1941  if( plane_index==0
1942  && pic->pict_type == AV_PICTURE_TYPE_P
1943  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1944  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1946  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1948  s->keyframe=1;
1949  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1950  emms_c();
1951  goto redo_frame;
1952  }
1953 
1954  if(s->qlog == LOSSLESS_QLOG){
1955  for(y=0; y<h; y++){
1956  for(x=0; x<w; x++){
1957  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1958  }
1959  }
1960  }else{
1961  for(y=0; y<h; y++){
1962  for(x=0; x<w; x++){
1963  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1964  }
1965  }
1966  }
1967 
1968  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1969 
1970  if (enc->pass1_rc && plane_index==0) {
1971  int delta_qlog = ratecontrol_1pass(enc, pic);
1972  if (delta_qlog <= INT_MIN)
1973  return -1;
1974  if(delta_qlog){
1975  //reordering qlog in the bitstream would eliminate this reset
1977  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1978  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1979  encode_header(s);
1980  encode_blocks(enc, 0);
1981  }
1982  }
1983 
1984  for(level=0; level<s->spatial_decomposition_count; level++){
1985  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1986  SubBand *b= &p->band[level][orientation];
1987 
1988  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1989  if(orientation==0)
1990  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1991  if (!enc->no_bitstream)
1992  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1993  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1994  if(orientation==0)
1995  correlate(s, b, b->ibuf, b->stride, 1, 0);
1996  }
1997  }
1998 
1999  for(level=0; level<s->spatial_decomposition_count; level++){
2000  for(orientation=level ? 1 : 0; orientation<4; orientation++){
2001  SubBand *b= &p->band[level][orientation];
2002 
2003  dequantize(s, b, b->ibuf, b->stride);
2004  }
2005  }
2006 
2007  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2008  if(s->qlog == LOSSLESS_QLOG){
2009  for(y=0; y<h; y++){
2010  for(x=0; x<w; x++){
2011  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2012  }
2013  }
2014  }
2015  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2016  }else{
2017  //ME/MC only
2018  if(pic->pict_type == AV_PICTURE_TYPE_I){
2019  for(y=0; y<h; y++){
2020  for(x=0; x<w; x++){
2021  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2022  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2023  }
2024  }
2025  }else{
2026  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2027  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2028  }
2029  }
2030  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2031  int64_t error= 0;
2032 
2033  if(pict->data[plane_index]) //FIXME gray hack
2034  for(y=0; y<h; y++){
2035  for(x=0; x<w; x++){
2036  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2037  error += d*d;
2038  }
2039  }
2040  s->avctx->error[plane_index] += error;
2041  enc->encoding_error[plane_index] = error;
2042  }
2043 
2044  }
2045  emms_c();
2046 
2048 
2049  av_frame_unref(s->last_picture[s->max_ref_frames - 1]);
2050 
2051  s->current_picture->pict_type = pic->pict_type;
2052  s->current_picture->quality = pic->quality;
2053  enc->m.frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2054  mpv->p_tex_bits = enc->m.frame_bits - mpv->misc_bits - mpv->mv_bits;
2055  enc->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2057  enc->cur_pic.coded_picture_number = avctx->frame_num;
2058  enc->cur_pic.f->quality = pic->quality;
2059  if (enc->pass1_rc) {
2060  ret = ff_rate_estimate_qscale(&enc->m, 0);
2061  if (ret < 0)
2062  return ret;
2063  }
2064  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2065  ff_write_pass1_stats(&enc->m);
2066  enc->m.last_pict_type = mpv->c.pict_type;
2067 
2068  ff_encode_add_stats_side_data(pkt, s->current_picture->quality,
2069  enc->encoding_error,
2070  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2071  s->current_picture->pict_type);
2072  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2073  av_frame_replace(avci->recon_frame, s->current_picture);
2074  }
2075 
2076  pkt->size = ff_rac_terminate(c, 0);
2077  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2079  *got_packet = 1;
2080 
2081  return 0;
2082 }
2083 
2085 {
/* Encoder teardown: frees every allocation owned by the Snow encoder.
 * Counterpart to encode_init; safe to call after a failed init
 * (FF_CODEC_CAP_INIT_CLEANUP is set on the codec below).
 * NOTE(review): the signature line (snowenc.c:2084) is missing from this
 * extraction; per the cross-reference index it reads
 * "static av_cold int encode_end(AVCodecContext *avctx)". Lines 2089-2090
 * are also missing here (presumably the common SnowContext teardown, e.g.
 * ff_snow_common_end(s) -- TODO confirm against upstream). */
2086  SnowEncContext *const enc = avctx->priv_data;
2087  SnowContext *const s = &enc->com;
2088 
/* Release the buffered copy of the most recent input frame. */
2091  av_frame_free(&s->input_picture);
2092 
/* Per-reference-frame motion-vector and score arrays. */
2093  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2094  av_freep(&s->ref_mvs[i]);
2095  av_freep(&s->ref_scores[i]);
2096  }
2097 
/* me.temp is nulled rather than freed -- it appears to alias
 * me.scratchpad, which is freed just below (TODO confirm); clearing it
 * first avoids leaving a dangling pointer behind. */
2098  enc->m.s.me.temp = NULL;
2099  av_freep(&enc->m.s.me.scratchpad);
2100  av_freep(&enc->emu_edge_buffer);
2101 
/* stats_out is presumably allocated by pass-1 rate control
 * (AV_CODEC_FLAG_PASS1 path in encode_frame) -- freeing is a no-op when
 * it was never allocated. */
2102  av_freep(&avctx->stats_out);
2103 
2104  return 0;
2105 }
2106 
/* OFFSET maps an option name onto its field inside SnowEncContext;
 * VE marks every option as a video encoding parameter. */
2107 #define OFFSET(x) offsetof(SnowEncContext, x)
2108 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private option table for the "snow" encoder, exposed through
 * snowenc_class below. AV_OPT_TYPE_CONST entries are named values for
 * the unit of the preceding AV_OPT_TYPE_INT option. */
2109 static const AVOption options[] = {
/* Motion estimation algorithm; FF_ME_ITER (3) is the encoder-local
 * iterative mode defined at the top of this file. */
2110  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2111  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2112  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2113  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
2114  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
2115  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2116  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2117  { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2118  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2119  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
/* Spatial decomposition: the dwt97 constant maps to 0 and dwt53 to 1;
 * the range bounds reuse DWT_97/DWT_53 -- presumably 0 and 1
 * respectively (TODO confirm against snow_dwt.h). */
2120  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2121  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2122  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
/* Rate-control expression; stored in the embedded MPVMainEncContext's
 * rate-control context rather than in SnowEncContext directly. */
2123  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2124  "defined in the section 'Expression Evaluation', the following functions are available: "
2125  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2126  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2127  OFFSET(m.rc_context.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
/* Table terminator required by the AVOption API. */
2128  { NULL },
2129 };
2130 
/* AVClass connecting the option table above to the encoder's priv_data
 * (SnowEncContext), so options are settable via the AVOption API. */
2131 static const AVClass snowenc_class = {
2132  .class_name = "snow encoder",
2133  .item_name = av_default_item_name,
2134  .option = options,
2135  .version = LIBAVUTIL_VERSION_INT,
2136 };
2137 
/* Registration entry for the Snow encoder.
 * NOTE(review): this extraction dropped several lines -- the opening line
 * (snowenc.c:2138, "const FFCodec ff_snow_encoder = {" per the
 * cross-reference index), lines 2144-2145 (presumably further capability
 * flags OR'd into .p.capabilities, e.g. AV_CODEC_CAP_ENCODER_RECON_FRAME /
 * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, which appear in the index),
 * line 2148 (presumably FF_CODEC_ENCODE_CB(encode_frame)), and lines
 * 2150-2151 (presumably the supported pixel-format list via
 * CODEC_PIXFMTS). TODO confirm all of these against upstream. */
2139  .p.name = "snow",
2140  CODEC_LONG_NAME("Snow"),
2141  .p.type = AVMEDIA_TYPE_VIDEO,
2142  .p.id = AV_CODEC_ID_SNOW,
2143  .p.capabilities = AV_CODEC_CAP_DR1 |
2146  .priv_data_size = sizeof(SnowEncContext),
2147  .init = encode_init,
2149  .close = encode_end,
2152  .color_ranges = AVCOL_RANGE_MPEG,
2153  .p.priv_class = &snowenc_class,
/* Allows encode_end to run for cleanup even if encode_init failed. */
2154  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2155 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:392
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
encode_subband
static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:1062
MPVEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideoenc.h:137
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1521
MpegEncContext::hdsp
HpelDSPContext hdsp
Definition: mpegvideo.h:159
set_blocks
static void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type)
Definition: snow.h:405
P_LEFT
#define P_LEFT
Definition: snowenc.c:368
level
uint8_t level
Definition: svq3.c:208
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:191
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
QEXPSHIFT
#define QEXPSHIFT
Definition: snow.h:432
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
r
const char * r
Definition: vf_curves.c:127
AVERROR
Macro that converts a positive POSIX errno value into the corresponding negative FFmpeg error code, e.g. AVERROR(EINVAL) or AVERROR(EAGAIN).
opt.h
SnowEncContext::lambda
int lambda
Definition: snowenc.c:50
libm.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MID_STATE
#define MID_STATE
Definition: snow.h:39
color
Definition: vf_paletteuse.c:513
ratecontrol_1pass
static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
Definition: snowenc.c:1680
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:247
FF_ME_EPZS
#define FF_ME_EPZS
Definition: motion_est.h:43
inverse
inverse
Definition: af_crystalizer.c:122
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: snowenc.c:2084
SnowEncContext::scenechange_threshold
int scenechange_threshold
Definition: snowenc.c:60
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
LOG2_MB_SIZE
#define LOG2_MB_SIZE
Definition: snow.h:72
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MotionEstContext
Motion estimation context.
Definition: motion_est.h:49
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
int64_t
long long int64_t
Definition: coverity.c:34
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
AV_CODEC_CAP_ENCODER_RECON_FRAME
#define AV_CODEC_CAP_ENCODER_RECON_FRAME
The encoder is able to output reconstructed frame data, i.e.
Definition: codec.h:159
QBIAS_SHIFT
#define QBIAS_SHIFT
Definition: snow.h:160
h263enc.h
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
MPVEncContext::mv_bits
int mv_bits
Definition: mpegvideoenc.h:133
DWT_97
#define DWT_97
Definition: snow_dwt.h:70
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:39
update_last_header_values
static void update_last_header_values(SnowContext *s)
Definition: snowenc.c:1655
MpegEncContext::pict_type
enum AVPictureType pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:154
internal.h
iterative_me
static void iterative_me(SnowEncContext *enc)
Definition: snowenc.c:1185
AVPacket::data
uint8_t * data
Definition: packet.h:588
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
SnowEncContext::qdsp
QpelDSPContext qdsp
Definition: snowenc.c:47
DWT_53
#define DWT_53
Definition: snow_dwt.h:71
get_penalty_factor
static int get_penalty_factor(int lambda, int lambda2, int type)
Definition: snowenc.c:343
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
encode_subband_c0run
static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:942
rangecoder.h
FFCodec
Definition: codec_internal.h:127
MpegEncContext::b8_stride
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:98
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
SnowContext
Definition: snow.h:113
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: snowenc.c:1764
QSHIFT
#define QSHIFT
Definition: snow.h:42
MAX_REF_FRAMES
#define MAX_REF_FRAMES
Definition: snow.h:46
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
ff_snow_common_end
av_cold void ff_snow_common_end(SnowContext *s)
Definition: snow.c:630
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
ff_spatial_dwt
void ff_spatial_dwt(DWTELEM *buffer, DWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:320
BlockNode::type
uint8_t type
Bitfield of BLOCK_*.
Definition: snow.h:55
px
#define px
Definition: ops_tmpl_float.c:35
check_4block_inter
static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd)
Definition: snowenc.c:1136
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
ff_spatial_idwt
void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:732
SnowEncContext::me_cache_generation
unsigned me_cache_generation
Definition: snowenc.c:67
encode_blocks
static void encode_blocks(SnowEncContext *enc, int search)
Definition: snowenc.c:1414
ff_init_range_encoder
av_cold void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size)
Definition: rangecoder.c:42
LOG2_OBMC_MAX
#define LOG2_OBMC_MAX
Definition: snow.h:48
BlockNode
Definition: snow.h:50
AVCodecContext::refs
int refs
number of reference frames
Definition: avcodec.h:697
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:102
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
check_block_intra
static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3], uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1069
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
OFFSET
#define OFFSET(x)
Definition: snowenc.c:2107
ff_snow_pred_block
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride, int sx, int sy, int b_w, int b_h, const BlockNode *block, int plane_index, int w, int h)
Definition: snow.c:372
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
get_4block_rd
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:871
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:359
FF_CMP_SSE
#define FF_CMP_SSE
Definition: avcodec.h:878
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
SnowEncContext
Definition: snowenc.c:45
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
ff_snow_common_init_after_header
int ff_snow_common_init_after_header(AVCodecContext *avctx)
Definition: snow.c:538
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:106
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MpegEncContext::qdsp
QpelDSPContext qdsp
Definition: mpegvideo.h:161
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:132
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
encode_q_branch
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
Definition: snowenc.c:375
FF_CMP_BIT
#define FF_CMP_BIT
Definition: avcodec.h:882
emms_c
#define emms_c()
Definition: emms.h:89
SnowEncContext::mecc
MECmpContext mecc
Definition: snowenc.c:62
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1225
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
BLOCK_OPT
#define BLOCK_OPT
Block needs no checks in this round of iterative motion estiation.
Definition: snow.h:58
LOSSLESS_QLOG
#define LOSSLESS_QLOG
Definition: snow.h:44
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
calculate_visual_weight
static void calculate_visual_weight(SnowContext *s, Plane *p)
Definition: snowenc.c:1734
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
MpegEncContext::mb_num
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:100
P
#define P
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
add_yblock
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index)
Definition: snow.h:222
pix_norm1
static int pix_norm1(const uint8_t *pix, int line_size, int w)
Definition: snowenc.c:327
ff_snow_common_init
av_cold int ff_snow_common_init(AVCodecContext *avctx)
Definition: snow.c:482
PTR_ADD
#define PTR_ADD(ptr, off)
Definition: snowenc.c:76
get_encode_buffer
static int get_encode_buffer(SnowContext *s, AVFrame *frame)
Definition: snowenc.c:142
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SnowEncContext::encoding_error
uint64_t encoding_error[SNOW_MAX_PLANES]
Definition: snowenc.c:69
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
MotionEstContext::dia_size
int dia_size
Definition: motion_est.h:71
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:152
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
run
uint8_t run
Definition: svq3.c:207
SnowEncContext::me_cache
unsigned me_cache[ME_CACHE_SIZE]
Definition: snowenc.c:66
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
MpegvideoEncDSPContext::draw_edges
void(* draw_edges)(uint8_t *buf, ptrdiff_t wrap, int width, int height, int w, int h, int sides)
Definition: mpegvideoencdsp.h:46
snow.h
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
BlockNode::my
int16_t my
Motion vector component Y, see mv_scale.
Definition: snow.h:52
get_block_rd
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index, uint8_t(*obmc_edged)[MB_SIZE *2])
Definition: snowenc.c:767
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
VE
#define VE
Definition: snowenc.c:2108
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
ff_rac_terminate
int ff_rac_terminate(RangeCoder *c, int version)
Terminates the range coder.
Definition: rangecoder.c:109
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
mathops.h
options
Definition: swscale.c:43
SnowEncContext::obmc_scratchpad
IDWTELEM obmc_scratchpad[MB_SIZE *MB_SIZE *12 *2]
Definition: snowenc.c:73
qpeldsp.h
abs
#define abs(x)
Definition: cuda_runtime.h:35
correlate
static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1545
QROOT
#define QROOT
Definition: snow.h:43
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
MpegEncContext::mb_width
int mb_width
Definition: mpegvideo.h:96
MPVMainEncContext
Definition: mpegvideoenc.h:202
ff_h263_get_mv_penalty
const uint8_t(* ff_h263_get_mv_penalty(void))[MAX_DMV *2+1]
Definition: ituh263enc.c:148
FF_ME_XONE
#define FF_ME_XONE
Definition: motion_est.h:44
index
int index
Definition: gxfenc.c:90
c
Excerpt from the undefined-behavior documentation: in C, operations such as signed integer overflow, dereferencing freed memory, or accessing outside allocated bounds are undefined behavior and must not occur — optimizing compilers assume they never happen, so code relying on them can misbehave even when the result of the operation appears unused.
Definition: undefined.txt:32
init_ref
static void init_ref(MotionEstContext *c, const uint8_t *const src[3], uint8_t *const ref[3], uint8_t *const ref2[3], int x, int y, int ref_index)
Definition: snowenc.c:78
MB_SIZE
#define MB_SIZE
Definition: cinepakenc.c:54
put_symbol
static void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
Definition: snowenc.c:95
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:230
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1320
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:120
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
AVPacket::size
int size
Definition: packet.h:589
SNOW_MAX_PLANES
#define SNOW_MAX_PLANES
Definition: snow.h:37
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1013
height
#define height
Definition: dsp.h:89
encode_header
static void encode_header(SnowContext *s)
Definition: snowenc.c:1582
codec_internal.h
FF_CMP_PSNR
#define FF_CMP_PSNR
Definition: avcodec.h:881
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
SnowEncContext::pass1_rc
int pass1_rc
Definition: snowenc.c:52
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:97
FF_CMP_W53
#define FF_CMP_W53
Definition: avcodec.h:888
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
MotionEstContext::mv_penalty
const uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:100
pix_sum
static int pix_sum(const uint8_t *pix, int line_size, int w, int h)
Definition: snowenc.c:311
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
SnowEncContext::motion_est
int motion_est
Definition: snowenc.c:58
ff_snow_encoder
const FFCodec ff_snow_encoder
Definition: snowenc.c:2138
SubBand
Definition: cfhd.h:116
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2594
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
FF_CMP_SATD
#define FF_CMP_SATD
Definition: avcodec.h:879
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
SnowEncContext::intra_penalty
int intra_penalty
Definition: snowenc.c:57
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
snow_dwt.h
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
AVCodecInternal
Definition: internal.h:49
FF_CMP_SAD
#define FF_CMP_SAD
Definition: avcodec.h:877
encode_q_branch2
static void encode_q_branch2(SnowContext *s, int level, int x, int y)
Definition: snowenc.c:613
SnowEncContext::iterative_dia_size
int iterative_dia_size
Definition: snowenc.c:59
ff_quant3bA
const int8_t ff_quant3bA[256]
Definition: snowdata.h:104
DWTELEM
int DWTELEM
Definition: dirac_dwt.h:26
emms.h
ff_obmc_tab
const uint8_t *const ff_obmc_tab[4]
Definition: snowdata.h:123
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
ENCODER_EXTRA_BITS
#define ENCODER_EXTRA_BITS
Definition: snow.h:74
AV_CODEC_FLAG_RECON_FRAME
#define AV_CODEC_FLAG_RECON_FRAME
Request the encoder to output reconstructed frames, i.e. frames that would be produced by decoding th...
Definition: avcodec.h:244
log.h
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1392
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
FF_CMP_RD
#define FF_CMP_RD
Definition: avcodec.h:883
get_block_bits
static int get_block_bits(SnowContext *s, int x, int y, int w)
Definition: snowenc.c:729
ff_get_mb_score
int ff_get_mb_score(MPVEncContext *s, int mx, int my, int src_index, int ref_index, int size, int h, int add_rate)
Definition: motion_est_template.c:192
ff_w53_32_c
int ff_w53_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:833
BLOCK_INTRA
#define BLOCK_INTRA
Intra block, inter otherwise.
Definition: snow.h:57
MotionEstContext::motion_est
int motion_est
ME algorithm.
Definition: motion_est.h:51
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
qscale2qlog
static int qscale2qlog(int qscale)
Definition: snowenc.c:1675
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_always_inline
#define av_always_inline
Definition: attributes.h:63
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AVCodecContext::dia_size
int dia_size
ME diamond size & shape.
Definition: avcodec.h:900
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodecContext::mb_lmin
int mb_lmin
minimum MB Lagrange multiplier
Definition: avcodec.h:986
ff_qexp
const uint8_t ff_qexp[QROOT]
Definition: snowdata.h:128
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
predict_plane
static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add)
Definition: snow.h:398
SnowEncContext::no_bitstream
int no_bitstream
Definition: snowenc.c:56
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ME_CACHE_SIZE
#define ME_CACHE_SIZE
Definition: snowenc.c:65
SnowEncContext::com
SnowContext com
Definition: snowenc.c:46
FF_ME_ITER
#define FF_ME_ITER
Definition: snowenc.c:43
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
get_dc
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:669
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
SnowEncContext::m
MPVMainEncContext m
Definition: snowenc.c:63
log2
#define log2(x)
Definition: libm.h:406
avcodec.h
ff_w97_32_c
int ff_w97_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:838
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
mid_pred
#define mid_pred
Definition: mathops.h:115
ret
ret
Definition: filter_design.txt:187
SnowEncContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: snowenc.c:48
pred
static const float pred[4]
Definition: siprdata.h:259
search
static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
Definition: vf_find_rect.c:152
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: snowenc.c:164
options
static const AVOption options[]
Definition: snowenc.c:2109
AVCodecInternal::recon_frame
AVFrame * recon_frame
When the AV_CODEC_FLAG_RECON_FRAME flag is used.
Definition: internal.h:114
square
static int square(int x)
Definition: roqvideoenc.c:196
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
put_rac
#define put_rac(C, S, B)
ff_snow_reset_contexts
void ff_snow_reset_contexts(SnowContext *s)
Definition: snow.c:150
me_cmp.h
encode_qlogs
static void encode_qlogs(SnowContext *s)
Definition: snowenc.c:1569
av_frame_replace
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
Definition: frame.c:376
QpelDSPContext
quarterpel DSP context
Definition: qpeldsp.h:72
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AV_CODEC_ID_SNOW
@ AV_CODEC_ID_SNOW
Definition: codec_id.h:267
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
SnowEncContext::cur_pic
MPVPicture cur_pic
Definition: snowenc.c:64
SnowEncContext::last_pic
MPVPicture last_pic
Definition: snowenc.c:64
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
FRAC_BITS
#define FRAC_BITS
Definition: g729postfilter.c:36
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
FF_CMP_DCT
#define FF_CMP_DCT
Definition: avcodec.h:880
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:84
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
get_rac_count
static int get_rac_count(RangeCoder *c)
Definition: rangecoder.h:79
AVCodecContext::mb_lmax
int mb_lmax
maximum MB Lagrange multiplier
Definition: avcodec.h:993
put_symbol2
static void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
Definition: snowenc.c:123
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
Plane
Definition: cfhd.h:125
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
BlockNode::level
uint8_t level
Definition: snow.h:60
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
same_block
static av_always_inline int same_block(BlockNode *a, BlockNode *b)
Definition: snow.h:212
mem.h
BlockNode::mx
int16_t mx
Motion vector component X, see mv_scale.
Definition: snow.h:51
w
uint8_t w
Definition: llvidencdsp.c:39
ff_epzs_motion_search
int ff_epzs_motion_search(MPVEncContext *s, int *mx_ptr, int *my_ptr, int P[10][2], int src_index, int ref_index, const int16_t(*last_mv)[2], int ref_mv_scale, int size, int h)
Definition: motion_est_template.c:977
mcf
#define mcf(dx, dy)
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:248
ff_snow_frames_prepare
int ff_snow_frames_prepare(SnowContext *s)
Definition: snow.c:599
FF_CMP_DCT264
#define FF_CMP_DCT264
Definition: avcodec.h:891
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
SnowEncContext::emu_edge_buffer
uint8_t * emu_edge_buffer
Definition: snowenc.c:71
quantize
static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias)
Definition: snowenc.c:1438
SnowEncContext::memc_only
int memc_only
Definition: snowenc.c:55
dequantize
static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride)
Definition: snowenc.c:1499
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
HTAPS_MAX
#define HTAPS_MAX
Definition: snow.h:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
null_block
static const BlockNode null_block
Definition: snow.h:63
MotionEstContext::scene_change_score
int scene_change_score
Definition: motion_est.h:86
MPVEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideoenc.h:135
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
IDWTELEM
short IDWTELEM
Definition: dirac_dwt.h:27
h
h
Definition: vp9dsp_template.c:2070
RangeCoder
Definition: mss3.c:63
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:85
stride
#define stride
Definition: h264pred_template.c:536
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
snowenc_class
static const AVClass snowenc_class
Definition: snowenc.c:2131
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
state
static struct @557 state
SnowEncContext::pred
int pred
Definition: snowenc.c:54
P_TOP
#define P_TOP
Definition: snowenc.c:369
check_block_inter
static av_always_inline int check_block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1100
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_snow_alloc_blocks
int ff_snow_alloc_blocks(SnowContext *s)
Definition: snow.c:164
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
BlockNode::ref
uint8_t ref
Reference frame index.
Definition: snow.h:53
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
P_TOPRIGHT
#define P_TOPRIGHT
Definition: snowenc.c:370
MpegEncContext::width
int width
Definition: mpegvideo.h:84
src
#define src
Definition: vp8dsp.c:248
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:101
MPVEncContext::picture_number
int picture_number
Definition: mpegvideoenc.h:130
MotionEstContext::me_cmp
me_cmp_func me_cmp[6]
Definition: motion_est.h:89
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:96
P_MEDIAN
#define P_MEDIAN
Definition: snowenc.c:371
FF_ME_ZERO
#define FF_ME_ZERO
Definition: motion_est.h:42
SnowEncContext::lambda2
int lambda2
Definition: snowenc.c:51
FF_CMP_W97
#define FF_CMP_W97
Definition: avcodec.h:889
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
MotionEstContext::unrestricted_mv
int unrestricted_mv
mv can point outside of the coded picture
Definition: motion_est.h:72
intmath.h