svq1enc.c
/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "dsputil.h"
#include "hpeldsp.h"
#include "mpegvideo.h"
#include "h263.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "svq1.h"
#include "svq1enc_cb.h"


typedef struct SVQ1Context {
    /* FIXME: Needed for motion estimation, should not be used for anything
     * else, the idea is to make the motion estimation eventually independent
     * of MpegEncContext, so this will be removed then. */
    MpegEncContext m;

    AVCodecContext *avctx;
    DSPContext dsp;
    HpelDSPContext hdsp;
    AVFrame *current_picture;
    AVFrame *last_picture;
    PutBitContext pb;

    /* why ooh why this sick breadth first order,
     * everything is slower and more complex */
    PutBitContext reorder_pb[6];

    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;
    uint32_t *dummy;
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];

    int64_t rd_total;

    uint8_t *scratchbuf;
} SVQ1Context;

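/* Write the SVQ1 picture header: a 22-bit frame code, a temporal reference
 * byte, the 2-bit frame type and, for intra frames, either an index into the
 * standard frame size table or explicit 12-bit width/height values. */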
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        i = ff_match_2uint16((void*)ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

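        /* index 7 means the dimensions are not in the standard table and are
         * coded explicitly as 12-bit values instead */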
        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

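/* Recursively encode one block. At each level the block (16x16 at level 5,
 * then 16x8, 8x8, 8x4, 4x4 and finally 4x2 at level 0) is approximated by a
 * mean plus, for levels 0-3, up to six codebook stages, and that result is
 * compared against splitting the block in two and coding both halves at the
 * next lower level. The score being minimized is roughly the squared error
 * of the approximation plus lambda times the estimated bit cost, so lambda
 * controls the rate/distortion trade-off. */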
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    int w = 2 << (level + 2 >> 1);
    int h = 2 << (level + 1 >> 1);
    int size = w * h;
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        codebook_sum   = svq1_intra_codebook_sum[level];
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score   += v * v;
                block_sum[0] += v;
            }
        }
    } else {
        codebook_sum   = svq1_inter_codebook_sum[level];
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score   += v * v;
                block_sum[0] += v;
            }
        }
    }

    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

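    /* levels 0-3 may refine the mean-only approximation with up to six
     * 16-entry codebook stages; each stage quantizes the residual left by
     * the previous one and adds its own bit cost to the score */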
    if (level < 4) {
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum = -999, best_vector_mean = -999;
            const int stage = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64bit slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    av_assert2(mean > -300 && mean < 300);
                    mean = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            av_assert0(best_vector_mean != -999);
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            best_vector_score   += lambda *
                                   (+1 + 4 * count +
                                    multistage_vlc[1 + count][1]
                                    + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

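    /* if the best whole-block score is still above the threshold, try
     * splitting the block in two (the split direction alternates with the
     * level parity) and keep the split only if it wins the comparison;
     * the backups let the reorder bitstreams be rolled back otherwise */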
    split = 0;
    if (best_score > threshold && level) {
        int score  = 0;
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda;

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
        av_assert1(best_mean >= -256 && best_mean < 256);
        av_assert1(best_count >= 0 && best_count < 7);
        av_assert1(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}

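/* Encode one plane. The plane is processed in 16x16 macroblocks: each strip
 * is copied into a scratch buffer and padded, P frames first run the
 * MpegEncContext motion estimation over it, and every macroblock is then
 * coded as whichever of intra, inter (motion compensated) or skip gives the
 * lowest rate-distortion score. The lambda used to weight bit costs is
 * derived from the per-frame quality value. */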
static int svq1_encode_plane(SVQ1Context *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    const AVFrame *f = s->avctx->coded_frame;
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    uint8_t *src = s->scratchbuf + stride * 16;
    const int lambda = (f->quality * f->quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

    if (f->pict_type == AV_PICTURE_TYPE_P) {
        s->m.avctx = s->avctx;
        s->m.current_picture_ptr = &s->m.current_picture;
        s->m.last_picture_ptr    = &s->m.last_picture;
        s->m.last_picture.f.data[0] = ref_plane;
        s->m.linesize =
        s->m.last_picture.f.linesize[0] =
        s->m.new_picture.f.linesize[0] =
        s->m.current_picture.f.linesize[0] = stride;
        s->m.width     = width;
        s->m.height    = height;
        s->m.mb_width  = block_width;
        s->m.mb_height = block_height;
        s->m.mb_stride = s->m.mb_width + 1;
        s->m.b8_stride = 2 * s->m.mb_width + 1;
        s->m.f_code    = 1;
        s->m.pict_type = f->pict_type;
        s->m.me_method = s->avctx->me_method;
        s->m.me.scene_change_score = 0;
        s->m.flags = s->avctx->flags;
        // s->m.out_format = FMT_H263;
        // s->m.unrestricted_mv = 1;
        s->m.lambda = f->quality;
        s->m.qscale = s->m.lambda * 139 +
                      FF_LAMBDA_SCALE * 64 >>
                      FF_LAMBDA_SHIFT + 7;
        s->m.lambda2 = s->m.lambda * s->m.lambda +
                       FF_LAMBDA_SCALE / 2 >>
                       FF_LAMBDA_SHIFT;

        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table = s->motion_val16[plane] +
                          s->m.mb_stride + 1;
        s->m.dsp = s->dsp; // move
        ff_init_me(&s->m);

        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f.data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y = y;

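            /* copy one 16-line strip into the scratch buffer, replicating
             * the last column and row so partial edge blocks become full
             * macroblocks */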
            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

        s->m.mb_y = y;
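        /* mode decision: score[0] is the intra candidate, score[1] inter
         * with a coded motion vector, score[2] skip; the cheapest candidate
         * is copied into the real bitstream below */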
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7 * 32];
            int count[3][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if (f->pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (f->pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (f->pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    av_assert1(mx >= -32 && mx <= 31);
                    av_assert1(my >= -32 && my <= 31);
                    av_assert1(pred_x >= -32 && pred_x <= 31);
                    av_assert1(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1] += lambda * put_bits_count(&s->reorder_pb[5]);

                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16,
                                             decoded, stride, 5, 64, lambda, 0);
                    best      = score[1] <= score[0];

                    vlc       = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]  = s->dsp.sse[0](NULL, src + 16 * x, ref,
                                              stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for (i = 0; i < 6; i++)
                            count[2][i] = 0;
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
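                    /* inter was not chosen for this macroblock, so clear its
                     * stored motion vectors; later blocks read these entries
                     * for motion vector prediction, which should presumably
                     * match what the decoder will assume here */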
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

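            /* concatenate the six per-level reorder buffers of the winning
             * candidate into the real bitstream, level 5 (16x16) first, to
             * produce the breadth-first order the format expects */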
            for (i = 5; i >= 0; i--)
                avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                 count[best][i]);
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}

static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1Context *const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
           s->rd_total / (double)(avctx->width * avctx->height *
                                  avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for (i = 0; i < 3; i++) {
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    av_frame_free(&s->current_picture);
    av_frame_free(&s->last_picture);
    av_frame_free(&avctx->coded_frame);

    return 0;
}

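/* The encoder only accepts AV_PIX_FMT_YUV410P, i.e. 4:1:0 input whose chroma
 * planes are a quarter of the luma size in both directions, hence the
 * divisions by 4 when the C-plane block counts are computed below. */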
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1Context *const s = avctx->priv_data;

    ff_dsputil_init(&s->dsp, avctx);
    ff_hpeldsp_init(&s->hdsp, avctx->flags);

    avctx->coded_frame = av_frame_alloc();
    s->current_picture = av_frame_alloc();
    s->last_picture    = av_frame_alloc();
    if (!avctx->coded_frame || !s->current_picture || !s->last_picture) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx               = avctx;
    s->m.avctx             = avctx;
    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map       = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type        = av_mallocz((s->y_block_width + 1) *
                                   s->y_block_height * sizeof(int16_t));
    s->dummy          = av_mallocz((s->y_block_width + 1) *
                                   s->y_block_height * sizeof(int32_t));
    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}

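/* Encode one frame: pick I or P from the GOP pattern, swap the current and
 * last picture buffers, write the picture header and then encode the Y, U
 * and V planes; the bitstream is finally padded to a 32-bit boundary. */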
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    SVQ1Context *const s = avctx->priv_data;
    AVFrame *const p     = avctx->coded_frame;
    int i, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
                                MAX_MB_BYTES * 3 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if (!s->current_picture->data[0]) {
        if ((ret = ff_get_buffer(avctx, s->current_picture, 0)) < 0 ||
            (ret = ff_get_buffer(avctx, s->last_picture, 0))    < 0) {
            return ret;
        }
        s->scratchbuf = av_malloc(s->current_picture->linesize[0] * 16 * 2);
    }

    FFSWAP(AVFrame*, s->current_picture, s->last_picture);

    init_put_bits(&s->pb, pkt->data, pkt->size);

    p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ?
                   AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
    p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
    p->quality   = pict->quality;

    svq1_write_header(s, p->pict_type);
    for (i = 0; i < 3; i++)
        if (svq1_encode_plane(s, i,
                              pict->data[i],
                              s->last_picture->data[i],
                              s->current_picture->data[i],
                              s->frame_width  / (i ? 4 : 1),
                              s->frame_height / (i ? 4 : 1),
                              pict->linesize[i],
                              s->current_picture->linesize[i]) < 0)
            return -1;

    // avpriv_align_put_bits(&s->pb);
    while (put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    pkt->size = put_bits_count(&s->pb) / 8;
    if (p->pict_type == AV_PICTURE_TYPE_I)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};
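
/*
 * Illustrative use only (not part of the original file): the encoder above is
 * reached through the ordinary libavcodec API of the same era, roughly:
 *
 *     AVCodec *codec      = avcodec_find_encoder(AV_CODEC_ID_SVQ1);
 *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
 *     ctx->width     = 320;
 *     ctx->height    = 240;
 *     ctx->pix_fmt   = AV_PIX_FMT_YUV410P;   // the only format listed above
 *     ctx->time_base = (AVRational){ 1, 25 };
 *     avcodec_open2(ctx, codec, NULL);
 *     // then feed AVFrames through avcodec_encode_video2() and collect packets
 *
 * This is a sketch: error handling, frame allocation and the quality (lambda)
 * setup are omitted.
 */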