FFmpeg
rpzaenc.c
/*
 * QuickTime RPZA Video Encoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file rpzaenc.c
 * QT RPZA Video Encoder by Todd Kirby <doubleshot@pacbell.net> and David Adler
 */

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "encode.h"
#include "internal.h"
#include "put_bits.h"

typedef struct RpzaContext {
    AVClass *avclass;

    int skip_frame_thresh;
    int start_one_color_thresh;
    int continue_one_color_thresh;
    int sixteen_color_thresh;

    AVFrame *prev_frame;    // buffer for previous source frame
    PutBitContext pb;       // buffer for encoded frame data.

    int frame_width;        // width in pixels of source frame
    int frame_height;       // height in pixels of source frame

    int first_frame;        // flag set to one when the first frame is being processed
                            // so that comparisons with previous frame data are not attempted
} RpzaContext;

typedef enum channel_offset {
    RED = 2,
    GREEN = 1,
    BLUE = 0,
} channel_offset;

typedef struct rgb {
    uint8_t r;
    uint8_t g;
    uint8_t b;
} rgb;

#define SQR(x) ((x) * (x))

/* 15 bit components */
#define GET_CHAN(color, chan) (((color) >> ((chan) * 5) & 0x1F) * 8)
#define R(color) GET_CHAN(color, RED)
#define G(color) GET_CHAN(color, GREEN)
#define B(color) GET_CHAN(color, BLUE)
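
/*
 * Note: GET_CHAN() extracts a 5-bit channel from an RGB555 pixel and scales it
 * by 8, so R()/G()/B() return values in the 0..248 range (8-bit scale). The
 * min/max/average statistics below therefore operate on this 8-bit-scaled
 * representation rather than on raw 5-bit channel values.
 */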

typedef struct BlockInfo {
    int row;
    int col;
    int block_width;
    int block_height;
    int image_width;
    int image_height;
    int block_index;
    uint16_t start;
    int rowstride;
    int blocks_per_row;
    int total_blocks;
} BlockInfo;

static void get_colors(uint8_t *min, uint8_t *max, uint8_t color4[4][3])
{
    uint8_t step;

    color4[0][0] = min[0];
    color4[0][1] = min[1];
    color4[0][2] = min[2];

    color4[3][0] = max[0];
    color4[3][1] = max[1];
    color4[3][2] = max[2];

    // red components
    step = (color4[3][0] - color4[0][0] + 1) / 3;
    color4[1][0] = color4[0][0] + step;
    color4[2][0] = color4[3][0] - step;

    // green components
    step = (color4[3][1] - color4[0][1] + 1) / 3;
    color4[1][1] = color4[0][1] + step;
    color4[2][1] = color4[3][1] - step;

    // blue components
    step = (color4[3][2] - color4[0][2] + 1) / 3;
    color4[1][2] = color4[0][2] + step;
    color4[2][2] = color4[3][2] - step;
}
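
/*
 * Example: with min = 0 and max = 248 for a channel, step = (248 - 0 + 1) / 3
 * = 83, giving the four palette levels 0, 83, 165 and 248; the two
 * intermediate entries are linear interpolations between the min and max
 * colors.
 */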

/* Fill BlockInfo struct with information about a 4x4 block of the image */
static int get_block_info(BlockInfo *bi, int block)
{
    bi->row = block / bi->blocks_per_row;
    bi->col = block % bi->blocks_per_row;

    // test for right edge block
    if (bi->col == bi->blocks_per_row - 1 && (bi->image_width % 4) != 0) {
        bi->block_width = bi->image_width % 4;
    } else {
        bi->block_width = 4;
    }

    // test for bottom edge block
    if (bi->row == (bi->image_height / 4) && (bi->image_height % 4) != 0) {
        bi->block_height = bi->image_height % 4;
    } else {
        bi->block_height = 4;
    }

    return block ? (bi->col * 4) + (bi->row * bi->rowstride * 4) : 0;
}

static uint16_t rgb24_to_rgb555(uint8_t *rgb24)
{
    uint16_t rgb555 = 0;
    uint32_t r, g, b;

    r = rgb24[0] >> 3;
    g = rgb24[1] >> 3;
    b = rgb24[2] >> 3;

    rgb555 |= (r << 10);
    rgb555 |= (g << 5);
    rgb555 |= (b << 0);

    return rgb555;
}
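
/*
 * The packed layout is xRRRRRGG GGGBBBBB; bit 15 is deliberately left clear
 * here. The encoder later uses that bit as a flag in the bitstream (see
 * encode_four_color_block(), which clears it on the first color and sets it
 * on the second).
 */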

/*
 * Returns the sum of squared differences between two 24 bit color values
 */
static int diff_colors(uint8_t *colorA, uint8_t *colorB)
{
    int tot;

    tot  = SQR(colorA[0] - colorB[0]);
    tot += SQR(colorA[1] - colorB[1]);
    tot += SQR(colorA[2] - colorB[2]);

    return tot;
}

/*
 * Returns the maximum channel difference
 */
static int max_component_diff(uint16_t *colorA, uint16_t *colorB)
{
    int diff, max = 0;

    diff = FFABS(R(colorA[0]) - R(colorB[0]));
    if (diff > max) {
        max = diff;
    }
    diff = FFABS(G(colorA[0]) - G(colorB[0]));
    if (diff > max) {
        max = diff;
    }
    diff = FFABS(B(colorA[0]) - B(colorB[0]));
    if (diff > max) {
        max = diff;
    }
    return max * 8;
}

/*
 * Find the channel that has the largest difference between minimum and maximum
 * color values. Put the minimum value in min, maximum in max and the channel
 * in chan.
 */
static void get_max_component_diff(BlockInfo *bi, uint16_t *block_ptr,
                                   uint8_t *min, uint8_t *max, channel_offset *chan)
{
    int x, y;
    uint8_t min_r, max_r, min_g, max_g, min_b, max_b;
    uint8_t r, g, b;

    // fix warning about uninitialized vars
    min_r = min_g = min_b = UINT8_MAX;
    max_r = max_g = max_b = 0;

    // loop through and compare pixels
    for (y = 0; y < bi->block_height; y++) {
        for (x = 0; x < bi->block_width; x++) {
            // TODO: optimize
            min_r = FFMIN(R(block_ptr[x]), min_r);
            min_g = FFMIN(G(block_ptr[x]), min_g);
            min_b = FFMIN(B(block_ptr[x]), min_b);

            max_r = FFMAX(R(block_ptr[x]), max_r);
            max_g = FFMAX(G(block_ptr[x]), max_g);
            max_b = FFMAX(B(block_ptr[x]), max_b);
        }
        block_ptr += bi->rowstride;
    }

    r = max_r - min_r;
    g = max_g - min_g;
    b = max_b - min_b;

    if (r > g && r > b) {
        *max = max_r;
        *min = min_r;
        *chan = RED;
    } else if (g > b && g >= r) {
        *max = max_g;
        *min = min_g;
        *chan = GREEN;
    } else {
        *max = max_b;
        *min = min_b;
        *chan = BLUE;
    }
}

/*
 * Compare two 4x4 blocks pixel by pixel to determine whether any pixel's
 * maximum channel difference exceeds the thresh parameter. Returns -1 if the
 * threshold is exceeded, zero otherwise.
 */
static int compare_blocks(uint16_t *block1, uint16_t *block2, BlockInfo *bi, int thresh)
{
    int x, y, diff = 0;
    for (y = 0; y < bi->block_height; y++) {
        for (x = 0; x < bi->block_width; x++) {
            diff = max_component_diff(&block1[x], &block2[x]);
            if (diff >= thresh) {
                return -1;
            }
        }
        block1 += bi->rowstride;
        block2 += bi->rowstride;
    }
    return 0;
}
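
/*
 * compare_blocks() drives the skip decision in rpza_encode_stream(): it is
 * called with s->skip_frame_thresh against the co-located block of the
 * previous frame, and a return of 0 means the block can join a skip run.
 */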

/*
 * Determine the fit of one channel to another within a 4x4 block. This
 * is used to determine the best palette choices for 4-color encoding.
 */
static int leastsquares(uint16_t *block_ptr, BlockInfo *bi,
                        channel_offset xchannel, channel_offset ychannel,
                        double *slope, double *y_intercept, double *correlation_coef)
{
    double sumx = 0, sumy = 0, sumx2 = 0, sumy2 = 0, sumxy = 0,
           sumx_sq = 0, sumy_sq = 0, tmp, tmp2;
    int i, j, count;
    uint8_t x, y;

    count = bi->block_height * bi->block_width;

    if (count < 2)
        return -1;

    for (i = 0; i < bi->block_height; i++) {
        for (j = 0; j < bi->block_width; j++) {
            x = GET_CHAN(block_ptr[j], xchannel);
            y = GET_CHAN(block_ptr[j], ychannel);
            sumx  += x;
            sumy  += y;
            sumx2 += x * x;
            sumy2 += y * y;
            sumxy += x * y;
        }
        block_ptr += bi->rowstride;
    }

    sumx_sq = sumx * sumx;
    tmp = (count * sumx2 - sumx_sq);

    // guard against div/0
    if (tmp == 0)
        return -2;

    sumy_sq = sumy * sumy;

    *slope = (sumx * sumy - sumxy) / tmp;
    *y_intercept = (sumy - (*slope) * sumx) / count;

    tmp2 = count * sumy2 - sumy_sq;
    if (tmp2 == 0) {
        *correlation_coef = 0.0;
    } else {
        *correlation_coef = (count * sumxy - sumx * sumy) /
                            sqrt(tmp * tmp2);
    }

    return 0; // success
}
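
/*
 * The correlation coefficient above is the standard Pearson formula,
 *   r = (n*sum(xy) - sum(x)*sum(y)) /
 *       sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2)),
 * and the intercept follows the usual b = (sum(y) - slope*sum(x)) / n.
 * rpza_encode_stream() uses this fit to predict the min/max of the other two
 * channels from the channel with the widest range when building a 4-color
 * palette.
 */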

/*
 * Determine the amount of error in the leastsquares fit.
 */
static int calc_lsq_max_fit_error(uint16_t *block_ptr, BlockInfo *bi,
                                  int min, int max, int tmp_min, int tmp_max,
                                  channel_offset xchannel, channel_offset ychannel)
{
    int i, j, x, y;
    int err;
    int max_err = 0;

    for (i = 0; i < bi->block_height; i++) {
        for (j = 0; j < bi->block_width; j++) {
            int x_inc, lin_y, lin_x;
            x = GET_CHAN(block_ptr[j], xchannel);
            y = GET_CHAN(block_ptr[j], ychannel);

            /* calculate x_inc as the 4-color index (0..3) */
            x_inc = floor( (x - min) * 3.0 / (max - min) + 0.5);
            x_inc = FFMAX(FFMIN(3, x_inc), 0);

            /* calculate lin_y corresponding to x_inc */
            lin_y = (int)(tmp_min + (tmp_max - tmp_min) * x_inc / 3.0 + 0.5);

            err = FFABS(lin_y - y);
            if (err > max_err)
                max_err = err;

            /* calculate lin_x corresponding to x_inc */
            lin_x = (int)(min + (max - min) * x_inc / 3.0 + 0.5);

            err = FFABS(lin_x - x);
            if (err > max_err)
                max_err += err;
        }
        block_ptr += bi->rowstride;
    }

    return max_err;
}

/*
 * Find the closest match to a color within the 4-color palette
 */
static int match_color(uint16_t *color, uint8_t colors[4][3])
{
    int ret = 0;
    int smallest_variance = INT_MAX;
    uint8_t dithered_color[3];

    for (int channel = 0; channel < 3; channel++) {
        dithered_color[channel] = GET_CHAN(color[0], channel);
    }

    for (int palette_entry = 0; palette_entry < 4; palette_entry++) {
        int variance = diff_colors(dithered_color, colors[palette_entry]);

        if (variance < smallest_variance) {
            smallest_variance = variance;
            ret = palette_entry;
        }
    }

    return ret;
}

/*
 * Encode a block using the 4-color opcode and palette. Return the number of
 * blocks encoded (until multi-block 4-color runs are implemented this will
 * always be 1).
 */
static int encode_four_color_block(uint8_t *min_color, uint8_t *max_color,
                                   PutBitContext *pb, uint16_t *block_ptr, BlockInfo *bi)
{
    int x, y, idx;
    uint8_t color4[4][3];
    uint16_t rounded_max, rounded_min;

    // round min and max wider
    rounded_min = rgb24_to_rgb555(min_color);
    rounded_max = rgb24_to_rgb555(max_color);

    // put a and b colors
    // encode 4 colors = first 16 bit color with MSB zeroed and...
    put_bits(pb, 16, rounded_max & ~0x8000);
    // ...second 16 bit color with MSB on.
    put_bits(pb, 16, rounded_min | 0x8000);

    get_colors(min_color, max_color, color4);

    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++) {
            idx = match_color(&block_ptr[x], color4);
            put_bits(pb, 2, idx);
        }
        block_ptr += bi->rowstride;
    }
    return 1; // num blocks encoded
}
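
/*
 * Resulting bitstream layout for a 4-color block, as written by the put_bits()
 * calls above: a 16-bit color A with bit 15 clear, a 16-bit color B with
 * bit 15 set, then sixteen 2-bit palette indices (one per pixel, row by row).
 */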

/*
 * Copy a 4x4 block from the current frame buffer to the previous frame buffer.
 */
static void update_block_in_prev_frame(const uint16_t *src_pixels,
                                       uint16_t *dest_pixels,
                                       const BlockInfo *bi, int block_counter)
{
    for (int y = 0; y < 4; y++) {
        memcpy(dest_pixels, src_pixels, 8);   // four 16-bit pixels per row
        dest_pixels += bi->rowstride;
        src_pixels += bi->rowstride;
    }
}

/*
 * Update statistics for the specified block. If first_block is set, it
 * initializes the statistics. Otherwise it updates the statistics ONLY IF
 * THIS BLOCK IS SUITABLE TO CONTINUE A 1-COLOR RUN; that is, it checks whether
 * the range of colors seen so far (since the routine was called with
 * first_block != 0) is still close enough in intensity to be represented by a
 * single color.
 *
 * The routine returns 0 if this block is too different to be part of
 * the same run of 1-color blocks. The routine returns 1 if this
 * block can be part of the same 1-color block run.
 *
 * If the routine returns 1, it also updates its arguments to include
 * the statistics of this block. Otherwise, the stats are unchanged
 * and don't include the current block.
 */
static int update_block_stats(RpzaContext *s, BlockInfo *bi, uint16_t *block,
                              uint8_t min_color[3], uint8_t max_color[3],
                              int *total_rgb, int *total_pixels,
                              uint8_t avg_color[3], int first_block)
{
    int x, y;
    int is_in_range;
    int total_pixels_blk;
    int threshold;

    uint8_t min_color_blk[3], max_color_blk[3];
    int total_rgb_blk[3];
    uint8_t avg_color_blk[3];

    if (first_block) {
        min_color[0] = UINT8_MAX;
        min_color[1] = UINT8_MAX;
        min_color[2] = UINT8_MAX;
        max_color[0] = 0;
        max_color[1] = 0;
        max_color[2] = 0;
        total_rgb[0] = 0;
        total_rgb[1] = 0;
        total_rgb[2] = 0;
        *total_pixels = 0;
        threshold = s->start_one_color_thresh;
    } else {
        threshold = s->continue_one_color_thresh;
    }

    /*
       The *_blk variables will include the current block.
       Initialize them based on the blocks so far.
     */
    min_color_blk[0] = min_color[0];
    min_color_blk[1] = min_color[1];
    min_color_blk[2] = min_color[2];
    max_color_blk[0] = max_color[0];
    max_color_blk[1] = max_color[1];
    max_color_blk[2] = max_color[2];
    total_rgb_blk[0] = total_rgb[0];
    total_rgb_blk[1] = total_rgb[1];
    total_rgb_blk[2] = total_rgb[2];
    total_pixels_blk = *total_pixels + bi->block_height * bi->block_width;

    /*
       Update stats for this block's pixels.
     */
    for (y = 0; y < bi->block_height; y++) {
        for (x = 0; x < bi->block_width; x++) {
            total_rgb_blk[0] += R(block[x]);
            total_rgb_blk[1] += G(block[x]);
            total_rgb_blk[2] += B(block[x]);

            min_color_blk[0] = FFMIN(R(block[x]), min_color_blk[0]);
            min_color_blk[1] = FFMIN(G(block[x]), min_color_blk[1]);
            min_color_blk[2] = FFMIN(B(block[x]), min_color_blk[2]);

            max_color_blk[0] = FFMAX(R(block[x]), max_color_blk[0]);
            max_color_blk[1] = FFMAX(G(block[x]), max_color_blk[1]);
            max_color_blk[2] = FFMAX(B(block[x]), max_color_blk[2]);
        }
        block += bi->rowstride;
    }

    /*
       Calculate average color including current block.
     */
    avg_color_blk[0] = total_rgb_blk[0] / total_pixels_blk;
    avg_color_blk[1] = total_rgb_blk[1] / total_pixels_blk;
    avg_color_blk[2] = total_rgb_blk[2] / total_pixels_blk;

    /*
       Are all the pixels within threshold of the average color?
     */
    is_in_range = (max_color_blk[0] - avg_color_blk[0] <= threshold &&
                   max_color_blk[1] - avg_color_blk[1] <= threshold &&
                   max_color_blk[2] - avg_color_blk[2] <= threshold &&
                   avg_color_blk[0] - min_color_blk[0] <= threshold &&
                   avg_color_blk[1] - min_color_blk[1] <= threshold &&
                   avg_color_blk[2] - min_color_blk[2] <= threshold);

    if (is_in_range) {
        /*
           Set the output variables to include this block.
         */
        min_color[0] = min_color_blk[0];
        min_color[1] = min_color_blk[1];
        min_color[2] = min_color_blk[2];
        max_color[0] = max_color_blk[0];
        max_color[1] = max_color_blk[1];
        max_color[2] = max_color_blk[2];
        total_rgb[0] = total_rgb_blk[0];
        total_rgb[1] = total_rgb_blk[1];
        total_rgb[2] = total_rgb_blk[2];
        *total_pixels = total_pixels_blk;
        avg_color[0] = avg_color_blk[0];
        avg_color[1] = avg_color_blk[1];
        avg_color[2] = avg_color_blk[2];
    }

    return is_in_range;
}
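
/*
 * Overview of the opcodes this encoder emits per 4x4 block (derived from the
 * put_bits() calls below; see the corresponding RPZA decoder for the full
 * bitstream definition):
 *   - skip:      0x80 | (n - 1), leaves up to 32 blocks unchanged
 *   - one color: 0xA0 | (n - 1), followed by one RGB555 color covering up to
 *                32 blocks
 *   - 4 colors:  two RGB555 colors (first with MSB clear, second with MSB set)
 *                followed by sixteen 2-bit palette indices
 *   - 16 colors: sixteen raw RGB555 pixels, each with the MSB clear
 */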
static void rpza_encode_stream(RpzaContext *s, const AVFrame *pict)
{
    BlockInfo bi;
    int block_counter = 0;
    int n_blocks;
    int total_blocks;
    int prev_block_offset;
    int block_offset = 0;
    uint8_t min = 0, max = 0;
    channel_offset chan;
    int i;
    int tmp_min, tmp_max;
    int total_rgb[3];
    uint8_t avg_color[3];
    int pixel_count;
    uint8_t min_color[3], max_color[3];
    double slope, y_intercept, correlation_coef;
    uint16_t *src_pixels = (uint16_t *)pict->data[0];
    uint16_t *prev_pixels = (uint16_t *)s->prev_frame->data[0];

    /* Number of 4x4 blocks in frame. */
    total_blocks = ((s->frame_width + 3) / 4) * ((s->frame_height + 3) / 4);

    bi.image_width = s->frame_width;
    bi.image_height = s->frame_height;
    bi.rowstride = pict->linesize[0] / 2;

    bi.blocks_per_row = (s->frame_width + 3) / 4;

    while (block_counter < total_blocks) {
        // SKIP CHECK
        // make sure we have a valid previous frame and we're not writing
        // a key frame
        if (!s->first_frame) {
            n_blocks = 0;
            prev_block_offset = 0;

            while (n_blocks < 32 && block_counter + n_blocks < total_blocks) {

                block_offset = get_block_info(&bi, block_counter + n_blocks);

                // multi-block opcodes cannot span multiple rows.
                // If we're starting a new row, break out and write the opcode
                /* TODO: Should eventually use bi.row here to determine when a
                   row break occurs, but that is currently breaking the
                   QuickTime player. This is probably due to a bug in the
                   way I'm calculating the current row.
                 */
                if (prev_block_offset && block_offset - prev_block_offset > 12) {
                    break;
                }

                prev_block_offset = block_offset;

                if (compare_blocks(&prev_pixels[block_offset],
                                   &src_pixels[block_offset], &bi, s->skip_frame_thresh) != 0) {
                    // write out the skippable blocks collected so far
                    if (n_blocks) {

                        // write skip opcode
                        put_bits(&s->pb, 8, 0x80 | (n_blocks - 1));
                        block_counter += n_blocks;

                        goto post_skip;
                    }
                    break;
                }

                /*
                 * NOTE: we don't update skipped blocks in the previous frame buffer
                 * since a skipped block always needs to be compared against the first
                 * skipped block to avoid artifacts during gradual fade-ins/outs.
                 */

                // update_block_in_prev_frame(&src_pixels[block_offset],
                //     &prev_pixels[block_offset], &bi, block_counter + n_blocks);

                n_blocks++;
            }

            // we're either at the end of the frame or we've reached the maximum
            // of 32 blocks in a run. Write out the run.
            if (n_blocks) {
                // write skip opcode
                put_bits(&s->pb, 8, 0x80 | (n_blocks - 1));
                block_counter += n_blocks;

                continue;
            }

        } else {
            block_offset = get_block_info(&bi, block_counter);
        }
post_skip:

        // ONE COLOR CHECK
        if (update_block_stats(s, &bi, &src_pixels[block_offset],
                               min_color, max_color,
                               total_rgb, &pixel_count, avg_color, 1)) {
            prev_block_offset = block_offset;

            n_blocks = 1;

            /* update this block in the previous frame buffer */
            update_block_in_prev_frame(&src_pixels[block_offset],
                                       &prev_pixels[block_offset], &bi, block_counter + n_blocks);

            // check for subsequent blocks with the same color
            while (n_blocks < 32 && block_counter + n_blocks < total_blocks) {
                block_offset = get_block_info(&bi, block_counter + n_blocks);

                // multi-block opcodes cannot span multiple rows.
                // If we've hit the end of a row, break out and write the opcode
                if (block_offset - prev_block_offset > 12) {
                    break;
                }

                if (!update_block_stats(s, &bi, &src_pixels[block_offset],
                                        min_color, max_color,
                                        total_rgb, &pixel_count, avg_color, 0)) {
                    break;
                }

                prev_block_offset = block_offset;

                /* update this block in the previous frame buffer */
                update_block_in_prev_frame(&src_pixels[block_offset],
                                           &prev_pixels[block_offset], &bi, block_counter + n_blocks);

                n_blocks++;
            }

            // write one color opcode.
            put_bits(&s->pb, 8, 0xa0 | (n_blocks - 1));
            // write color to encode.
            put_bits(&s->pb, 16, rgb24_to_rgb555(avg_color));
            // skip past the blocks we've just encoded.
            block_counter += n_blocks;
        } else { // FOUR COLOR CHECK
            int err = 0;

            // get max component diff for block
            get_max_component_diff(&bi, &src_pixels[block_offset], &min, &max, &chan);

            min_color[0] = 0;
            max_color[0] = 0;
            min_color[1] = 0;
            max_color[1] = 0;
            min_color[2] = 0;
            max_color[2] = 0;

            // run least squares against other two components
            for (i = 0; i < 3; i++) {
                if (i == chan) {
                    min_color[i] = min;
                    max_color[i] = max;
                    continue;
                }

                slope = y_intercept = correlation_coef = 0;

                if (leastsquares(&src_pixels[block_offset], &bi, chan, i,
                                 &slope, &y_intercept, &correlation_coef)) {
                    min_color[i] = GET_CHAN(src_pixels[block_offset], i);
                    max_color[i] = GET_CHAN(src_pixels[block_offset], i);
                } else {
                    tmp_min = (int)(0.5 + min * slope + y_intercept);
                    tmp_max = (int)(0.5 + max * slope + y_intercept);

                    av_assert0(tmp_min <= tmp_max);
                    // clamp min and max color values
                    tmp_min = av_clip_uint8(tmp_min);
                    tmp_max = av_clip_uint8(tmp_max);

                    err = FFMAX(calc_lsq_max_fit_error(&src_pixels[block_offset], &bi,
                                                       min, max, tmp_min, tmp_max, chan, i), err);

                    min_color[i] = tmp_min;
                    max_color[i] = tmp_max;
                }
            }

            if (err > s->sixteen_color_thresh) { // DO SIXTEEN COLOR BLOCK
                uint16_t *row_ptr;
                int rgb555;

                block_offset = get_block_info(&bi, block_counter);

                row_ptr = &src_pixels[block_offset];

                for (int y = 0; y < 4; y++) {
                    for (int x = 0; x < 4; x++) {
                        rgb555 = row_ptr[x] & ~0x8000;

                        put_bits(&s->pb, 16, rgb555);
                    }
                    row_ptr += bi.rowstride;
                }

                block_counter++;
            } else { // FOUR COLOR BLOCK
                block_counter += encode_four_color_block(min_color, max_color,
                                                         &s->pb, &src_pixels[block_offset], &bi);
            }

            /* update this block in the previous frame buffer */
            update_block_in_prev_frame(&src_pixels[block_offset],
                                       &prev_pixels[block_offset], &bi, block_counter);
        }
    }
}

static int rpza_encode_init(AVCodecContext *avctx)
{
    RpzaContext *s = avctx->priv_data;

    s->frame_width = avctx->width;
    s->frame_height = avctx->height;

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    return 0;
}

static int rpza_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *frame, int *got_packet)
{
    RpzaContext *s = avctx->priv_data;
    const AVFrame *pict = frame;
    uint8_t *buf;
    int ret = ff_alloc_packet(avctx, pkt, 6LL * avctx->height * avctx->width);

    if (ret < 0)
        return ret;

    init_put_bits(&s->pb, pkt->data, pkt->size);

    // skip 4 byte header, write it later once the size of the chunk is known
    put_bits32(&s->pb, 0x00);

    if (!s->prev_frame->data[0]) {
        s->first_frame = 1;
        s->prev_frame->format = pict->format;
        s->prev_frame->width  = pict->width;
        s->prev_frame->height = pict->height;
        ret = av_frame_get_buffer(s->prev_frame, 0);
        if (ret < 0)
            return ret;
    } else {
        s->first_frame = 0;
    }

    rpza_encode_stream(s, pict);

    flush_put_bits(&s->pb);

    av_shrink_packet(pkt, put_bytes_output(&s->pb));
    buf = pkt->data;

    // write header opcode
    buf[0] = 0xe1; // chunk opcode

    // write chunk length
    AV_WB24(buf + 1, pkt->size);

    *got_packet = 1;

    return 0;
}

static int rpza_encode_end(AVCodecContext *avctx)
{
    RpzaContext *s = (RpzaContext *)avctx->priv_data;

    av_frame_free(&s->prev_frame);

    return 0;
}

#define OFFSET(x) offsetof(RpzaContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "skip_frame_thresh",         NULL, OFFSET(skip_frame_thresh),         AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
    { "start_one_color_thresh",    NULL, OFFSET(start_one_color_thresh),    AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
    { "continue_one_color_thresh", NULL, OFFSET(continue_one_color_thresh), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, VE},
    { "sixteen_color_thresh",      NULL, OFFSET(sixteen_color_thresh),      AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
    { NULL },
};

static const AVClass rpza_class = {
    .class_name = "rpza",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const AVCodec ff_rpza_encoder = {
    .name           = "rpza",
    .long_name      = NULL_IF_CONFIG_SMALL("QuickTime video (RPZA)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RPZA,
    .priv_data_size = sizeof(RpzaContext),
    .priv_class     = &rpza_class,
    .init           = rpza_encode_init,
    .encode2        = rpza_encode_frame,
    .close          = rpza_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_RGB555,
                                                     AV_PIX_FMT_NONE },
};