FFmpeg
rpzaenc.c
1 /*
2  * QuickTime RPZA Video Encoder
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file rpzaenc.c
23  * QT RPZA Video Encoder by Todd Kirby <doubleshot@pacbell.net> and David Adler
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/common.h"
28 #include "libavutil/opt.h"
29 
30 #include "avcodec.h"
31 #include "codec_internal.h"
32 #include "encode.h"
33 #include "mathops.h"
34 #include "put_bits.h"
35 
36 typedef struct RpzaContext {
37  AVClass *avclass;
38 
39  int skip_frame_thresh;
40  int start_one_color_thresh;
41  int continue_one_color_thresh;
42  int sixteen_color_thresh;
43 
44  AVFrame *prev_frame; // buffer for previous source frame
45  PutBitContext pb; // buffer for encoded frame data.
46 
47  int frame_width; // width in pixels of source frame
48  int frame_height; // height in pixels of source frame
49 
50  int first_frame; // flag set to one when the first frame is being processed
51  // so that comparisons with previous frame data are not attempted
52 } RpzaContext;
53 
54 typedef enum channel_offset {
55  RED = 2,
56  GREEN = 1,
57  BLUE = 0,
58 } channel_offset;
59 
60 typedef struct rgb {
61  uint8_t r;
62  uint8_t g;
63  uint8_t b;
64 } rgb;
65 
66 #define SQR(x) ((x) * (x))
67 
68 /* 15 bit components */
69 #define GET_CHAN(color, chan) (((color) >> ((chan) * 5) & 0x1F))
70 #define R(color) GET_CHAN(color, RED)
71 #define G(color) GET_CHAN(color, GREEN)
72 #define B(color) GET_CHAN(color, BLUE)
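/*
 * Illustrative note (editor's addition, not in the original source): pixels
 * are RGB555, so each channel occupies 5 bits of a 16-bit word. For example,
 * with color = 0x7C1F (binary 0 11111 00000 11111):
 *   R(0x7C1F) = (0x7C1F >> 10) & 0x1F = 0x1F
 *   G(0x7C1F) = (0x7C1F >>  5) & 0x1F = 0x00
 *   B(0x7C1F) = (0x7C1F >>  0) & 0x1F = 0x1F
 */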
73 
74 typedef struct BlockInfo {
75  int row;
76  int col;
77  int block_width;
78  int block_height;
79  int image_width;
80  int image_height;
81  int block_index;
82  uint16_t start;
83  int rowstride;
84  int prev_rowstride;
85  int blocks_per_row;
86  int total_blocks;
87 } BlockInfo;
88 
89 static void get_colors(const uint8_t *min, const uint8_t *max, uint8_t color4[4][3])
90 {
91  uint8_t step;
92 
93  color4[0][0] = min[0];
94  color4[0][1] = min[1];
95  color4[0][2] = min[2];
96 
97  color4[3][0] = max[0];
98  color4[3][1] = max[1];
99  color4[3][2] = max[2];
100 
101  // red components
102  step = (color4[3][0] - color4[0][0] + 1) / 3;
103  color4[1][0] = color4[0][0] + step;
104  color4[2][0] = color4[3][0] - step;
105 
106  // green components
107  step = (color4[3][1] - color4[0][1] + 1) / 3;
108  color4[1][1] = color4[0][1] + step;
109  color4[2][1] = color4[3][1] - step;
110 
111  // blue components
112  step = (color4[3][2] - color4[0][2] + 1) / 3;
113  color4[1][2] = color4[0][2] + step;
114  color4[2][2] = color4[3][2] - step;
115 }
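/*
 * Illustrative example (editor's addition, not in the original source): with
 * min = {0, 0, 0} and max = {30, 15, 3}, the red step is (30 - 0 + 1) / 3 = 10,
 * giving red values 0, 10, 20, 30; green: step (15 + 1) / 3 = 5 -> 0, 5, 10, 15;
 * blue: step (3 + 1) / 3 = 1 -> 0, 1, 2, 3. The two intermediate palette
 * entries are thus roughly evenly spaced between the extreme colors.
 */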
116 
117 /* Fill BlockInfo struct with information about a 4x4 block of the image */
118 static int get_block_info(BlockInfo *bi, int block, int prev_frame)
119 {
120  bi->row = block / bi->blocks_per_row;
121  bi->col = block % bi->blocks_per_row;
122 
123  // test for right edge block
124  if (bi->col == bi->blocks_per_row - 1 && (bi->image_width % 4) != 0) {
125  bi->block_width = bi->image_width % 4;
126  } else {
127  bi->block_width = 4;
128  }
129 
130  // test for bottom edge block
131  if (bi->row == (bi->image_height / 4) && (bi->image_height % 4) != 0) {
132  bi->block_height = bi->image_height % 4;
133  } else {
134  bi->block_height = 4;
135  }
136 
137  return block ? (bi->col * 4) + (bi->row * (prev_frame ? bi->prev_rowstride : bi->rowstride) * 4) : 0;
138 }
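/*
 * Illustrative note (editor's addition, not in the original source): the
 * return value is the offset, in uint16_t pixels, of the block's top-left
 * pixel within the frame. For example, assuming blocks_per_row = 160 and
 * rowstride = 640, block number 161 is at row 1, col 1, so its offset is
 * 1 * 4 + 1 * 640 * 4 = 2564.
 */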
139 
140 static uint16_t rgb24_to_rgb555(const uint8_t *rgb24)
141 {
142  uint16_t rgb555 = 0;
143  uint32_t r, g, b;
144 
145  r = rgb24[0];
146  g = rgb24[1];
147  b = rgb24[2];
148 
149  rgb555 |= (r << 10);
150  rgb555 |= (g << 5);
151  rgb555 |= (b << 0);
152 
153  return rgb555;
154 }
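/*
 * Illustrative note (editor's addition, not in the original source): despite
 * the name, callers pass components that are already reduced to 5 bits each
 * (min_color/max_color/avg_color are built from R()/G()/B() values), so this
 * simply packs them, e.g. {0x1F, 0x00, 0x1F} -> (0x1F << 10) | 0x1F = 0x7C1F.
 */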
155 
156 /*
157  * Returns the sum of squared per-component differences between two colors
158  */
159 static int diff_colors(const uint8_t *colorA, const uint8_t *colorB)
160 {
161  int tot;
162 
163  tot = SQR(colorA[0] - colorB[0]);
164  tot += SQR(colorA[1] - colorB[1]);
165  tot += SQR(colorA[2] - colorB[2]);
166 
167  return tot;
168 }
169 
170 /*
171  * Returns the maximum channel difference
172  */
173 static int max_component_diff(const uint16_t *colorA, const uint16_t *colorB)
174 {
175  int diff, max = 0;
176 
177  diff = FFABS(R(colorA[0]) - R(colorB[0]));
178  if (diff > max) {
179  max = diff;
180  }
181  diff = FFABS(G(colorA[0]) - G(colorB[0]));
182  if (diff > max) {
183  max = diff;
184  }
185  diff = FFABS(B(colorA[0]) - B(colorB[0]));
186  if (diff > max) {
187  max = diff;
188  }
189  return max;
190 }
191 
192 /*
193  * Find the channel that has the largest difference between minimum and maximum
194  * color values. Put the minimum value in min, maximum in max and the channel
195  * in chan.
196  */
197 static void get_max_component_diff(const BlockInfo *bi, const uint16_t *block_ptr,
198  uint8_t *min, uint8_t *max, channel_offset *chan)
199 {
200  int x, y;
201  uint8_t min_r, max_r, min_g, max_g, min_b, max_b;
202  uint8_t r, g, b;
203 
204  // fix warning about uninitialized vars
205  min_r = min_g = min_b = UINT8_MAX;
206  max_r = max_g = max_b = 0;
207 
208  // loop thru and compare pixels
209  for (y = 0; y < bi->block_height; y++) {
210  for (x = 0; x < bi->block_width; x++) {
211  // TODO: optimize
212  min_r = FFMIN(R(block_ptr[x]), min_r);
213  min_g = FFMIN(G(block_ptr[x]), min_g);
214  min_b = FFMIN(B(block_ptr[x]), min_b);
215 
216  max_r = FFMAX(R(block_ptr[x]), max_r);
217  max_g = FFMAX(G(block_ptr[x]), max_g);
218  max_b = FFMAX(B(block_ptr[x]), max_b);
219  }
220  block_ptr += bi->rowstride;
221  }
222 
223  r = max_r - min_r;
224  g = max_g - min_g;
225  b = max_b - min_b;
226 
227  if (r > g && r > b) {
228  *max = max_r;
229  *min = min_r;
230  *chan = RED;
231  } else if (g > b && g >= r) {
232  *max = max_g;
233  *min = min_g;
234  *chan = GREEN;
235  } else {
236  *max = max_b;
237  *min = min_b;
238  *chan = BLUE;
239  }
240 }
241 
242 /*
243  * Compare two 4x4 blocks to determine whether any pixel's maximum component
244  * difference reaches the thresh parameter. Returns -1 if the threshold is
245  * reached, zero otherwise.
246  */
247 static int compare_blocks(const uint16_t *block1, const uint16_t *block2,
248  const BlockInfo *bi, int thresh)
249 {
250  int x, y, diff = 0;
251  for (y = 0; y < bi->block_height; y++) {
252  for (x = 0; x < bi->block_width; x++) {
253  diff = max_component_diff(&block1[x], &block2[x]);
254  if (diff >= thresh) {
255  return -1;
256  }
257  }
258  block1 += bi->prev_rowstride;
259  block2 += bi->rowstride;
260  }
261  return 0;
262 }
263 
264 /*
265  * Determine the fit of one channel to another within a 4x4 block. This
266  * is used to determine the best palette choices for 4-color encoding.
267  */
268 static int leastsquares(const uint16_t *block_ptr, const BlockInfo *bi,
269  channel_offset xchannel, channel_offset ychannel,
270  int *slope, int *y_intercept, int *correlation_coef)
271 {
272  int sumx = 0, sumy = 0, sumx2 = 0, sumy2 = 0, sumxy = 0,
273  sumx_sq = 0, sumy_sq = 0, tmp, tmp2;
274  int i, j, count;
275  uint8_t x, y;
276 
277  count = bi->block_height * bi->block_width;
278 
279  if (count < 2)
280  return -1;
281 
282  for (i = 0; i < bi->block_height; i++) {
283  for (j = 0; j < bi->block_width; j++) {
284  x = GET_CHAN(block_ptr[j], xchannel);
285  y = GET_CHAN(block_ptr[j], ychannel);
286  sumx += x;
287  sumy += y;
288  sumx2 += x * x;
289  sumy2 += y * y;
290  sumxy += x * y;
291  }
292  block_ptr += bi->rowstride;
293  }
294 
295  sumx_sq = sumx * sumx;
296  tmp = (count * sumx2 - sumx_sq);
297 
298  // guard against div/0
299  if (tmp == 0)
300  return -2;
301 
302  sumy_sq = sumy * sumy;
303 
304  *slope = (sumx * sumy - sumxy) / tmp;
305  *y_intercept = (sumy - (*slope) * sumx) / count;
306 
307  tmp2 = count * sumy2 - sumy_sq;
308  if (tmp2 == 0) {
309  *correlation_coef = 0;
310  } else {
311  *correlation_coef = (count * sumxy - sumx * sumy) /
312  ff_sqrt((unsigned)tmp * tmp2);
313  }
314 
315  return 0; // success
316 }
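/*
 * Illustrative note (editor's addition, not in the original source): the fit
 * y ~= slope * x + y_intercept is later evaluated at the dominant channel's
 * extremes (tmp_min = 1 + min * slope + y_intercept, and likewise for max)
 * to predict the remaining channels of the two extreme palette colors.
 */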
317 
318 /*
319  * Determine the amount of error in the leastsquares fit.
320  */
321 static int calc_lsq_max_fit_error(const uint16_t *block_ptr, const BlockInfo *bi,
322  int min, int max, int tmp_min, int tmp_max,
323  channel_offset xchannel, channel_offset ychannel)
324 {
325  int i, j, x, y;
326  int err;
327  int max_err = 0;
328 
329  for (i = 0; i < bi->block_height; i++) {
330  for (j = 0; j < bi->block_width; j++) {
331  int x_inc, lin_y, lin_x;
332  x = GET_CHAN(block_ptr[j], xchannel);
333  y = GET_CHAN(block_ptr[j], ychannel);
334 
335  /* calculate x_inc as the 4-color index (0..3) */
336  x_inc = (x - min) * 3 / (max - min) + 1;
337  x_inc = FFMAX(FFMIN(3, x_inc), 0);
338 
339  /* calculate lin_y corresponding to x_inc */
340  lin_y = tmp_min + (tmp_max - tmp_min) * x_inc / 3 + 1;
341 
342  err = FFABS(lin_y - y);
343  if (err > max_err)
344  max_err = err;
345 
346  /* calculate lin_x corresponding to x_inc */
347  lin_x = min + (max - min) * x_inc / 3 + 1;
348 
349  err = FFABS(lin_x - x);
350  if (err > max_err)
351  max_err += err;
352  }
353  block_ptr += bi->rowstride;
354  }
355 
356  return max_err;
357 }
358 
359 /*
360  * Find the closest match to a color within the 4-color palette
361  */
362 static int match_color(const uint16_t *color, uint8_t colors[4][3])
363 {
364  int ret = 0;
365  int smallest_variance = INT_MAX;
366  uint8_t dithered_color[3];
367 
368  for (int channel = 0; channel < 3; channel++) {
369  dithered_color[channel] = GET_CHAN(color[0], channel);
370  }
371 
372  for (int palette_entry = 0; palette_entry < 4; palette_entry++) {
373  int variance = diff_colors(dithered_color, colors[palette_entry]);
374 
375  if (variance < smallest_variance) {
376  smallest_variance = variance;
377  ret = palette_entry;
378  }
379  }
380 
381  return ret;
382 }
383 
384 /*
385  * Encode a block using the 4-color opcode and palette. Returns the number of
386  * blocks encoded (until multi-block 4-color runs are implemented this will
387  * always be 1).
388  */
389 static int encode_four_color_block(const uint8_t *min_color, const uint8_t *max_color,
390  PutBitContext *pb, const uint16_t *block_ptr, const BlockInfo *bi)
391 {
392  const int y_size = FFMIN(4, bi->image_height - bi->row * 4);
393  const int x_size = FFMIN(4, bi->image_width - bi->col * 4);
394  uint8_t color4[4][3];
395  uint16_t rounded_max, rounded_min;
396  int idx;
397 
398  // round min and max wider
399  rounded_min = rgb24_to_rgb555(min_color);
400  rounded_max = rgb24_to_rgb555(max_color);
401 
402  // put a and b colors
403  // encode 4 colors = first 16 bit color with MSB zeroed and...
404  put_bits(pb, 16, rounded_max & ~0x8000);
405  // ...second 16 bit color with MSB on.
406  put_bits(pb, 16, rounded_min | 0x8000);
407 
408  get_colors(min_color, max_color, color4);
409 
410  for (int y = 0; y < y_size; y++) {
411  for (int x = 0; x < x_size; x++) {
412  idx = match_color(&block_ptr[x], color4);
413  put_bits(pb, 2, idx);
414  }
415 
416  for (int x = x_size; x < 4; x++)
417  put_bits(pb, 2, idx);
418  block_ptr += bi->rowstride;
419  }
420 
421  for (int y = y_size; y < 4; y++) {
422  for (int x = 0; x < 4; x++)
423  put_bits(pb, 2, 0);
424  }
425  return 1; // num blocks encoded
426 }
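/*
 * Illustrative note (editor's addition, not in the original source): as
 * written above, a single 4-color block occupies 8 bytes in the stream:
 * color A (16 bits, MSB cleared), color B (16 bits, MSB set), then 16
 * two-bit palette indices in raster order (4 bytes).
 */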
427 
428 /*
429  * Copy a 4x4 block from the current frame buffer to the previous frame buffer.
430  */
431 static void update_block_in_prev_frame(const uint16_t *src_pixels,
432  uint16_t *dest_pixels,
433  const BlockInfo *bi, int block_counter)
434 {
435  const int y_size = FFMIN(4, bi->image_height - bi->row * 4);
436  const int x_size = FFMIN(4, bi->image_width - bi->col * 4) * 2;
437 
438  for (int y = 0; y < y_size; y++) {
439  memcpy(dest_pixels, src_pixels, x_size);
440  dest_pixels += bi->prev_rowstride;
441  src_pixels += bi->rowstride;
442  }
443 }
444 
445 /*
446  * Update statistics for the specified block. If first_block,
447  * it initializes the statistics. Otherwise it updates the statistics IF THIS
448  * BLOCK IS SUITABLE TO CONTINUE A 1-COLOR RUN. That is, it checks whether
449  * the range of colors seen since the routine was last called with
450  * first_block != 0 is close enough in intensity to be represented by a single color.
451 
452  * The routine returns 0 if this block is too different to be part of
453  * the same run of 1-color blocks. The routine returns 1 if this
454  * block can be part of the same 1-color block run.
455 
456  * If the routine returns 1, it also updates its arguments to include
457  * the statistics of this block. Otherwise, the stats are unchanged
458  * and don't include the current block.
459  */
460 static int update_block_stats(RpzaContext *s, const BlockInfo *bi, const uint16_t *block,
461  uint8_t min_color[3], uint8_t max_color[3],
462  int *total_rgb, int *total_pixels,
463  uint8_t avg_color[3], int first_block)
464 {
465  int x, y;
466  int is_in_range;
467  int total_pixels_blk;
468  int threshold;
469 
470  uint8_t min_color_blk[3], max_color_blk[3];
471  int total_rgb_blk[3];
472  uint8_t avg_color_blk[3];
473 
474  if (first_block) {
475  min_color[0] = UINT8_MAX;
476  min_color[1] = UINT8_MAX;
477  min_color[2] = UINT8_MAX;
478  max_color[0] = 0;
479  max_color[1] = 0;
480  max_color[2] = 0;
481  total_rgb[0] = 0;
482  total_rgb[1] = 0;
483  total_rgb[2] = 0;
484  *total_pixels = 0;
485  threshold = s->start_one_color_thresh;
486  } else {
487  threshold = s->continue_one_color_thresh;
488  }
489 
490  /*
491  The *_blk variables will include the current block.
492  Initialize them based on the blocks so far.
493  */
494  min_color_blk[0] = min_color[0];
495  min_color_blk[1] = min_color[1];
496  min_color_blk[2] = min_color[2];
497  max_color_blk[0] = max_color[0];
498  max_color_blk[1] = max_color[1];
499  max_color_blk[2] = max_color[2];
500  total_rgb_blk[0] = total_rgb[0];
501  total_rgb_blk[1] = total_rgb[1];
502  total_rgb_blk[2] = total_rgb[2];
503  total_pixels_blk = *total_pixels + bi->block_height * bi->block_width;
504 
505  /*
506  Update stats for this block's pixels
507  */
508  for (y = 0; y < bi->block_height; y++) {
509  for (x = 0; x < bi->block_width; x++) {
510  total_rgb_blk[0] += R(block[x]);
511  total_rgb_blk[1] += G(block[x]);
512  total_rgb_blk[2] += B(block[x]);
513 
514  min_color_blk[0] = FFMIN(R(block[x]), min_color_blk[0]);
515  min_color_blk[1] = FFMIN(G(block[x]), min_color_blk[1]);
516  min_color_blk[2] = FFMIN(B(block[x]), min_color_blk[2]);
517 
518  max_color_blk[0] = FFMAX(R(block[x]), max_color_blk[0]);
519  max_color_blk[1] = FFMAX(G(block[x]), max_color_blk[1]);
520  max_color_blk[2] = FFMAX(B(block[x]), max_color_blk[2]);
521  }
522  block += bi->rowstride;
523  }
524 
525  /*
526  Calculate average color including current block.
527  */
528  avg_color_blk[0] = total_rgb_blk[0] / total_pixels_blk;
529  avg_color_blk[1] = total_rgb_blk[1] / total_pixels_blk;
530  avg_color_blk[2] = total_rgb_blk[2] / total_pixels_blk;
531 
532  /*
533  Are all the pixels within threshold of the average color?
534  */
535  is_in_range = (max_color_blk[0] - avg_color_blk[0] <= threshold &&
536  max_color_blk[1] - avg_color_blk[1] <= threshold &&
537  max_color_blk[2] - avg_color_blk[2] <= threshold &&
538  avg_color_blk[0] - min_color_blk[0] <= threshold &&
539  avg_color_blk[1] - min_color_blk[1] <= threshold &&
540  avg_color_blk[2] - min_color_blk[2] <= threshold);
541 
542  if (is_in_range) {
543  /*
544  Set the output variables to include this block.
545  */
546  min_color[0] = min_color_blk[0];
547  min_color[1] = min_color_blk[1];
548  min_color[2] = min_color_blk[2];
549  max_color[0] = max_color_blk[0];
550  max_color[1] = max_color_blk[1];
551  max_color[2] = max_color_blk[2];
552  total_rgb[0] = total_rgb_blk[0];
553  total_rgb[1] = total_rgb_blk[1];
554  total_rgb[2] = total_rgb_blk[2];
555  *total_pixels = total_pixels_blk;
556  avg_color[0] = avg_color_blk[0];
557  avg_color[1] = avg_color_blk[1];
558  avg_color[2] = avg_color_blk[2];
559  }
560 
561  return is_in_range;
562 }
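/*
 * Illustrative note (editor's addition, not in the original source): with
 * continue_one_color_thresh = 0 (the default), a block can only extend a
 * one-color run if every pixel equals the running average exactly; raising
 * the threshold lets blocks whose channels stay within +/- threshold of the
 * running average join the run.
 */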
563 
564 static void rpza_encode_stream(RpzaContext *s, const AVFrame *pict)
565 {
566  BlockInfo bi;
567  int block_counter = 0;
568  int n_blocks;
569  int total_blocks;
570  int prev_block_offset;
571  int block_offset = 0;
572  int pblock_offset = 0;
573  uint8_t min = 0, max = 0;
574  channel_offset chan;
575  int i;
576  int tmp_min, tmp_max;
577  int total_rgb[3];
578  uint8_t avg_color[3];
579  int pixel_count;
580  uint8_t min_color[3], max_color[3];
581  int slope, y_intercept, correlation_coef;
582  const uint16_t *src_pixels = (const uint16_t *)pict->data[0];
583  uint16_t *prev_pixels = (uint16_t *)s->prev_frame->data[0];
584 
585  /* Number of 4x4 blocks in frame. */
586  total_blocks = ((s->frame_width + 3) / 4) * ((s->frame_height + 3) / 4);
587 
588  bi.image_width = s->frame_width;
589  bi.image_height = s->frame_height;
590  bi.rowstride = pict->linesize[0] / 2;
591  bi.prev_rowstride = s->prev_frame->linesize[0] / 2;
592 
593  bi.blocks_per_row = (s->frame_width + 3) / 4;
594 
595  while (block_counter < total_blocks) {
596  // SKIP CHECK
597  // make sure we have a valid previous frame and we're not writing
598  // a key frame
599  if (!s->first_frame) {
600  n_blocks = 0;
601  prev_block_offset = 0;
602 
603  while (n_blocks < 32 && block_counter + n_blocks < total_blocks) {
604  block_offset = get_block_info(&bi, block_counter + n_blocks, 0);
605  pblock_offset = get_block_info(&bi, block_counter + n_blocks, 1);
606 
607  // multi-block opcodes cannot span multiple rows.
608  // If we're starting a new row, break out and write the opcode
609  /* TODO: Should eventually use bi.row here to determine when a
610  row break occurs, but that is currently breaking the
611  quicktime player. This is probably due to a bug in the
612  way I'm calculating the current row.
613  */
614  if (prev_block_offset && block_offset - prev_block_offset > 12) {
615  break;
616  }
617 
618  prev_block_offset = block_offset;
619 
620  if (compare_blocks(&prev_pixels[pblock_offset],
621  &src_pixels[block_offset], &bi, s->skip_frame_thresh) != 0) {
622  // write out skippable blocks
623  if (n_blocks) {
624 
625  // write skip opcode
626  put_bits(&s->pb, 8, 0x80 | (n_blocks - 1));
627  block_counter += n_blocks;
628 
629  goto post_skip;
630  }
631  break;
632  }
633 
634  /*
635  * NOTE: we don't update skipped blocks in the previous frame buffer
636  * since skipped blocks always need to be compared against the first skipped
637  * block to avoid artifacts during gradual fade in/outs.
638  */
639 
640  // update_block_in_prev_frame(&src_pixels[block_offset],
641  // &prev_pixels[pblock_offset], &bi, block_counter + n_blocks);
642 
643  n_blocks++;
644  }
645 
646  // we're either at the end of the frame or we've reached the maximum
647  // of 32 blocks in a run. Write out the run.
648  if (n_blocks) {
649  // write skip opcode
650  put_bits(&s->pb, 8, 0x80 | (n_blocks - 1));
651  block_counter += n_blocks;
652 
653  continue;
654  }
655 
656  } else {
657  block_offset = get_block_info(&bi, block_counter, 0);
658  pblock_offset = get_block_info(&bi, block_counter, 1);
659  }
660 post_skip :
661 
662  // ONE COLOR CHECK
663  if (update_block_stats(s, &bi, &src_pixels[block_offset],
664  min_color, max_color,
665  total_rgb, &pixel_count, avg_color, 1)) {
666  prev_block_offset = block_offset;
667 
668  n_blocks = 1;
669 
670  /* update this block in the previous frame buffer */
671  update_block_in_prev_frame(&src_pixels[block_offset],
672  &prev_pixels[pblock_offset], &bi, block_counter + n_blocks);
673 
674  // check for subsequent blocks with the same color
675  while (n_blocks < 32 && block_counter + n_blocks < total_blocks) {
676  block_offset = get_block_info(&bi, block_counter + n_blocks, 0);
677  pblock_offset = get_block_info(&bi, block_counter + n_blocks, 1);
678 
679  // multi-block opcodes cannot span multiple rows.
680  // If we've hit end of a row, break out and write the opcode
681  if (block_offset - prev_block_offset > 12) {
682  break;
683  }
684 
685  if (!update_block_stats(s, &bi, &src_pixels[block_offset],
686  min_color, max_color,
687  total_rgb, &pixel_count, avg_color, 0)) {
688  break;
689  }
690 
691  prev_block_offset = block_offset;
692 
693  /* update this block in the previous frame buffer */
694  update_block_in_prev_frame(&src_pixels[block_offset],
695  &prev_pixels[pblock_offset], &bi, block_counter + n_blocks);
696 
697  n_blocks++;
698  }
699 
700  // write one color opcode.
701  put_bits(&s->pb, 8, 0xa0 | (n_blocks - 1));
702  // write color to encode.
703  put_bits(&s->pb, 16, rgb24_to_rgb555(avg_color));
704  // skip past the blocks we've just encoded.
705  block_counter += n_blocks;
706  } else { // FOUR COLOR CHECK
707  int err = 0;
708 
709  // get max component diff for block
710  get_max_component_diff(&bi, &src_pixels[block_offset], &min, &max, &chan);
711 
712  min_color[0] = 0;
713  max_color[0] = 0;
714  min_color[1] = 0;
715  max_color[1] = 0;
716  min_color[2] = 0;
717  max_color[2] = 0;
718 
719  // run least squares against other two components
720  for (i = 0; i < 3; i++) {
721  if (i == chan) {
722  min_color[i] = min;
723  max_color[i] = max;
724  continue;
725  }
726 
727  slope = y_intercept = correlation_coef = 0;
728 
729  if (leastsquares(&src_pixels[block_offset], &bi, chan, i,
730  &slope, &y_intercept, &correlation_coef)) {
731  min_color[i] = GET_CHAN(src_pixels[block_offset], i);
732  max_color[i] = GET_CHAN(src_pixels[block_offset], i);
733  } else {
734  tmp_min = 1 + min * slope + y_intercept;
735  tmp_max = 1 + max * slope + y_intercept;
736 
737  av_assert0(tmp_min <= tmp_max);
738  // clamp min and max color values
739  tmp_min = av_clip_uint8(tmp_min);
740  tmp_max = av_clip_uint8(tmp_max);
741 
742  err = FFMAX(calc_lsq_max_fit_error(&src_pixels[block_offset], &bi,
743  min, max, tmp_min, tmp_max, chan, i), err);
744 
745  min_color[i] = tmp_min;
746  max_color[i] = tmp_max;
747  }
748  }
749 
750  if (err > s->sixteen_color_thresh) { // DO SIXTEEN COLOR BLOCK
751  const uint16_t *row_ptr;
752  int y_size, rgb555;
753 
754  block_offset = get_block_info(&bi, block_counter, 0);
755  pblock_offset = get_block_info(&bi, block_counter, 1);
756 
757  row_ptr = &src_pixels[block_offset];
758  y_size = FFMIN(4, bi.image_height - bi.row * 4);
759 
760  for (int y = 0; y < y_size; y++) {
761  for (int x = 0; x < 4; x++) {
762  rgb555 = row_ptr[x] & ~0x8000;
763 
764  put_bits(&s->pb, 16, rgb555);
765  }
766  row_ptr += bi.rowstride;
767  }
768 
769  for (int y = y_size; y < 4; y++) {
770  for (int x = 0; x < 4; x++)
771  put_bits(&s->pb, 16, 0);
772  }
773 
774  block_counter++;
775  } else { // FOUR COLOR BLOCK
776  block_counter += encode_four_color_block(min_color, max_color,
777  &s->pb, &src_pixels[block_offset], &bi);
778  }
779 
780  /* update this block in the previous frame buffer */
781  update_block_in_prev_frame(&src_pixels[block_offset],
782  &prev_pixels[pblock_offset], &bi, block_counter);
783  }
784  }
785 }
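/*
 * Illustrative summary (editor's addition, not in the original source): for
 * each 4x4 block the encoder tries, in order, a skip run (block close enough
 * to the previous frame, opcode 0x80 | (run - 1)), a one-color run (all
 * pixels within the one-color thresholds of their average, opcode
 * 0xa0 | (run - 1) plus one RGB555 color), and otherwise a least-squares fit
 * of the two extreme colors that selects either a 4-color block or, if the
 * fit error exceeds sixteen_color_thresh, a raw 16-color block.
 */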
786 
787 static int rpza_encode_init(AVCodecContext *avctx)
788 {
789  RpzaContext *s = avctx->priv_data;
790 
791  s->frame_width = avctx->width;
792  s->frame_height = avctx->height;
793 
794  s->prev_frame = av_frame_alloc();
795  if (!s->prev_frame)
796  return AVERROR(ENOMEM);
797 
798  return 0;
799 }
800 
801 static int rpza_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
802  const AVFrame *pict, int *got_packet)
803 {
804  RpzaContext *s = avctx->priv_data;
805  uint8_t *buf;
806  int ret = ff_alloc_packet(avctx, pkt, 4LL + 6LL * FFMAX(avctx->height, 4) * FFMAX(avctx->width, 4));
807 
808  if (ret < 0)
809  return ret;
810 
811  init_put_bits(&s->pb, pkt->data, pkt->size);
812 
813  // skip 4 byte header, write it later once the size of the chunk is known
814  put_bits32(&s->pb, 0x00);
815 
816  if (!s->prev_frame->data[0]) {
817  s->first_frame = 1;
818  s->prev_frame->format = pict->format;
819  s->prev_frame->width = pict->width;
820  s->prev_frame->height = pict->height;
821  ret = av_frame_get_buffer(s->prev_frame, 0);
822  if (ret < 0)
823  return ret;
824  } else {
825  s->first_frame = 0;
826  }
827 
828  rpza_encode_stream(s, pict);
829 
830  flush_put_bits(&s->pb);
831 
832  av_shrink_packet(pkt, put_bytes_output(&s->pb));
833  buf = pkt->data;
834 
835  // write header opcode
836  buf[0] = 0xe1; // chunk opcode
837 
838  // write chunk length
839  AV_WB24(buf + 1, pkt->size);
840 
841  *got_packet = 1;
842 
843  return 0;
844 }
845 
846 static int rpza_encode_end(AVCodecContext *avctx)
847 {
848  RpzaContext *s = (RpzaContext *)avctx->priv_data;
849 
850  av_frame_free(&s->prev_frame);
851 
852  return 0;
853 }
854 
855 #define OFFSET(x) offsetof(RpzaContext, x)
856 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
857 static const AVOption options[] = {
858  { "skip_frame_thresh", NULL, OFFSET(skip_frame_thresh), AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
859  { "start_one_color_thresh", NULL, OFFSET(start_one_color_thresh), AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
860  { "continue_one_color_thresh", NULL, OFFSET(continue_one_color_thresh), AV_OPT_TYPE_INT, {.i64=0}, 0, 24, VE},
861  { "sixteen_color_thresh", NULL, OFFSET(sixteen_color_thresh), AV_OPT_TYPE_INT, {.i64=1}, 0, 24, VE},
862  { NULL },
863 };
864 
865 static const AVClass rpza_class = {
866  .class_name = "rpza",
867  .item_name = av_default_item_name,
868  .option = options,
869  .version = LIBAVUTIL_VERSION_INT,
870 };
871 
872 const FFCodec ff_rpza_encoder = {
873  .p.name = "rpza",
874  CODEC_LONG_NAME("QuickTime video (RPZA)"),
875  .p.type = AVMEDIA_TYPE_VIDEO,
876  .p.id = AV_CODEC_ID_RPZA,
877  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
878  .priv_data_size = sizeof(RpzaContext),
879  .p.priv_class = &rpza_class,
880  .init = rpza_encode_init,
881  FF_CODEC_ENCODE_CB(rpza_encode_frame),
882  .close = rpza_encode_end,
883  .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_RGB555,
884  AV_PIX_FMT_NONE },
885 };
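/*
 * Usage sketch (editor's addition, not in the original source; option values
 * are only illustrative): the private options above are exposed as encoder
 * AVOptions, so a command such as
 *
 *   ffmpeg -i input.mov -c:v rpza -pix_fmt rgb555 \
 *          -skip_frame_thresh 2 -sixteen_color_thresh 4 output.mov
 *
 * is expected to select this encoder inside a QuickTime container.
 */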