huffyuvenc.c
/*
 * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
 */

/**
 * @file
 * huffyuv encoder
 */

#include "config_components.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "huffyuv.h"
#include "huffman.h"
#include "huffyuvencdsp.h"
#include "lossless_videoencdsp.h"
#include "put_bits.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

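/* Compute dst = src0 - src1 for one row of residuals, dispatching on the
 * sample depth: up to 8 bps uses the byte-wise DSP routine, deeper formats
 * use the int16 variant with an (s->n - 1) mask so differences wrap within
 * the sample range. */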
static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
                              const uint8_t *src0, const uint8_t *src1, int w)
{
    if (s->bps <= 8) {
        s->llvidencdsp.diff_bytes(dst, src0, src1, w);
    } else {
        s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
    }
}

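/* Left prediction: dst[i] = src[i] - src[i-1], with dst[0] seeded from the
 * 'left' value of the previous call; returns the rightmost sample so the
 * caller can carry it into the next row.  The first 32 samples are
 * differenced in a scalar loop before handing the rest to the DSP routine,
 * which may assume a minimum run length or alignment. */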
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    int min_width = FFMIN(w, 32);

    if (s->bps <= 8) {
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        if (w < 32)
            return left;
        s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
        return src[w-1];
    } else {
        const uint16_t *src16 = (const uint16_t *)src;
        uint16_t       *dst16 = (uint16_t *)dst;
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src16[i];
            dst16[i] = temp - left;
            left     = temp;
        }
        if (w < 32)
            return left;
        s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
        return src16[w-1];
    }
}

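/* Packed 32-bit RGBA variant of the left predictor: the first 8 pixels
 * (32 bytes) are differenced per channel in a scalar loop, then the byte
 * DSP routine handles the remainder with a 4-byte (one pixel) lag. */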
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    int min_width = FFMIN(w, 8);
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < min_width; i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}

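/* Packed 24-bit RGB variant: 16 pixels (48 bytes) in the scalar loop, then
 * the DSP routine with a 3-byte lag. */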
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}

static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
{
    if (s->bps <= 8) {
        s->llvidencdsp.sub_median_pred(dst, src1, src2, w, left, left_top);
    } else {
        s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w, left, left_top);
    }
}

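/* Serialize one table of code lengths with Huffyuv's run-length scheme.
 * A run of up to 7 equal lengths packs into one byte as val | (repeat << 5);
 * longer runs emit a (val, repeat) byte pair.  Since val < 32, the two-byte
 * form is recognizable by its zero upper three bits.  For example, 5 symbols
 * of length 6 become the single byte 0xA6 (6 | 5 << 5), while a run of 200
 * becomes the pair 0x06 0xC8. */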
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;
    int n = s->vlc_n;

    for (i = 0; i < n;) {
        int val = len[i];
        int repeat = 0;

        for (; i < n && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}

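/* Generate Huffman code lengths and bit patterns from the gathered
 * statistics, then serialize them.  Version 2 streams always carry three
 * tables (Y, U, V); version 3 carries one per coded plane type: luma, plus
 * two chroma tables and an alpha table when those planes are present. */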
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
{
    int i, ret;
    int size = 0;
    int count = 3;

    if (s->version > 2)
        count = 1 + s->alpha + 2*s->chroma;

    for (i = 0; i < count; i++) {
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
            return ret;

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
            return -1;
        }

        size += store_table(s, s->len[i], buf + size);
    }
    return size;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;
    int ret;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    ff_huffyuv_common_init(avctx);
    ff_huffyuvencdsp_init(&s->hencdsp, avctx);
    ff_llvidencdsp_init(&s->llvidencdsp);

    avctx->extradata = av_mallocz(3*MAX_N + 4);
    if (s->flags & AV_CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
    }
    s->version = 2;

    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    s->bps    = desc->comp[0].depth;
    s->yuv    = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
    s->chroma = desc->nb_components > 2;
    s->alpha  = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
                                     &s->chroma_h_shift,
                                     &s->chroma_v_shift);

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA444P16:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUVA420P16:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA422P16:
        s->version = 3;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    s->n = 1<<s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (s->context) {
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    }

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return AVERROR(EINVAL);
        }
        if (s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
        return AVERROR(EINVAL);
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

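    /* Global header, 4 bytes of extradata:
     *   byte 0: predictor id, plus the RGB decorrelate flag at bit 6
     *   byte 1: v2: bits per pixel of the bitstream
     *           v3: ((bps - 1) << 4) | chroma_h_shift | (chroma_v_shift << 2)
     *   byte 2: interlace hint (0x10/0x20), per-frame-context flag (0x40);
     *           v3 adds plane info: 1 = yuv, 2 = rgb, 4 = has alpha
     *   byte 3: version marker, 0 for v2 and 1 for v3
     * The run-length coded Huffman tables follow immediately after. */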
    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    if (s->version < 3) {
        ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
        ((uint8_t*)avctx->extradata)[3] = 0;
    } else {
        ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
        if (s->chroma)
            ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
        if (s->alpha)
            ((uint8_t*)avctx->extradata)[2] |= 4;
        ((uint8_t*)avctx->extradata)[3] = 1;
    }
    s->avctx->extradata_size = 4;

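    /* Seed the symbol statistics: with two-pass encoding, parse the counters
     * written by pass 1 (rows of whitespace-separated integers, one row per
     * table); otherwise start from a smooth 1/(d*d + 1) prior that favours
     * small residuals, where d is the distance from zero with wrap-around. */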
    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 4; i++) {
                char *next;

                for (j = 0; j < s->vlc_n; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = 100000000 / (d*d + 1);
            }
    }

    ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
    if (ret < 0)
        return ret;
    s->avctx->extradata_size += ret;

    if (s->context) {
        for (i = 0; i < 4; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);
                s->stats[i][j] = pels/(d*d + 1);
            }
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 0;
    }

    if (ff_huffyuv_alloc_temp(s)) {
        ff_huffyuv_common_end(s);
        return AVERROR(ENOMEM);
    }

    s->picture_number = 0;

    return 0;
}
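
/* Write one line of 4:2:2 samples; codes are interleaved in y0 u y1 v
 * order, with table 0 for luma and tables 1/2 for chroma.  In pass-1 or
 * context mode the symbol statistics are updated as a side effect. */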
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

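/* Version-3 planar bitstream writer.  Two samples are handled per iteration;
 * an odd trailing sample uses the *END macros.  Up to 8 bps the code tables
 * are indexed directly; 9-14 bps samples are masked to the sample range;
 * above 14 bps the VLC tables cover only the quantized symbol y >> 2 and the
 * two least significant bits are appended raw after each code. */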
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
{
    int i, count = width/2;

    if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOADEND\
    int y0 = s->temp[0][width-1];
#define LOADEND_14\
    int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
    int y0 = s->temp16[0][width-1];
#define STATEND\
    s->stats[plane][y0]++;
#define STATEND_16\
    s->stats[plane][y0>>2]++;
#define WRITEEND\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
    int y0 = s->temp16[0][2 * i] & mask;\
    int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
    int y0 = s->temp16[0][2 * i];\
    int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
    s->stats[plane][y0]++;\
    s->stats[plane][y1]++;
#define STAT2_16\
    s->stats[plane][y0>>2]++;\
    s->stats[plane][y1>>2]++;
#define WRITE2\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
    put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);\
    put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
    put_bits(&s->pb, 2, y1&3);

    if (s->bps <= 8) {
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                WRITEEND;
            }
        }
    } else if (s->bps <= 14) {
        int mask = s->n - 1;
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                WRITEEND;
            }
        }
    } else {
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
                WRITEEND_16;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                WRITEEND_16;
            }
        }
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (put_bytes_left(&s->pb, 0) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}

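/* Packed RGB(A) writer: green is coded as-is while blue and red are coded
 * as differences from green, reduced mod 256 (the "decorrelate" transform
 * signalled in the global header).  Alpha, when present, shares the red
 * table. */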
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF; \
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF; \
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}

static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0)
        return ret;

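    /* In context mode the Huffman tables are rebuilt from the running
     * statistics and stored at the start of every packet; halving the
     * counters afterwards gives the adaptation an exponentially decaying
     * memory of past frames. */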
    if (s->context) {
        size = store_huffman_tables(s, pkt->data);
        if (size < 0)
            return size;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

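        /* Median prediction needs a fully coded previous row, so the first
         * line (and the first few samples of the second) fall back to left
         * prediction before the median path takes over. */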
        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
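        /* Packed RGB frames are traversed bottom-up via a negative stride,
         * presumably matching the bottom-up row order of the Windows DIBs
         * that the original Huffyuv operated on. */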
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width - 1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                          width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else if (s->version > 2) {
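        /* Version-3 planar path: every plane (luma, then chroma, then alpha)
         * is predicted and coded independently, with the chroma planes using
         * their own subsampled width, height and stride. */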
        int plane;
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
            int left, y;
            int w = width;
            int h = height;
            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {
                w >>= s->chroma_h_shift;
                h >>= s->chroma_v_shift;
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
            }

            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);

            encode_plane_bitstream(s, w, plane);

            if (s->predictor == MEDIAN) {
                int lefttop;
                y = 1;
                if (s->interlaced) {
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);

                    encode_plane_bitstream(s, w, plane);
                    y++;
                }

                lefttop = p->data[plane][0];

                for (; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);

                    encode_plane_bitstream(s, w, plane);
                }
            } else {
                for (y = 1; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);

                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
                    } else {
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
                    }

                    encode_plane_bitstream(s, w, plane);
                }
            }
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

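    /* Pad with 31 zero bits so the final partial word is flushed, round the
     * stream up to whole 32-bit words, and byte-swap the packet: the Huffyuv
     * bitstream is stored as little-endian 32-bit words. */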
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end-p, "\n");
            p++;
            if (end <= p)
                return AVERROR(ENOMEM);
        }
    } else if (avctx->stats_out)
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->stats_out);

    return 0;
}

#define OFFSET(x) offsetof(HYuvContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

#define COMMON_OPTIONS \
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 }, \
      0, 1, VE }, \
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \

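/* Both encoders share the option set defined above; only the FFmpeg variant
 * (ffvhuff) additionally exposes "context", the per-frame adaptive Huffman
 * table mode. */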
static const AVOption normal_options[] = {
    COMMON_OPTIONS
    { NULL },
};

static const AVOption ff_options[] = {
    COMMON_OPTIONS
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { NULL },
};

static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name  = av_default_item_name,
    .option     = normal_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name  = av_default_item_name,
    .option     = ff_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_huffyuv_encoder = {
    .p.name         = "huffyuv",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    FF_CODEC_ENCODE_CB(encode_frame),
    .close          = encode_end,
    .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
    .p.priv_class   = &normal_class,
    .p.pix_fmts     = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};

#if CONFIG_FFVHUFF_ENCODER
const FFCodec ff_ffvhuff_encoder = {
    .p.name         = "ffvhuff",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    FF_CODEC_ENCODE_CB(encode_frame),
    .close          = encode_end,
    .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
    .p.priv_class   = &ff_class,
    .p.pix_fmts     = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
#endif