FFmpeg
magicyuvenc.c
Go to the documentation of this file.
1 /*
2  * MagicYUV encoder
3  * Copyright (c) 2017 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #include "libavutil/opt.h"
26 #include "libavutil/pixdesc.h"
27 #include "libavutil/qsort.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "put_bits.h"
32 #include "internal.h"
33 #include "thread.h"
34 #include "lossless_videoencdsp.h"
35 
36 typedef enum Prediction {
37  LEFT = 1,
40 } Prediction;
41 
/* One symbol of a per-plane Huffman table. */
typedef struct HuffEntry {
    uint8_t  len;   ///< code length in bits; limited to 12 here and stored as 7 bits in the stream
    uint32_t code;  ///< canonical Huffman code, assigned by calculate_codes()
} HuffEntry;
46 
/* Occurrence count for one input byte value; input to the
 * package-merge code-length computation. */
typedef struct PTable {
    int     value;  ///< input value
    int64_t prob;   ///< number of occurrences of this value in input
} PTable;
51 
52 typedef struct MagicYUVContext {
53  const AVClass *class;
56  int planes;
58  AVFrame *p;
59  int slice_height;
60  int nb_slices;
61  int correlate;
62  int hshift[4];
63  int vshift[4];
64  uint8_t *slices[4];
65  unsigned slice_pos[4];
66  unsigned tables_size;
67  HuffEntry he[4][256];
70  ptrdiff_t stride, int width, int height);
72 
74  uint8_t *src, uint8_t *dst, ptrdiff_t stride,
75  int width, int height)
76 {
77  uint8_t prev = 0;
78  int i, j;
79 
80  for (i = 0; i < width; i++) {
81  dst[i] = src[i] - prev;
82  prev = src[i];
83  }
84  dst += width;
85  src += stride;
86  for (j = 1; j < height; j++) {
87  prev = src[-stride];
88  for (i = 0; i < width; i++) {
89  dst[i] = src[i] - prev;
90  prev = src[i];
91  }
92  dst += width;
93  src += stride;
94  }
95 }
96 
98  uint8_t *src, uint8_t *dst, ptrdiff_t stride,
99  int width, int height)
100 {
101  int left = 0, top, lefttop;
102  int i, j;
103 
104  for (i = 0; i < width; i++) {
105  dst[i] = src[i] - left;
106  left = src[i];
107  }
108  dst += width;
109  src += stride;
110  for (j = 1; j < height; j++) {
111  top = src[-stride];
112  left = src[0] - top;
113  dst[0] = left;
114  for (i = 1; i < width; i++) {
115  top = src[i - stride];
116  lefttop = src[i - (stride + 1)];
117  left = src[i-1];
118  dst[i] = (src[i] - top) - left + lefttop;
119  }
120  dst += width;
121  src += stride;
122  }
123 }
124 
126  uint8_t *src, uint8_t *dst, ptrdiff_t stride,
127  int width, int height)
128 {
129  int left = 0, lefttop;
130  int i, j;
131 
132  for (i = 0; i < width; i++) {
133  dst[i] = src[i] - left;
134  left = src[i];
135  }
136  dst += width;
137  src += stride;
138  for (j = 1; j < height; j++) {
139  left = lefttop = src[-stride];
140  s->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &left, &lefttop);
141  dst += width;
142  src += stride;
143  }
144 }
145 
147 {
148  MagicYUVContext *s = avctx->priv_data;
150  int i;
151 
152  switch (avctx->pix_fmt) {
153  case AV_PIX_FMT_GBRP:
154  avctx->codec_tag = MKTAG('M', '8', 'R', 'G');
155  s->correlate = 1;
156  s->format = 0x65;
157  break;
158  case AV_PIX_FMT_GBRAP:
159  avctx->codec_tag = MKTAG('M', '8', 'R', 'A');
160  s->correlate = 1;
161  s->format = 0x66;
162  break;
163  case AV_PIX_FMT_YUV420P:
164  avctx->codec_tag = MKTAG('M', '8', 'Y', '0');
165  s->hshift[1] =
166  s->vshift[1] =
167  s->hshift[2] =
168  s->vshift[2] = 1;
169  s->format = 0x69;
170  break;
171  case AV_PIX_FMT_YUV422P:
172  avctx->codec_tag = MKTAG('M', '8', 'Y', '2');
173  s->hshift[1] =
174  s->hshift[2] = 1;
175  s->format = 0x68;
176  break;
177  case AV_PIX_FMT_YUV444P:
178  avctx->codec_tag = MKTAG('M', '8', 'Y', '4');
179  s->format = 0x67;
180  break;
181  case AV_PIX_FMT_YUVA444P:
182  avctx->codec_tag = MKTAG('M', '8', 'Y', 'A');
183  s->format = 0x6a;
184  break;
185  case AV_PIX_FMT_GRAY8:
186  avctx->codec_tag = MKTAG('M', '8', 'G', '0');
187  s->format = 0x6b;
188  break;
189  default:
190  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n",
191  avctx->pix_fmt);
192  return AVERROR_INVALIDDATA;
193  }
194 
196 
198 
199  s->nb_slices = 1;
200 
201  for (i = 0; i < s->planes; i++) {
202  s->slices[i] = av_malloc(avctx->width * (avctx->height + 2) +
204  if (!s->slices[i]) {
205  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer.\n");
206  return AVERROR(ENOMEM);
207  }
208  }
209 
210  switch (s->frame_pred) {
211  case LEFT: s->predict = left_predict; break;
212  case GRADIENT: s->predict = gradient_predict; break;
213  case MEDIAN: s->predict = median_predict; break;
214  }
215 
216  avctx->extradata_size = 32;
217 
218  avctx->extradata = av_mallocz(avctx->extradata_size +
220 
221  if (!avctx->extradata) {
222  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
223  return AVERROR(ENOMEM);
224  }
225 
226  bytestream2_init_writer(&pb, avctx->extradata, avctx->extradata_size);
227  bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
228  bytestream2_put_le32(&pb, 32);
229  bytestream2_put_byte(&pb, 7);
230  bytestream2_put_byte(&pb, s->format);
231  bytestream2_put_byte(&pb, 12);
232  bytestream2_put_byte(&pb, 0);
233 
234  bytestream2_put_byte(&pb, 0);
235  bytestream2_put_byte(&pb, 0);
236  bytestream2_put_byte(&pb, 32);
237  bytestream2_put_byte(&pb, 0);
238 
239  bytestream2_put_le32(&pb, avctx->width);
240  bytestream2_put_le32(&pb, avctx->height);
241  bytestream2_put_le32(&pb, avctx->width);
242  bytestream2_put_le32(&pb, avctx->height);
243 
244  return 0;
245 }
246 
247 static void calculate_codes(HuffEntry *he, uint16_t codes_count[33])
248 {
249  for (unsigned i = 32, nb_codes = 0; i > 0; i--) {
250  uint16_t curr = codes_count[i]; // # of leafs of length i
251  codes_count[i] = nb_codes / 2; // # of non-leaf nodes on level i
252  nb_codes = codes_count[i] + curr; // # of nodes on level i
253  }
254 
255  for (unsigned i = 0; i < 256; i++) {
256  he[i].code = codes_count[he[i].len];
257  codes_count[he[i].len]++;
258  }
259 }
260 
261 static void count_usage(uint8_t *src, int width,
262  int height, PTable *counts)
263 {
264  int i, j;
265 
266  for (j = 0; j < height; j++) {
267  for (i = 0; i < width; i++) {
268  counts[src[i]].prob++;
269  }
270  src += width;
271  }
272 }
273 
274 typedef struct PackageMergerList {
275  int nitems; ///< number of items in the list and probability ex. 4
276  int item_idx[515]; ///< index range for each item in items 0, 2, 5, 9, 13
277  int probability[514]; ///< probability of each item 3, 8, 18, 46
278  int items[257 * 16]; ///< chain of all individual values that make up items A, B, A, B, C, A, B, C, D, C, D, D, E
280 
281 static int compare_by_prob(const void *a, const void *b)
282 {
283  const PTable *a2 = a;
284  const PTable *b2 = b;
285  return a2->prob - b2->prob;
286 }
287 
/*
 * Compute length-limited Huffman code lengths with the package-merge
 * (boundary package-merge) algorithm.
 *
 * prob_table   symbol/occurrence pairs; sorted in place by ascending prob
 * distincts    receives the code length for each symbol in distincts[i].len
 * codes_counts receives, per length l, the number of symbols of length l
 * size         number of symbols (256 here)
 * max_length   maximum allowed code length (12 here)
 *
 * Two lists are ping-ponged: `from` holds the packages of the previous
 * level, `to` collects the packages of the current one.
 */
static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts,
                                      uint16_t codes_counts[33],
                                      int size, int max_length)
{
    PackageMergerList list_a, list_b, *to = &list_a, *from = &list_b, *temp;
    int times, i, j, k;
    int nbits[257] = {0};
    int min;

    av_assert0(max_length > 0);

    to->nitems = 0;
    from->nitems = 0;
    to->item_idx[0] = 0;
    from->item_idx[0] = 0;
    AV_QSORT(prob_table, size, PTable, compare_by_prob);

    for (times = 0; times <= max_length; times++) {
        to->nitems = 0;
        to->item_idx[0] = 0;

        j = 0;
        k = 0;

        /* On the final pass `i` is deliberately NOT reset: it still
         * equals `size` from the previous pass, so no fresh leaves are
         * added and only pairs of existing packages are merged. */
        if (times < max_length) {
            i = 0;
        }
        /* Merge-walk: take the cheaper of the next single leaf and the
         * next pair of packages from the previous level. */
        while (i < size || j + 1 < from->nitems) {
            to->nitems++;
            to->item_idx[to->nitems] = to->item_idx[to->nitems - 1];
            if (i < size &&
                (j + 1 >= from->nitems ||
                 prob_table[i].prob <
                     from->probability[j] + from->probability[j + 1])) {
                /* Next leaf is cheaper: append it as a one-item package. */
                to->items[to->item_idx[to->nitems]++] = prob_table[i].value;
                to->probability[to->nitems - 1] = prob_table[i].prob;
                i++;
            } else {
                /* Merge the two cheapest packages of the previous level;
                 * the new package contains all their member symbols. */
                for (k = from->item_idx[j]; k < from->item_idx[j + 2]; k++) {
                    to->items[to->item_idx[to->nitems]++] = from->items[k];
                }
                to->probability[to->nitems - 1] =
                    from->probability[j] + from->probability[j + 1];
                j += 2;
            }
        }
        /* Swap the roles of the two lists for the next level. */
        temp = to;
        to = from;
        from = temp;
    }

    /* Each symbol's code length is the number of times it appears among
     * the members of the first min(size-1, nitems) final packages. */
    min = (size - 1 < from->nitems) ? size - 1 : from->nitems;
    for (i = 0; i < from->item_idx[min]; i++) {
        nbits[from->items[i]]++;
    }

    for (i = 0; i < size; i++) {
        distincts[i].len = nbits[i];
        codes_counts[nbits[i]]++;
    }
}
349 
350 static int encode_table(AVCodecContext *avctx, uint8_t *dst,
351  int width, int height,
352  PutBitContext *pb, HuffEntry *he)
353 {
354  PTable counts[256] = { {0} };
355  uint16_t codes_counts[33] = { 0 };
356  int i;
357 
358  count_usage(dst, width, height, counts);
359 
360  for (i = 0; i < 256; i++) {
361  counts[i].prob++;
362  counts[i].value = i;
363  }
364 
365  magy_huffman_compute_bits(counts, he, codes_counts, 256, 12);
366 
367  calculate_codes(he, codes_counts);
368 
369  for (i = 0; i < 256; i++) {
370  put_bits(pb, 1, 0);
371  put_bits(pb, 7, he[i].len);
372  }
373 
374  return 0;
375 }
376 
377 static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size,
378  int width, int height, HuffEntry *he, int prediction)
379 {
380  PutBitContext pb;
381  int i, j;
382  int count;
383 
384  init_put_bits(&pb, dst, dst_size);
385 
386  put_bits(&pb, 8, 0);
387  put_bits(&pb, 8, prediction);
388 
389  for (j = 0; j < height; j++) {
390  for (i = 0; i < width; i++) {
391  const int idx = src[i];
392  put_bits(&pb, he[idx].len, he[idx].code);
393  }
394 
395  src += width;
396  }
397 
398  count = put_bits_count(&pb) & 0x1F;
399 
400  if (count)
401  put_bits(&pb, 32 - count, 0);
402 
403  count = put_bits_count(&pb);
404 
405  flush_put_bits(&pb);
406 
407  return count >> 3;
408 }
409 
411  const AVFrame *frame, int *got_packet)
412 {
413  MagicYUVContext *s = avctx->priv_data;
414  PutByteContext pb;
415  const int width = avctx->width, height = avctx->height;
416  int pos, slice, i, j, ret = 0;
417 
418  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * s->nb_slices + width * height) *
419  s->planes + 256, 0);
420  if (ret < 0)
421  return ret;
422 
423  bytestream2_init_writer(&pb, pkt->data, pkt->size);
424  bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
425  bytestream2_put_le32(&pb, 32); // header size
426  bytestream2_put_byte(&pb, 7); // version
427  bytestream2_put_byte(&pb, s->format);
428  bytestream2_put_byte(&pb, 12); // max huffman length
429  bytestream2_put_byte(&pb, 0);
430 
431  bytestream2_put_byte(&pb, 0);
432  bytestream2_put_byte(&pb, 0);
433  bytestream2_put_byte(&pb, 32); // coder type
434  bytestream2_put_byte(&pb, 0);
435 
436  bytestream2_put_le32(&pb, avctx->width);
437  bytestream2_put_le32(&pb, avctx->height);
438  bytestream2_put_le32(&pb, avctx->width);
439  bytestream2_put_le32(&pb, avctx->height);
440  bytestream2_put_le32(&pb, 0);
441 
442  for (i = 0; i < s->planes; i++) {
443  bytestream2_put_le32(&pb, 0);
444  for (j = 1; j < s->nb_slices; j++) {
445  bytestream2_put_le32(&pb, 0);
446  }
447  }
448 
449  bytestream2_put_byte(&pb, s->planes);
450 
451  for (i = 0; i < s->planes; i++) {
452  for (slice = 0; slice < s->nb_slices; slice++) {
453  bytestream2_put_byte(&pb, i);
454  }
455  }
456 
457  if (s->correlate) {
458  uint8_t *r, *g, *b;
459  AVFrame *p = av_frame_clone(frame);
460 
461  g = p->data[0];
462  b = p->data[1];
463  r = p->data[2];
464 
465  for (i = 0; i < height; i++) {
466  s->llvidencdsp.diff_bytes(b, b, g, width);
467  s->llvidencdsp.diff_bytes(r, r, g, width);
468  g += p->linesize[0];
469  b += p->linesize[1];
470  r += p->linesize[2];
471  }
472 
473  FFSWAP(uint8_t*, p->data[0], p->data[1]);
474  FFSWAP(int, p->linesize[0], p->linesize[1]);
475 
476  for (i = 0; i < s->planes; i++) {
477  for (slice = 0; slice < s->nb_slices; slice++) {
478  s->predict(s, p->data[i], s->slices[i], p->linesize[i],
479  p->width, p->height);
480  }
481  }
482 
483  av_frame_free(&p);
484  } else {
485  for (i = 0; i < s->planes; i++) {
486  for (slice = 0; slice < s->nb_slices; slice++) {
487  s->predict(s, frame->data[i], s->slices[i], frame->linesize[i],
488  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
489  AV_CEIL_RSHIFT(frame->height, s->vshift[i]));
490  }
491  }
492  }
493 
495 
496  for (i = 0; i < s->planes; i++) {
497  encode_table(avctx, s->slices[i],
498  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
499  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
500  &s->pb, s->he[i]);
501  }
502  s->tables_size = (put_bits_count(&s->pb) + 7) >> 3;
504 
505  for (i = 0; i < s->planes; i++) {
506  unsigned slice_size;
507 
508  s->slice_pos[i] = bytestream2_tell_p(&pb);
509  slice_size = encode_slice(s->slices[i], pkt->data + bytestream2_tell_p(&pb),
511  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
512  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
513  s->he[i], s->frame_pred);
514  bytestream2_skip_p(&pb, slice_size);
515  }
516 
517  pos = bytestream2_tell_p(&pb);
518  bytestream2_seek_p(&pb, 32, SEEK_SET);
519  bytestream2_put_le32(&pb, s->slice_pos[0] - 32);
520  for (i = 0; i < s->planes; i++) {
521  bytestream2_put_le32(&pb, s->slice_pos[i] - 32);
522  }
523  bytestream2_seek_p(&pb, pos, SEEK_SET);
524 
525  pkt->size = bytestream2_tell_p(&pb);
526  pkt->flags |= AV_PKT_FLAG_KEY;
527 
528  *got_packet = 1;
529 
530  return 0;
531 }
532 
534 {
535  MagicYUVContext *s = avctx->priv_data;
536  int i;
537 
538  for (i = 0; i < s->planes; i++)
539  av_freep(&s->slices[i]);
540 
541  return 0;
542 }
543 
544 #define OFFSET(x) offsetof(MagicYUVContext, x)
545 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    /* Frame-level predictor; the range LEFT..MEDIAN maps onto the
     * Prediction enum, with LEFT as the default. */
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, {.i64=LEFT}, LEFT, MEDIAN, VE, "pred" },
    { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },     0, 0, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = GRADIENT }, 0, 0, VE, "pred" },
    { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN },   0, 0, VE, "pred" },
    { NULL},
};
553 
/* AVClass exposing the options[] table (the "pred" option) through
 * the AVOption API. */
static const AVClass magicyuv_class = {
    .class_name = "magicyuv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
560 
562  .name = "magicyuv",
563  .long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
564  .type = AVMEDIA_TYPE_VIDEO,
565  .id = AV_CODEC_ID_MAGICYUV,
566  .priv_data_size = sizeof(MagicYUVContext),
567  .priv_class = &magicyuv_class,
569  .close = magy_encode_close,
570  .encode2 = magy_encode_frame,
571  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
572  .pix_fmts = (const enum AVPixelFormat[]) {
576  },
577  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
578 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
#define NULL
Definition: coverity.c:32
#define OFFSET(x)
Definition: magicyuvenc.c:544
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
static const AVOption options[]
Definition: magicyuvenc.c:546
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int size
Definition: packet.h:364
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
unsigned tables_size
Definition: magicyuvenc.c:66
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static void left_predict(MagicYUVContext *s, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: magicyuvenc.c:73
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static AVPacket pkt
static av_always_inline void predict(PredictorState *ps, float *coef, int output_enable)
Definition: aacdec.c:174
AVCodec.
Definition: codec.h:190
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
unsigned slice_pos[4]
Definition: magicyuvenc.c:65
PutBitContext pb
Definition: magicyuvenc.c:55
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
int value
input value
Definition: magicyuvenc.c:48
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
AVOptions.
Multithreading support functions.
const char * from
Definition: jacosubdec.c:65
static av_cold int magy_encode_init(AVCodecContext *avctx)
Definition: magicyuvenc.c:146
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
HuffEntry he[4][256]
Definition: magicyuvenc.c:67
void(* diff_bytes)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w)
#define height
uint8_t * data
Definition: packet.h:363
static int encode_table(AVCodecContext *avctx, uint8_t *dst, int width, int height, PutBitContext *pb, HuffEntry *he)
Definition: magicyuvenc.c:350
#define VE
Definition: magicyuvenc.c:545
int hshift[4]
Definition: magicyuv.c:65
ptrdiff_t size
Definition: opengl_enc.c:100
uint32_t code
Definition: magicyuvenc.c:44
int nitems
number of items in the list and probability ex. 4
Definition: magicyuvenc.c:275
#define av_log(a,...)
const char * to
Definition: webvttdec.c:34
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:401
#define src
Definition: vp8dsp.c:254
static void median_predict(MagicYUVContext *s, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: magicyuvenc.c:125
static const struct @323 planes[]
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
void(* sub_median_pred)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w, int *left, int *left_top)
Subtract HuffYUV&#39;s variant of median prediction.
void(* predict)(struct MagicYUVContext *s, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: magicyuvenc.c:69
static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p)
Definition: bytestream.h:163
static av_cold int magy_encode_close(AVCodecContext *avctx)
Definition: magicyuvenc.c:533
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
unsigned int pos
Definition: spdifenc.c:410
const char * name
Name of the codec implementation.
Definition: codec.h:197
int64_t prob
number of occurences of this value in input
Definition: magicyuvenc.c:49
GLsizei count
Definition: opengl_enc.c:108
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:197
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:81
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
Slice * slices[4]
Definition: magicyuv.c:67
LLVidEncDSPContext llvidencdsp
Definition: magicyuvenc.c:68
static av_always_inline void bytestream2_skip_p(PutByteContext *p, unsigned int size)
Definition: bytestream.h:180
static void gradient_predict(MagicYUVContext *s, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: magicyuvenc.c:97
int item_idx[515]
index range for each item in items 0, 2, 5, 9, 13
Definition: magicyuvenc.c:276
#define b
Definition: input.c:41
int vshift[4]
Definition: magicyuv.c:66
#define width
static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he, int prediction)
Definition: magicyuvenc.c:377
int width
picture width / height.
Definition: avcodec.h:699
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static void calculate_codes(HuffEntry *he, uint16_t codes_count[33])
Definition: magicyuvenc.c:247
static int magy_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: magicyuvenc.c:410
#define a2
Definition: regdef.h:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodec ff_magicyuv_encoder
Definition: magicyuvenc.c:561
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:553
Used to store intermediate lists in the package merge algorithm.
Definition: magicyuvenc.c:274
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
static int compare_by_prob(const void *a, const void *b)
Definition: magicyuvenc.c:281
uint8_t len
Definition: magicyuv.c:49
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
main external API structure.
Definition: avcodec.h:526
Used to assign a occurrence count or "probability" to an input value.
Definition: magicyuvenc.c:47
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:551
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
int extradata_size
Definition: avcodec.h:628
Describe the class of an AVClass context structure.
Definition: log.h:67
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
int probability[514]
probability of each item 3, 8, 18, 46
Definition: magicyuvenc.c:277
uint8_t format
Definition: magicyuvenc.c:57
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
common internal api header.
Prediction
Definition: magicyuvenc.c:36
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:115
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static const AVClass magicyuv_class
Definition: magicyuvenc.c:554
void * priv_data
Definition: avcodec.h:553
static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts, uint16_t codes_counts[33], int size, int max_length)
Definition: magicyuvenc.c:288
static void count_usage(uint8_t *src, int width, int height, PTable *counts)
Definition: magicyuvenc.c:261
int items[257 *16]
chain of all individual values that make up items A, B, A, B, C, A, B, C, D, C, D, D, E
Definition: magicyuvenc.c:278
int height
Definition: frame.h:366
#define av_freep(p)
#define FFSWAP(type, a, b)
Definition: common.h:99
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
#define MKTAG(a, b, c, d)
Definition: common.h:405
float min
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:340
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
Definition: qsort.h:33
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:1587
bitstream writer API