/* FFmpeg: magicyuvenc.c */

/*
 * MagicYUV encoder
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"

#include "avcodec.h"
#include "bytestream.h"
#include "encode.h"
#include "put_bits.h"
#include "internal.h"
#include "thread.h"
#include "lossless_videoencdsp.h"

typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;

typedef struct HuffEntry {
    uint8_t  len;
    uint32_t code;
} HuffEntry;

typedef struct PTable {
    int     value;  ///< input value
    int64_t prob;   ///< number of occurrences of this value in input
} PTable;

typedef struct MagicYUVContext {
    const AVClass       *class;
    int                  frame_pred;
    PutBitContext        pb;
    int                  planes;
    uint8_t              format;
    AVFrame             *p;
    int                  slice_height;
    int                  nb_slices;
    int                  correlate;
    int                  hshift[4];
    int                  vshift[4];
    uint8_t             *slices[4];
    unsigned             slice_pos[4];
    unsigned             tables_size;
    HuffEntry            he[4][256];
    LLVidEncDSPContext   llvidencdsp;
    void (*predict)(struct MagicYUVContext *s, uint8_t *src, uint8_t *dst,
                    ptrdiff_t stride, int width, int height);
} MagicYUVContext;

static void left_predict(MagicYUVContext *s,
                         uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                         int width, int height)
{
    uint8_t prev = 0;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - prev;
        prev   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        prev = src[-stride];
        for (i = 0; i < width; i++) {
            dst[i] = src[i] - prev;
            prev   = src[i];
        }
        dst += width;
        src += stride;
    }
}
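
/*
 * Editorial note (added commentary, not in the original file): left
 * prediction stores each sample as the difference from the previous sample
 * in raster order; from the second row on, the running "previous" value is
 * re-seeded from the sample directly above the start of the row
 * (src[-stride]).  As a small illustration with made-up values, the first
 * row {10, 12, 12, 15} would be written as the residuals {10, 2, 0, 3},
 * since prev starts at 0.
 */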

static void gradient_predict(MagicYUVContext *s,
                             uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                             int width, int height)
{
    int left = 0, top, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        top  = src[-stride];
        left = src[0] - top;
        dst[0] = left;
        for (i = 1; i < width; i++) {
            top     = src[i - stride];
            lefttop = src[i - (stride + 1)];
            left    = src[i - 1];
            dst[i]  = (src[i] - top) - left + lefttop;
        }
        dst += width;
        src += stride;
    }
}
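
/*
 * Editorial note (added commentary): the gradient predictor stores
 * src - (left + top - topleft), i.e. the residual against a planar
 * extrapolation from the three causal neighbours; the first row falls back
 * to plain left prediction and the first sample of each later row is
 * predicted from the sample directly above it.
 */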

static void median_predict(MagicYUVContext *s,
                           uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                           int width, int height)
{
    int left = 0, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        left = lefttop = src[-stride];
        s->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &left, &lefttop);
        dst += width;
        src += stride;
    }
}
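
/*
 * Editorial note (added commentary): median prediction delegates the
 * per-row work to llvidencdsp's sub_median_pred(), which, as in the
 * HuffYUV-style codecs, subtracts the median of {left, top,
 * left + top - topleft} from each sample; the first row again uses plain
 * left prediction.
 */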

static av_cold int magy_encode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    int i;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'G');
        s->correlate = 1;
        s->format = 0x65;
        break;
    case AV_PIX_FMT_GBRAP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'A');
        s->correlate = 1;
        s->format = 0x66;
        break;
    case AV_PIX_FMT_YUV420P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '0');
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        s->format = 0x69;
        break;
    case AV_PIX_FMT_YUV422P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '2');
        s->hshift[1] =
        s->hshift[2] = 1;
        s->format = 0x68;
        break;
    case AV_PIX_FMT_YUV444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '4');
        s->format = 0x67;
        break;
    case AV_PIX_FMT_YUVA444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', 'A');
        s->format = 0x6a;
        break;
    case AV_PIX_FMT_GRAY8:
        avctx->codec_tag = MKTAG('M', '8', 'G', '0');
        s->format = 0x6b;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n",
               avctx->pix_fmt);
        return AVERROR_INVALIDDATA;
    }

    ff_llvidencdsp_init(&s->llvidencdsp);

    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    s->nb_slices = 1;

    for (i = 0; i < s->planes; i++) {
        s->slices[i] = av_malloc(avctx->width * (avctx->height + 2) +
                                 AV_INPUT_BUFFER_PADDING_SIZE);
        if (!s->slices[i]) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer.\n");
            return AVERROR(ENOMEM);
        }
    }

    switch (s->frame_pred) {
    case LEFT:     s->predict = left_predict;     break;
    case GRADIENT: s->predict = gradient_predict; break;
    case MEDIAN:   s->predict = median_predict;   break;
    }

    avctx->extradata_size = 32;

    avctx->extradata = av_mallocz(avctx->extradata_size +
                                  AV_INPUT_BUFFER_PADDING_SIZE);

    if (!avctx->extradata) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
        return AVERROR(ENOMEM);
    }

    bytestream2_init_writer(&pb, avctx->extradata, avctx->extradata_size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32);
    bytestream2_put_byte(&pb, 7);
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12);
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32);
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);

    return 0;
}
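
/*
 * Editorial note (added commentary): the extradata written above is a
 * 32-byte MAGY header.  Going by the field comments in magy_encode_frame()
 * below, its layout is: the 'MAGY' tag, le32 header size (32), a version
 * byte (7), the format byte, the maximum Huffman code length (12), a few
 * reserved zero bytes and a coder-type byte (32), then the coded and
 * display width/height as four le32 values.
 */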

static void calculate_codes(HuffEntry *he, uint16_t codes_count[33])
{
    for (unsigned i = 32, nb_codes = 0; i > 0; i--) {
        uint16_t curr = codes_count[i];   // # of leaves of length i
        codes_count[i] = nb_codes / 2;    // # of non-leaf nodes on level i
        nb_codes = codes_count[i] + curr; // # of nodes on level i
    }

    for (unsigned i = 0; i < 256; i++) {
        he[i].code = codes_count[he[i].len];
        codes_count[he[i].len]++;
    }
}
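
/*
 * Editorial note (added commentary): calculate_codes() turns per-symbol code
 * lengths into a canonical prefix code.  The first loop converts
 * codes_count[] from "number of codes of length i" into the first code value
 * available at that length, and the second loop hands out consecutive code
 * values to symbols in index order.  As a small worked example (not from the
 * original source), the lengths {1, 2, 3, 3} yield starting codes 1, 1 and 0
 * for lengths 1, 2 and 3, i.e. the codewords "1", "01", "000" and "001".
 */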

static void count_usage(uint8_t *src, int width,
                        int height, PTable *counts)
{
    int i, j;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            counts[src[i]].prob++;
        }
        src += width;
    }
}

typedef struct PackageMergerList {
    int nitems;           ///< number of items in the list and probability      ex. 4
    int item_idx[515];    ///< index range for each item in items                   0, 2, 5, 9, 13
    int probability[514]; ///< probability of each item                             3, 8, 18, 46
    int items[257 * 16];  ///< chain of all individual values that make up items    A, B, A, B, C, A, B, C, D, C, D, D, E
} PackageMergerList;

static int compare_by_prob(const void *a, const void *b)
{
    const PTable *a2 = a;
    const PTable *b2 = b;
    return a2->prob - b2->prob;
}

static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts,
                                      uint16_t codes_counts[33],
                                      int size, int max_length)
{
    PackageMergerList list_a, list_b, *to = &list_a, *from = &list_b, *temp;
    int times, i, j, k;
    int nbits[257] = {0};
    int min;

    av_assert0(max_length > 0);

    to->nitems = 0;
    from->nitems = 0;
    to->item_idx[0] = 0;
    from->item_idx[0] = 0;
    AV_QSORT(prob_table, size, PTable, compare_by_prob);

    for (times = 0; times <= max_length; times++) {
        to->nitems = 0;
        to->item_idx[0] = 0;

        j = 0;
        k = 0;

        if (times < max_length) {
            i = 0;
        }
        while (i < size || j + 1 < from->nitems) {
            to->nitems++;
            to->item_idx[to->nitems] = to->item_idx[to->nitems - 1];
            if (i < size &&
                (j + 1 >= from->nitems ||
                 prob_table[i].prob <
                     from->probability[j] + from->probability[j + 1])) {
                to->items[to->item_idx[to->nitems]++] = prob_table[i].value;
                to->probability[to->nitems - 1] = prob_table[i].prob;
                i++;
            } else {
                for (k = from->item_idx[j]; k < from->item_idx[j + 2]; k++) {
                    to->items[to->item_idx[to->nitems]++] = from->items[k];
                }
                to->probability[to->nitems - 1] =
                    from->probability[j] + from->probability[j + 1];
                j += 2;
            }
        }
        temp = to;
        to = from;
        from = temp;
    }

    min = (size - 1 < from->nitems) ? size - 1 : from->nitems;
    for (i = 0; i < from->item_idx[min]; i++) {
        nbits[from->items[i]]++;
    }

    for (i = 0; i < size; i++) {
        distincts[i].len = nbits[i];
        codes_counts[nbits[i]]++;
    }
}
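
/*
 * Editorial note (added commentary): magy_huffman_compute_bits() is a
 * package-merge (Larmore/Hirschberg style) construction of length-limited
 * Huffman code lengths, capped at max_length (12 for this encoder).  Each
 * pass merges the probability-sorted symbol list with pairs ("packages")
 * formed from the previous pass's list; at the end, counting how often each
 * symbol occurs inside the first size - 1 entries of the final merged list
 * gives its code length, which is returned through distincts[] and the
 * per-length histogram codes_counts[].
 */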

static int encode_table(AVCodecContext *avctx, uint8_t *dst,
                        int width, int height,
                        PutBitContext *pb, HuffEntry *he)
{
    PTable counts[256] = { {0} };
    uint16_t codes_counts[33] = { 0 };
    int i;

    count_usage(dst, width, height, counts);

    for (i = 0; i < 256; i++) {
        counts[i].prob++;
        counts[i].value = i;
    }

    magy_huffman_compute_bits(counts, he, codes_counts, 256, 12);

    calculate_codes(he, codes_counts);

    for (i = 0; i < 256; i++) {
        put_bits(pb, 1, 0);
        put_bits(pb, 7, he[i].len);
    }

    return 0;
}
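
/*
 * Editorial note (added commentary): the table emitted above is 256 entries
 * of one flag bit (always 0 in this encoder) followed by a 7-bit code
 * length, i.e. exactly 256 bytes of table data per plane; every symbol's
 * count is bumped by one beforehand so that no symbol ends up with a zero
 * probability.
 */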

static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size,
                        int width, int height, HuffEntry *he, int prediction)
{
    PutBitContext pb;
    int i, j;
    int count;

    init_put_bits(&pb, dst, dst_size);

    put_bits(&pb, 8, 0);
    put_bits(&pb, 8, prediction);

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            const int idx = src[i];
            put_bits(&pb, he[idx].len, he[idx].code);
        }

        src += width;
    }

    count = put_bits_count(&pb) & 0x1F;

    if (count)
        put_bits(&pb, 32 - count, 0);

    flush_put_bits(&pb);

    return put_bytes_output(&pb);
}
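
/*
 * Editorial note (added commentary): each coded slice starts with a zero
 * byte and the prediction-mode byte, followed by the Huffman-coded residuals
 * in raster order; the bitstream is then zero-padded to a multiple of
 * 32 bits and encode_slice() returns the resulting size in bytes.
 */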

static int magy_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *frame, int *got_packet)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    const int width = avctx->width, height = avctx->height;
    int pos, slice, i, j, ret = 0;

    ret = ff_alloc_packet(avctx, pkt, (256 + 4 * s->nb_slices + width * height) *
                          s->planes + 256);
    if (ret < 0)
        return ret;

    bytestream2_init_writer(&pb, pkt->data, pkt->size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32); // header size
    bytestream2_put_byte(&pb, 7);  // version
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12); // max huffman length
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32); // coder type
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, 0);

    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, 0);
        for (j = 1; j < s->nb_slices; j++) {
            bytestream2_put_le32(&pb, 0);
        }
    }

    bytestream2_put_byte(&pb, s->planes);

    for (i = 0; i < s->planes; i++) {
        for (slice = 0; slice < s->nb_slices; slice++) {
            bytestream2_put_byte(&pb, i);
        }
    }

    if (s->correlate) {
        uint8_t *r, *g, *b;
        AVFrame *p = av_frame_clone(frame);

        g = p->data[0];
        b = p->data[1];
        r = p->data[2];

        for (i = 0; i < height; i++) {
            s->llvidencdsp.diff_bytes(b, b, g, width);
            s->llvidencdsp.diff_bytes(r, r, g, width);
            g += p->linesize[0];
            b += p->linesize[1];
            r += p->linesize[2];
        }

        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);

        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, p->data[i], s->slices[i], p->linesize[i],
                           p->width, p->height);
            }
        }

        av_frame_free(&p);
    } else {
        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, frame->data[i], s->slices[i], frame->linesize[i],
                           AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                           AV_CEIL_RSHIFT(frame->height, s->vshift[i]));
            }
        }
    }

    init_put_bits(&s->pb, pkt->data + bytestream2_tell_p(&pb),
                  bytestream2_get_bytes_left_p(&pb));

    for (i = 0; i < s->planes; i++) {
        encode_table(avctx, s->slices[i],
                     AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                     AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                     &s->pb, s->he[i]);
    }
    s->tables_size = put_bytes_count(&s->pb, 1);
    bytestream2_skip_p(&pb, s->tables_size);

    for (i = 0; i < s->planes; i++) {
        unsigned slice_size;

        s->slice_pos[i] = bytestream2_tell_p(&pb);
        slice_size = encode_slice(s->slices[i], pkt->data + bytestream2_tell_p(&pb),
                                  bytestream2_get_bytes_left_p(&pb),
                                  AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                                  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                                  s->he[i], s->frame_pred);
        bytestream2_skip_p(&pb, slice_size);
    }

    pos = bytestream2_tell_p(&pb);
    bytestream2_seek_p(&pb, 32, SEEK_SET);
    bytestream2_put_le32(&pb, s->slice_pos[0] - 32);
    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, s->slice_pos[i] - 32);
    }
    bytestream2_seek_p(&pb, pos, SEEK_SET);

    pkt->size   = bytestream2_tell_p(&pb);
    pkt->flags |= AV_PKT_FLAG_KEY;

    *got_packet = 1;

    return 0;
}
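
/*
 * Editorial note (added commentary): a packet therefore consists of the
 * 32-byte MAGY header, an offset table starting at byte 32 (written as
 * placeholders first and filled in retroactively as byte positions minus
 * 32), one byte with the plane count plus one plane-index byte per slice,
 * the concatenated Huffman tables, and finally the coded slices.  For RGB
 * input (s->correlate) the B and R planes are first turned into differences
 * against G and the first two planes are swapped before prediction.
 */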

static av_cold int magy_encode_close(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->planes; i++)
        av_freep(&s->slices[i]);

    return 0;
}

#define OFFSET(x) offsetof(MagicYUVContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, {.i64=LEFT}, LEFT, MEDIAN, VE, "pred" },
    { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },     0, 0, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = GRADIENT }, 0, 0, VE, "pred" },
    { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN },   0, 0, VE, "pred" },
    { NULL },
};

static const AVClass magicyuv_class = {
    .class_name = "magicyuv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const AVCodec ff_magicyuv_encoder = {
    .name           = "magicyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MAGICYUV,
    .priv_data_size = sizeof(MagicYUVContext),
    .priv_class     = &magicyuv_class,
    .init           = magy_encode_init,
    .close          = magy_encode_close,
    .encode2        = magy_encode_frame,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]) {
                          AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
                          AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
                          AV_PIX_FMT_YUVA444P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
                      },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
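
/*
 * Editorial note (added commentary): from the option table above, the
 * encoder is selected as "magicyuv" and exposes a single "pred" option with
 * values "left" (the default), "gradient" and "median".  A hypothetical
 * command line, for illustration only, could look like:
 *
 *     ffmpeg -i input.mov -c:v magicyuv -pred median output.avi
 */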