FFmpeg
utvideoenc.c
/*
 * Ut Video encoder
 * Copyright (c) 2012 Jan Ekström
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Ut Video encoder
 */

#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "internal.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "put_bits.h"
#include "mathops.h"
#include "utvideo.h"
#include "huffman.h"

typedef struct HuffEntry {
    uint16_t sym;
    uint8_t  len;
    uint32_t code;
} HuffEntry;

/* Compare huffman tree nodes */
static int ut_huff_cmp_len(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}

/* Compare huffentry symbols */
static int huff_cmp_sym(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return aa->sym - bb->sym;
}

static av_cold int utvideo_encode_close(AVCodecContext *avctx)
{
    UtvideoContext *c = avctx->priv_data;
    int i;

    av_freep(&c->slice_bits);
    for (i = 0; i < 4; i++)
        av_freep(&c->slice_buffer[i]);

    return 0;
}

static av_cold int utvideo_encode_init(AVCodecContext *avctx)
{
    UtvideoContext *c = avctx->priv_data;
    int i, subsampled_height;
    uint32_t original_format;

    c->avctx           = avctx;
    c->frame_info_size = 4;
    c->slice_stride    = FFALIGN(avctx->width, 32);

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
        c->planes        = 3;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
        original_format  = UTVIDEO_RGB;
        break;
    case AV_PIX_FMT_GBRAP:
        c->planes        = 4;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
        original_format  = UTVIDEO_RGBA;
        avctx->bits_per_coded_sample = 32;
        break;
    case AV_PIX_FMT_YUV420P:
        if (avctx->width & 1 || avctx->height & 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "4:2:0 video requires even width and height.\n");
            return AVERROR_INVALIDDATA;
        }
        c->planes = 3;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
        original_format = UTVIDEO_420;
        break;
    case AV_PIX_FMT_YUV422P:
        if (avctx->width & 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "4:2:2 video requires even width.\n");
            return AVERROR_INVALIDDATA;
        }
        c->planes = 3;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
        original_format = UTVIDEO_422;
        break;
    case AV_PIX_FMT_YUV444P:
        c->planes = 3;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
        original_format = UTVIDEO_444;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
               avctx->pix_fmt);
        return AVERROR_INVALIDDATA;
    }

    ff_bswapdsp_init(&c->bdsp);
    ff_llvidencdsp_init(&c->llvidencdsp);

    if (c->frame_pred == PRED_GRADIENT) {
        av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
        return AVERROR_OPTION_NOT_FOUND;
    }

    /*
     * Check the asked slice count for obviously invalid
     * values (> 256 or negative).
     */
    if (avctx->slices > 256 || avctx->slices < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
               avctx->slices);
        return AVERROR(EINVAL);
    }

    /* Check that the slice count is not larger than the subsampled height */
    subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
    if (avctx->slices > subsampled_height) {
        av_log(avctx, AV_LOG_ERROR,
               "Slice count %d is larger than the subsampling-applied height %d.\n",
               avctx->slices, subsampled_height);
        return AVERROR(EINVAL);
    }

    /* extradata size is 4 * 32 bits */
    avctx->extradata_size = 16;

    avctx->extradata = av_mallocz(avctx->extradata_size +
                                  AV_INPUT_BUFFER_PADDING_SIZE);

    if (!avctx->extradata) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < c->planes; i++) {
        c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
                                       AV_INPUT_BUFFER_PADDING_SIZE);
        if (!c->slice_buffer[i]) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
            return AVERROR(ENOMEM);
        }
    }

    /*
     * Set the version of the encoder.
     * Last byte is "implementation ID", which is
     * obtained from the creator of the format.
     * Libavcodec has been assigned with the ID 0xF0.
     */
    AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));

    /*
     * Set the "original format"
     * Not used for anything during decoding.
     */
    AV_WL32(avctx->extradata + 4, original_format);

    /* Write 4 as the 'frame info size' */
    AV_WL32(avctx->extradata + 8, c->frame_info_size);

    /*
     * Set how many slices are going to be used.
     * By default uses multiple slices depending on the subsampled height.
     * This enables multithreading in the official decoder.
     */
    if (!avctx->slices) {
        c->slices = subsampled_height / 120;

        if (!c->slices)
            c->slices = 1;
        else if (c->slices > 256)
            c->slices = 256;
    } else {
        c->slices = avctx->slices;
    }

    /* Set compression mode */
    c->compression = COMP_HUFF;

    /*
     * Set the encoding flags:
     * - Slice count minus 1
     * - Interlaced encoding mode flag, set to zero for now.
     * - Compression mode (none/huff)
     * And write the flags.
     */
    c->flags  = (c->slices - 1) << 24;
    c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
    c->flags |= c->compression;

    AV_WL32(avctx->extradata + 12, c->flags);

    return 0;
}
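
/*
 * Illustrative sketch (not part of the encoder): the 16-byte extradata built
 * above consists of four 32-bit fields, and the hypothetical helper below
 * shows how they could be read back. Only the field offsets and endianness
 * are taken from utvideo_encode_init(); the helper itself and its name are
 * made up for this example.
 */
#if 0
static void parse_utvideo_extradata_sketch(const uint8_t *extradata)
{
    uint32_t version     = AV_RB32(extradata);      /* written with AV_WB32() above   */
    uint32_t orig_format = AV_RL32(extradata +  4); /* UTVIDEO_RGB/RGBA/420/422/444   */
    uint32_t info_size   = AV_RL32(extradata +  8); /* always 4 for this encoder      */
    uint32_t flags       = AV_RL32(extradata + 12);
    int      slices      = (flags >> 24) + 1;       /* slice count minus 1 is stored  */
    int      compression = flags & 0xFF;            /* low bits carry the comp. mode  */
}
#endif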

static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
                              uint8_t *const src[4], int planes, const int stride[4],
                              int width, int height)
{
    int i, j;
    int k = 2 * dst_stride;
    const uint8_t *sg = src[0];
    const uint8_t *sb = src[1];
    const uint8_t *sr = src[2];
    const uint8_t *sa = src[3];
    unsigned int g;

    for (j = 0; j < height; j++) {
        if (planes == 3) {
            for (i = 0; i < width; i++) {
                g         = sg[i];
                dst[0][k] = g;
                g        += 0x80;
                dst[1][k] = sb[i] - g;
                dst[2][k] = sr[i] - g;
                k++;
            }
        } else {
            for (i = 0; i < width; i++) {
                g         = sg[i];
                dst[0][k] = g;
                g        += 0x80;
                dst[1][k] = sb[i] - g;
                dst[2][k] = sr[i] - g;
                dst[3][k] = sa[i];
                k++;
            }
            sa += stride[3];
        }
        k  += dst_stride - width;
        sg += stride[0];
        sb += stride[1];
        sr += stride[2];
    }
}
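
/*
 * Worked example (illustration only): for a GBRP pixel with G = 100,
 * B = 50 and R = 200, the loop above stores, modulo 256,
 *     plane 0: G            = 100
 *     plane 1: B - G - 0x80 = 50 - 100 - 128  -> 78
 *     plane 2: R - G - 0x80 = 200 - 100 - 128 -> 228
 * and a decoder recovers the original values by adding G + 0x80 back
 * (e.g. 78 + 100 + 128 = 306 -> 50 modulo 256). The alpha plane, when
 * present, is copied through unchanged.
 */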

#undef A
#undef B

/* Write data to a plane with median prediction */
static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst,
                           ptrdiff_t stride, int width, int height)
{
    int i, j;
    int A, B;
    uint8_t prev;

    /* First line uses left neighbour prediction */
    prev = 0x80; /* Set the initial value */
    for (i = 0; i < width; i++) {
        *dst++ = src[i] - prev;
        prev   = src[i];
    }

    if (height == 1)
        return;

    src += stride;

    /*
     * Second line uses top prediction for the first sample,
     * and median for the rest.
     */
    A = B = 0;

    /* Rest of the coded part uses median prediction */
    for (j = 1; j < height; j++) {
        c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
        dst += width;
        src += stride;
    }
}
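
/*
 * A scalar sketch (an assumption for illustration, not the actual DSP
 * routine) of the kind of computation sub_median_pred performs: each output
 * byte is the input minus the median of the left neighbour, the top
 * neighbour and left + top - topleft, with the running left/topleft values
 * carried across calls through A and B above.
 */
#if 0
static void sub_median_pred_sketch(uint8_t *dst, const uint8_t *top,
                                   const uint8_t *src, int w,
                                   int *left, int *left_top)
{
    int i, l = *left, lt = *left_top;

    for (i = 0; i < w; i++) {
        const int pred = mid_pred(l, top[i], (l + top[i] - lt) & 0xFF);
        lt     = top[i];
        l      = src[i];
        dst[i] = l - pred;
    }
    *left     = l;
    *left_top = lt;
}
#endif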

/* Count the usage of values in a plane */
static void count_usage(uint8_t *src, int width,
                        int height, uint64_t *counts)
{
    int i, j;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            counts[src[i]]++;
        }
        src += width;
    }
}

/* Calculate the actual huffman codes from the code lengths */
static void calculate_codes(HuffEntry *he)
{
    int last, i;
    uint32_t code;

    qsort(he, 256, sizeof(*he), ut_huff_cmp_len);

    last = 255;
    while (he[last].len == 255 && last)
        last--;

    code = 0;
    for (i = last; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code      += 0x80000000u >> (he[i].len - 1);
    }

    qsort(he, 256, sizeof(*he), huff_cmp_sym);
}
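
/*
 * Worked example (illustration only): with four used symbols whose lengths
 * are a:1, b:2, c:3 and d:3, the loop above walks from the longest entry
 * upwards with a 32-bit accumulator starting at 0:
 *     d (len 3): code 000, accumulator += 0x80000000 >> 2 -> 0x20000000
 *     c (len 3): code 001, accumulator += 0x80000000 >> 2 -> 0x40000000
 *     b (len 2): code 01,  accumulator += 0x80000000 >> 1 -> 0x80000000
 *     a (len 1): code 1
 * which yields a canonical prefix code. Entries with length 255 (unused
 * symbols) are skipped entirely by the loop above.
 */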

/* Write huffman bit codes to a memory block */
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
                            int width, int height, HuffEntry *he)
{
    PutBitContext pb;
    int i, j;
    int count;

    init_put_bits(&pb, dst, dst_size);

    /* Write the codes */
    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++)
            put_bits(&pb, he[src[i]].len, he[src[i]].code);

        src += width;
    }

    /* Pad output to a 32-bit boundary */
    count = put_bits_count(&pb) & 0x1F;

    if (count)
        put_bits(&pb, 32 - count, 0);

    /* Flush the rest with zeroes */
    flush_put_bits(&pb);

    /* Return the amount of bytes written */
    return put_bytes_output(&pb);
}

static int encode_plane(AVCodecContext *avctx, uint8_t *src,
                        uint8_t *dst, ptrdiff_t stride, int plane_no,
                        int width, int height, PutByteContext *pb)
{
    UtvideoContext *c = avctx->priv_data;
    uint8_t  lengths[256];
    uint64_t counts[256] = { 0 };

    HuffEntry he[256];

    uint32_t offset = 0, slice_len = 0;
    const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
    int      i, sstart, send = 0;
    int      symbol;
    int      ret;

    /* Do prediction / make planes */
    switch (c->frame_pred) {
    case PRED_NONE:
        for (i = 0; i < c->slices; i++) {
            sstart = send;
            send   = height * (i + 1) / c->slices & cmask;
            av_image_copy_plane(dst + sstart * width, width,
                                src + sstart * stride, stride,
                                width, send - sstart);
        }
        break;
    case PRED_LEFT:
        for (i = 0; i < c->slices; i++) {
            sstart = send;
            send   = height * (i + 1) / c->slices & cmask;
            c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
        }
        break;
    case PRED_MEDIAN:
        for (i = 0; i < c->slices; i++) {
            sstart = send;
            send   = height * (i + 1) / c->slices & cmask;
            median_predict(c, src + sstart * stride, dst + sstart * width,
                           stride, width, send - sstart);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
               c->frame_pred);
        return AVERROR_OPTION_NOT_FOUND;
    }

    /* Count the usage of values */
    count_usage(dst, width, height, counts);

    /* Check for a special case where only one symbol was used */
    for (symbol = 0; symbol < 256; symbol++) {
        /* If non-zero count is found, see if it matches width * height */
        if (counts[symbol]) {
            /* Special case if only one symbol was used */
            if (counts[symbol] == width * (int64_t)height) {
                /*
                 * Write a zero for the single symbol
                 * used in the plane, else 0xFF.
                 */
                for (i = 0; i < 256; i++) {
                    if (i == symbol)
                        bytestream2_put_byte(pb, 0);
                    else
                        bytestream2_put_byte(pb, 0xFF);
                }

                /* Write zeroes for lengths */
                for (i = 0; i < c->slices; i++)
                    bytestream2_put_le32(pb, 0);

                /* And that's all for that plane folks */
                return 0;
            }
            break;
        }
    }

    /* Calculate huffman lengths */
    if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
        return ret;

    /*
     * Write the plane's header into the output packet:
     * - huffman code lengths (256 bytes)
     * - slice end offsets (gotten from the slice lengths)
     */
    for (i = 0; i < 256; i++) {
        bytestream2_put_byte(pb, lengths[i]);

        he[i].len = lengths[i];
        he[i].sym = i;
    }

    /* Calculate the huffman codes themselves */
    calculate_codes(he);

    send = 0;
    for (i = 0; i < c->slices; i++) {
        sstart = send;
        send   = height * (i + 1) / c->slices & cmask;

        /*
         * Write the huffman codes to a buffer,
         * get the offset in bytes.
         */
        offset += write_huff_codes(dst + sstart * width, c->slice_bits,
                                   width * height + 4, width,
                                   send - sstart, he);

        slice_len = offset - slice_len;

        /* Byteswap the written huffman codes */
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *) c->slice_bits,
                          slice_len >> 2);

        /* Write the offset to the stream */
        bytestream2_put_le32(pb, offset);

        /* Seek to the data part of the packet */
        bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
                               offset - slice_len, SEEK_CUR);

        /* Write the slices' data into the output packet */
        bytestream2_put_buffer(pb, c->slice_bits, slice_len);

        /* Seek back to the slice offsets */
        bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
                           SEEK_CUR);

        slice_len = offset;
    }

    /* And at the end seek to the end of written slice(s) */
    bytestream2_seek_p(pb, offset, SEEK_CUR);

    return 0;
}
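
/*
 * Illustrative sketch (not part of the encoder): encode_plane() lays a plane
 * out as a 256-byte length table, then one cumulative little-endian 32-bit
 * end offset per slice, then the concatenated slice data. The hypothetical
 * helper below shows how a reader could locate one slice in that layout;
 * its name and signature are made up for this example.
 */
#if 0
static void locate_slice_sketch(const uint8_t *plane, int nb_slices, int slice_no,
                                const uint8_t **start, const uint8_t **end)
{
    const uint8_t *offsets = plane + 256;             /* after the length table  */
    const uint8_t *data    = offsets + 4 * nb_slices; /* first slice's bitstream */
    uint32_t prev = slice_no ? AV_RL32(offsets + 4 * (slice_no - 1)) : 0;
    uint32_t cur  = AV_RL32(offsets + 4 * slice_no);

    *start = data + prev;
    *end   = data + cur;
}
#endif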

static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pic, int *got_packet)
{
    UtvideoContext *c = avctx->priv_data;
    PutByteContext pb;

    uint32_t frame_info;

    uint8_t *dst;

    int width = avctx->width, height = avctx->height;
    int i, ret = 0;

    /* Allocate a new packet if needed, and set it to the pointer dst */
    ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
                           c->planes + 4, 0);

    if (ret < 0)
        return ret;

    dst = pkt->data;

    bytestream2_init_writer(&pb, dst, pkt->size);

    av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);

    if (!c->slice_bits) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
        return AVERROR(ENOMEM);
    }

    /* In case of RGB, mangle the planes to Ut Video's format */
    if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
        mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
                          c->planes, pic->linesize, width, height);

    /* Deal with the planes */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRAP:
        for (i = 0; i < c->planes; i++) {
            ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
                               c->slice_buffer[i], c->slice_stride, i,
                               width, height, &pb);

            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
                return ret;
            }
        }
        break;
    case AV_PIX_FMT_YUV444P:
        for (i = 0; i < c->planes; i++) {
            ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
                               pic->linesize[i], i, width, height, &pb);

            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
                return ret;
            }
        }
        break;
    case AV_PIX_FMT_YUV422P:
        for (i = 0; i < c->planes; i++) {
            ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
                               pic->linesize[i], i, width >> !!i, height, &pb);

            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
                return ret;
            }
        }
        break;
    case AV_PIX_FMT_YUV420P:
        for (i = 0; i < c->planes; i++) {
            ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
                               pic->linesize[i], i, width >> !!i, height >> !!i,
                               &pb);

            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
                return ret;
            }
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
               avctx->pix_fmt);
        return AVERROR_INVALIDDATA;
    }

    /*
     * Write frame information (LE 32-bit unsigned)
     * into the output packet.
     * Contains the prediction method.
     */
    frame_info = c->frame_pred << 8;
    bytestream2_put_le32(&pb, frame_info);

    /*
     * At least currently Ut Video is IDR only.
     * Set flags accordingly.
     */
    pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->size   = bytestream2_tell_p(&pb);

    /* Packet should be done */
    *got_packet = 1;

    return 0;
}
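
/*
 * Minimal usage sketch (not part of this file): encoding a single frame
 * through the public libavcodec API, assuming the caller has already
 * allocated and filled an AVFrame in a supported pixel format. Error
 * handling is omitted and the function name is made up for this example.
 */
#if 0
static int utvideo_encode_one_frame_sketch(AVFrame *frame, AVPacket *out)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_UTVIDEO);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    int ret;

    ctx->width     = frame->width;
    ctx->height    = frame->height;
    ctx->pix_fmt   = frame->format;
    ctx->time_base = (AVRational){ 1, 25 };

    /* Select the private "pred" option declared below */
    av_opt_set(ctx->priv_data, "pred", "median", 0);

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_frame(ctx, frame);
    if (ret >= 0)
        ret = avcodec_receive_packet(ctx, out);

    avcodec_free_context(&ctx);
    return ret;
}
#endif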

#define OFFSET(x) offsetof(UtvideoContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
        { "none",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE },     INT_MIN, INT_MAX, VE, "pred" },
        { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT },     INT_MIN, INT_MAX, VE, "pred" },
        { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
        { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN },   INT_MIN, INT_MAX, VE, "pred" },

    { NULL },
};
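
/*
 * Example (illustrative): the private "pred" option above is exposed on the
 * ffmpeg command line like any other encoder option, e.g.
 *     ffmpeg -i input.mov -c:v utvideo -pred median output.avi
 * (the file names here are placeholders).
 */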

static const AVClass utvideo_class = {
    .class_name = "utvideo",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const AVCodec ff_utvideo_encoder = {
    .name           = "utvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_UTVIDEO,
    .priv_data_size = sizeof(UtvideoContext),
    .priv_class     = &utvideo_class,
    .init           = utvideo_encode_init,
    .encode2        = utvideo_encode_frame,
    .close          = utvideo_encode_close,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]) {
                          AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
                          AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
                      },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};