FFmpeg
utvideoenc.c
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "put_bits.h"
36 #include "mathops.h"
37 #include "utvideo.h"
38 #include "huffman.h"
39 
40 /* Compare huffentry symbols */
41 static int huff_cmp_sym(const void *a, const void *b)
42 {
43  const HuffEntry *aa = a, *bb = b;
44  return aa->sym - bb->sym;
45 }
46 
47 static av_cold int utvideo_encode_close(AVCodecContext *avctx)
48 {
49  UtvideoContext *c = avctx->priv_data;
50  int i;
51 
52  av_freep(&c->slice_bits);
53  for (i = 0; i < 4; i++)
54  av_freep(&c->slice_buffer[i]);
55 
56  return 0;
57 }
58 
59 static av_cold int utvideo_encode_init(AVCodecContext *avctx)
60 {
61  UtvideoContext *c = avctx->priv_data;
62  int i, subsampled_height;
63  uint32_t original_format;
64 
65  c->avctx = avctx;
66  c->frame_info_size = 4;
67  c->slice_stride = FFALIGN(avctx->width, 32);
68 
69  switch (avctx->pix_fmt) {
70  case AV_PIX_FMT_GBRP:
71  c->planes = 3;
72  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
73  original_format = UTVIDEO_RGB;
74  break;
75  case AV_PIX_FMT_GBRAP:
76  c->planes = 4;
77  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
78  original_format = UTVIDEO_RGBA;
79  avctx->bits_per_coded_sample = 32;
80  break;
81  case AV_PIX_FMT_YUV420P:
82  if (avctx->width & 1 || avctx->height & 1) {
83  av_log(avctx, AV_LOG_ERROR,
84  "4:2:0 video requires even width and height.\n");
85  return AVERROR_INVALIDDATA;
86  }
87  c->planes = 3;
88  if (avctx->colorspace == AVCOL_SPC_BT709)
89  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
90  else
91  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
92  original_format = UTVIDEO_420;
93  break;
94  case AV_PIX_FMT_YUV422P:
95  if (avctx->width & 1) {
96  av_log(avctx, AV_LOG_ERROR,
97  "4:2:2 video requires even width.\n");
98  return AVERROR_INVALIDDATA;
99  }
100  c->planes = 3;
101  if (avctx->colorspace == AVCOL_SPC_BT709)
102  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
103  else
104  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
105  original_format = UTVIDEO_422;
106  break;
107  case AV_PIX_FMT_YUV444P:
108  c->planes = 3;
109  if (avctx->colorspace == AVCOL_SPC_BT709)
110  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
111  else
112  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
113  original_format = UTVIDEO_444;
114  break;
115  default:
116  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
117  avctx->pix_fmt);
118  return AVERROR_INVALIDDATA;
119  }
120 
121  ff_bswapdsp_init(&c->bdsp);
122  ff_llvidencdsp_init(&c->llvidencdsp);
123 
124 #if FF_API_PRIVATE_OPT
125 FF_DISABLE_DEPRECATION_WARNINGS
126  /* Check the prediction method, and error out if unsupported */
127  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
128  av_log(avctx, AV_LOG_WARNING,
129  "Prediction method %d is not supported in Ut Video.\n",
130  avctx->prediction_method);
131  return AVERROR_OPTION_NOT_FOUND;
132  }
133 
134  if (avctx->prediction_method == FF_PRED_PLANE) {
135  av_log(avctx, AV_LOG_ERROR,
136  "Plane prediction is not supported in Ut Video.\n");
137  return AVERROR_OPTION_NOT_FOUND;
138  }
139 
140  /* Convert from libavcodec prediction type to Ut Video's */
141  if (avctx->prediction_method)
142  c->frame_pred = ff_ut_pred_order[avctx->prediction_method];
143 FF_ENABLE_DEPRECATION_WARNINGS
144 #endif
145 
146  if (c->frame_pred == PRED_GRADIENT) {
147  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
148  return AVERROR_OPTION_NOT_FOUND;
149  }
150 
151  /*
152  * Check the requested slice count for obviously invalid
153  * values (> 256 or negative).
154  */
155  if (avctx->slices > 256 || avctx->slices < 0) {
156  av_log(avctx, AV_LOG_ERROR,
157  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
158  avctx->slices);
159  return AVERROR(EINVAL);
160  }
161 
162  /* Check that the slice count is not larger than the subsampled height */
163  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
164  if (avctx->slices > subsampled_height) {
165  av_log(avctx, AV_LOG_ERROR,
166  "Slice count %d is larger than the subsampling-applied height %d.\n",
167  avctx->slices, subsampled_height);
168  return AVERROR(EINVAL);
169  }
170 
171  /* extradata size is 4 * 32 bits */
172  avctx->extradata_size = 16;
173 
174  avctx->extradata = av_mallocz(avctx->extradata_size +
175  AV_INPUT_BUFFER_PADDING_SIZE);
176 
177  if (!avctx->extradata) {
178  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
179  utvideo_encode_close(avctx);
180  return AVERROR(ENOMEM);
181  }
182 
183  for (i = 0; i < c->planes; i++) {
184  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
185  AV_INPUT_BUFFER_PADDING_SIZE);
186  if (!c->slice_buffer[i]) {
187  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
188  utvideo_encode_close(avctx);
189  return AVERROR(ENOMEM);
190  }
191  }
192 
193  /*
194  * Set the version of the encoder.
195  * Last byte is "implementation ID", which is
196  * obtained from the creator of the format.
198  * Libavcodec has been assigned the ID 0xF0.
198  */
199  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
200 
201  /*
202  * Set the "original format"
203  * Not used for anything during decoding.
204  */
205  AV_WL32(avctx->extradata + 4, original_format);
206 
207  /* Write 4 as the 'frame info size' */
208  AV_WL32(avctx->extradata + 8, c->frame_info_size);
209 
210  /*
211  * Set how many slices are going to be used.
212  * By default, multiple slices are used, depending on the subsampled height.
213  * This enables multithreading in the official decoder.
214  */
215  if (!avctx->slices) {
216  c->slices = subsampled_height / 120;
217 
218  if (!c->slices)
219  c->slices = 1;
220  else if (c->slices > 256)
221  c->slices = 256;
222  } else {
223  c->slices = avctx->slices;
224  }
225 
226  /* Set compression mode */
227  c->compression = COMP_HUFF;
228 
229  /*
230  * Set the encoding flags:
231  * - Slice count minus 1
232  * - Interlaced encoding mode flag, set to zero for now.
233  * - Compression mode (none/huff)
234  * And write the flags.
235  */
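 /*
  * In other words, c->flags ends up laid out as:
  *   bits 31..24  slice count minus one
  *   bit  11      interlaced flag (always 0 here)
  *   low bits     compression mode (COMP_HUFF)
  */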
236  c->flags = (c->slices - 1) << 24;
237  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
238  c->flags |= c->compression;
239 
240  AV_WL32(avctx->extradata + 12, c->flags);
241 
242  return 0;
243 }
244 
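/*
 * Note on the transform below: G is stored as-is, while B and R are stored
 * as their differences from G + 0x80 (modulo 256, the wraparound is
 * intentional). The output is written starting two rows into the
 * destination buffer (k = 2 * dst_stride), matching the source offset that
 * utvideo_encode_frame() later passes to encode_plane().
 */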
245 static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
246  uint8_t *const src[4], int planes, const int stride[4],
247  int width, int height)
248 {
249  int i, j;
250  int k = 2 * dst_stride;
251  const uint8_t *sg = src[0];
252  const uint8_t *sb = src[1];
253  const uint8_t *sr = src[2];
254  const uint8_t *sa = src[3];
255  unsigned int g;
256 
257  for (j = 0; j < height; j++) {
258  if (planes == 3) {
259  for (i = 0; i < width; i++) {
260  g = sg[i];
261  dst[0][k] = g;
262  g += 0x80;
263  dst[1][k] = sb[i] - g;
264  dst[2][k] = sr[i] - g;
265  k++;
266  }
267  } else {
268  for (i = 0; i < width; i++) {
269  g = sg[i];
270  dst[0][k] = g;
271  g += 0x80;
272  dst[1][k] = sb[i] - g;
273  dst[2][k] = sr[i] - g;
274  dst[3][k] = sa[i];
275  k++;
276  }
277  sa += stride[3];
278  }
279  k += dst_stride - width;
280  sg += stride[0];
281  sb += stride[1];
282  sr += stride[2];
283  }
284 }
285 
286 #undef A
287 #undef B
288 
289 /* Write data to a plane with median prediction */
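/*
 * The first row is left-predicted starting from 0x80; the remaining rows
 * use the HuffYUV-style median predictor via sub_median_pred(), with A and
 * B carrying the running left / top-left state from row to row.
 */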
290 static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst,
291  ptrdiff_t stride, int width, int height)
292 {
293  int i, j;
294  int A, B;
295  uint8_t prev;
296 
297  /* First line uses left neighbour prediction */
298  prev = 0x80; /* Set the initial value */
299  for (i = 0; i < width; i++) {
300  *dst++ = src[i] - prev;
301  prev = src[i];
302  }
303 
304  if (height == 1)
305  return;
306 
307  src += stride;
308 
309  /*
310  * Second line uses top prediction for the first sample,
311  * and median for the rest.
312  */
313  A = B = 0;
314 
315  /* Rest of the coded part uses median prediction */
316  for (j = 1; j < height; j++) {
317  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
318  dst += width;
319  src += stride;
320  }
321 }
322 
323 /* Count the usage of values in a plane */
324 static void count_usage(uint8_t *src, int width,
325  int height, uint64_t *counts)
326 {
327  int i, j;
328 
329  for (j = 0; j < height; j++) {
330  for (i = 0; i < width; i++) {
331  counts[src[i]]++;
332  }
333  src += width;
334  }
335 }
336 
337 /* Calculate the actual huffman codes from the code lengths */
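/*
 * This builds a canonical Huffman code: entries are sorted by ascending
 * code length (unused symbols carry length 255 and are skipped), codes are
 * assigned from the longest length upwards using a left-aligned 32-bit
 * accumulator, and the table is finally re-sorted by symbol so it can be
 * indexed directly by byte value when writing the bitstream.
 */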
338 static void calculate_codes(HuffEntry *he)
339 {
340  int last, i;
341  uint32_t code;
342 
343  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
344 
345  last = 255;
346  while (he[last].len == 255 && last)
347  last--;
348 
349  code = 1;
350  for (i = last; i >= 0; i--) {
351  he[i].code = code >> (32 - he[i].len);
352  code += 0x80000000u >> (he[i].len - 1);
353  }
354 
355  qsort(he, 256, sizeof(*he), huff_cmp_sym);
356 }
357 
358 /* Write huffman bit codes to a memory block */
359 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
360  int width, int height, HuffEntry *he)
361 {
362  PutBitContext pb;
363  int i, j;
364  int count;
365 
366  init_put_bits(&pb, dst, dst_size);
367 
368  /* Write the codes */
369  for (j = 0; j < height; j++) {
370  for (i = 0; i < width; i++)
371  put_bits(&pb, he[src[i]].len, he[src[i]].code);
372 
373  src += width;
374  }
375 
376  /* Pad output to a 32-bit boundary */
377  count = put_bits_count(&pb) & 0x1F;
378 
379  if (count)
380  put_bits(&pb, 32 - count, 0);
381 
382  /* Get the amount of bits written */
383  count = put_bits_count(&pb);
384 
385  /* Flush the rest with zeroes */
386  flush_put_bits(&pb);
387 
388  return count;
389 }
390 
391 static int encode_plane(AVCodecContext *avctx, uint8_t *src,
392  uint8_t *dst, ptrdiff_t stride, int plane_no,
393  int width, int height, PutByteContext *pb)
394 {
395  UtvideoContext *c = avctx->priv_data;
396  uint8_t lengths[256];
397  uint64_t counts[256] = { 0 };
398 
399  HuffEntry he[256];
400 
401  uint32_t offset = 0, slice_len = 0;
402  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
403  int i, sstart, send = 0;
404  int symbol;
405  int ret;
406 
407  /* Do prediction / make planes */
408  switch (c->frame_pred) {
409  case PRED_NONE:
410  for (i = 0; i < c->slices; i++) {
411  sstart = send;
412  send = height * (i + 1) / c->slices & cmask;
413  av_image_copy_plane(dst + sstart * width, width,
414  src + sstart * stride, stride,
415  width, send - sstart);
416  }
417  break;
418  case PRED_LEFT:
419  for (i = 0; i < c->slices; i++) {
420  sstart = send;
421  send = height * (i + 1) / c->slices & cmask;
422  c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
423  }
424  break;
425  case PRED_MEDIAN:
426  for (i = 0; i < c->slices; i++) {
427  sstart = send;
428  send = height * (i + 1) / c->slices & cmask;
429  median_predict(c, src + sstart * stride, dst + sstart * width,
430  stride, width, send - sstart);
431  }
432  break;
433  default:
434  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
435  c->frame_pred);
436  return AVERROR_OPTION_NOT_FOUND;
437  }
438 
439  /* Count the usage of values */
440  count_usage(dst, width, height, counts);
441 
442  /* Check for a special case where only one symbol was used */
443  for (symbol = 0; symbol < 256; symbol++) {
444  /* If non-zero count is found, see if it matches width * height */
445  if (counts[symbol]) {
446  /* Special case if only one symbol was used */
447  if (counts[symbol] == width * (int64_t)height) {
448  /*
449  * Write a zero for the single symbol
450  * used in the plane, else 0xFF.
451  */
452  for (i = 0; i < 256; i++) {
453  if (i == symbol)
454  bytestream2_put_byte(pb, 0);
455  else
456  bytestream2_put_byte(pb, 0xFF);
457  }
458 
459  /* Write zeroes for lengths */
460  for (i = 0; i < c->slices; i++)
461  bytestream2_put_le32(pb, 0);
462 
463  /* And that's all for that plane folks */
464  return 0;
465  }
466  break;
467  }
468  }
469 
470  /* Calculate huffman lengths */
471  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
472  return ret;
473 
474  /*
475  * Write the plane's header into the output packet:
476  * - huffman code lengths (256 bytes)
477  * - slice end offsets (derived from the slice lengths)
478  */
479  for (i = 0; i < 256; i++) {
480  bytestream2_put_byte(pb, lengths[i]);
481 
482  he[i].len = lengths[i];
483  he[i].sym = i;
484  }
485 
486  /* Calculate the huffman codes themselves */
487  calculate_codes(he);
488 
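 /*
  * Per-plane layout in the packet from here on: the 256 length bytes were
  * written above, next come c->slices little-endian 32-bit slice end
  * offsets, followed by the concatenated (byteswapped) bitstreams. The
  * loop below fills both regions, seeking forward to write each slice's
  * data and back to write its end offset.
  */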
489  send = 0;
490  for (i = 0; i < c->slices; i++) {
491  sstart = send;
492  send = height * (i + 1) / c->slices & cmask;
493 
494  /*
495  * Write the huffman codes to a buffer,
496  * get the offset in bits and convert to bytes.
497  */
498  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
499  width * height + 4, width,
500  send - sstart, he) >> 3;
501 
502  slice_len = offset - slice_len;
503 
504  /* Byteswap the written huffman codes */
505  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
506  (uint32_t *) c->slice_bits,
507  slice_len >> 2);
508 
509  /* Write the offset to the stream */
510  bytestream2_put_le32(pb, offset);
511 
512  /* Seek to the data part of the packet */
513  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
514  offset - slice_len, SEEK_CUR);
515 
516  /* Write the slices' data into the output packet */
517  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
518 
519  /* Seek back to the slice offsets */
520  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
521  SEEK_CUR);
522 
523  slice_len = offset;
524  }
525 
526  /* And at the end seek to the end of written slice(s) */
527  bytestream2_seek_p(pb, offset, SEEK_CUR);
528 
529  return 0;
530 }
531 
532 static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
533  const AVFrame *pic, int *got_packet)
534 {
535  UtvideoContext *c = avctx->priv_data;
536  PutByteContext pb;
537 
538  uint32_t frame_info;
539 
540  uint8_t *dst;
541 
542  int width = avctx->width, height = avctx->height;
543  int i, ret = 0;
544 
545  /* Allocate a new packet if needed, and set it to the pointer dst */
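 /*
  * Worst-case estimate: per plane, 256 bytes of code lengths, a 32-bit end
  * offset per slice, and roughly width * height bytes of Huffman data (a
  * code built from the plane's own histogram averages at most 8 bits per
  * symbol), plus 4 bytes of frame info at the end of the packet.
  */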
546  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
547  c->planes + 4, 0);
548 
549  if (ret < 0)
550  return ret;
551 
552  dst = pkt->data;
553 
554  bytestream2_init_writer(&pb, dst, pkt->size);
555 
556  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
557 
558  if (!c->slice_bits) {
559  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
560  return AVERROR(ENOMEM);
561  }
562 
563  /* In case of RGB, mangle the planes to Ut Video's format */
564  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
565  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
566  c->planes, pic->linesize, width, height);
567 
568  /* Deal with the planes */
569  switch (avctx->pix_fmt) {
570  case AV_PIX_FMT_GBRP:
571  case AV_PIX_FMT_GBRAP:
572  for (i = 0; i < c->planes; i++) {
573  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
574  c->slice_buffer[i], c->slice_stride, i,
575  width, height, &pb);
576 
577  if (ret) {
578  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
579  return ret;
580  }
581  }
582  break;
583  case AV_PIX_FMT_YUV444P:
584  for (i = 0; i < c->planes; i++) {
585  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
586  pic->linesize[i], i, width, height, &pb);
587 
588  if (ret) {
589  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
590  return ret;
591  }
592  }
593  break;
594  case AV_PIX_FMT_YUV422P:
595  for (i = 0; i < c->planes; i++) {
596  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
597  pic->linesize[i], i, width >> !!i, height, &pb);
598 
599  if (ret) {
600  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
601  return ret;
602  }
603  }
604  break;
605  case AV_PIX_FMT_YUV420P:
606  for (i = 0; i < c->planes; i++) {
607  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
608  pic->linesize[i], i, width >> !!i, height >> !!i,
609  &pb);
610 
611  if (ret) {
612  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
613  return ret;
614  }
615  }
616  break;
617  default:
618  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
619  avctx->pix_fmt);
620  return AVERROR_INVALIDDATA;
621  }
622 
623  /*
624  * Write frame information (LE 32-bit unsigned)
625  * into the output packet.
626  * Contains the prediction method.
627  */
628  frame_info = c->frame_pred << 8;
629  bytestream2_put_le32(&pb, frame_info);
630 
631  /*
632  * At least currently Ut Video is IDR only.
633  * Set flags accordingly.
634  */
635 #if FF_API_CODED_FRAME
636 FF_DISABLE_DEPRECATION_WARNINGS
637  avctx->coded_frame->key_frame = 1;
638  avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
639 FF_ENABLE_DEPRECATION_WARNINGS
640 #endif
641 
642  pkt->size = bytestream2_tell_p(&pb);
643  pkt->flags |= AV_PKT_FLAG_KEY;
644 
645  /* Packet should be done */
646  *got_packet = 1;
647 
648  return 0;
649 }
650 
651 #define OFFSET(x) offsetof(UtvideoContext, x)
652 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
653 static const AVOption options[] = {
654 { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
655  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
656  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
657  { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
658  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },
659 
660  { NULL},
661 };
662 
663 static const AVClass utvideo_class = {
664  .class_name = "utvideo",
665  .item_name = av_default_item_name,
666  .option = options,
667  .version = LIBAVUTIL_VERSION_INT,
668 };
669 
670 AVCodec ff_utvideo_encoder = {
671  .name = "utvideo",
672  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
673  .type = AVMEDIA_TYPE_VIDEO,
674  .id = AV_CODEC_ID_UTVIDEO,
675  .priv_data_size = sizeof(UtvideoContext),
676  .priv_class = &utvideo_class,
677  .init = utvideo_encode_init,
678  .encode2 = utvideo_encode_frame,
679  .close = utvideo_encode_close,
680  .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
681  .pix_fmts = (const enum AVPixelFormat[]) {
682  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
683  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
684  },
685 };
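
For context, a minimal sketch of driving this encoder through the public libavcodec API of the same era (avcodec_send_frame / avcodec_receive_packet, FFmpeg 3.1+). The codec ID, the accepted pixel formats and the "pred" private option come from the tables above; the resolution, frame rate, helper name and the bare-bones error handling are illustrative assumptions only.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Hypothetical helper: encode a single frame with the Ut Video encoder. */
static int encode_one_utvideo_frame(void)
{
    AVCodec *codec        = avcodec_find_encoder(AV_CODEC_ID_UTVIDEO);
    AVCodecContext *enc   = avcodec_alloc_context3(codec);
    AVFrame *frame        = av_frame_alloc();
    AVPacket *pkt         = av_packet_alloc();
    int ret;

    if (!codec || !enc || !frame || !pkt)
        return AVERROR(ENOMEM);

    enc->width     = 1280;                          /* assumed resolution */
    enc->height    = 720;
    enc->time_base = (AVRational){ 1, 25 };         /* assumed frame rate */
    enc->pix_fmt   = AV_PIX_FMT_YUV422P;            /* one of the formats accepted above */
    av_opt_set(enc->priv_data, "pred", "median", 0); /* the private option declared above */

    if ((ret = avcodec_open2(enc, codec, NULL)) < 0)
        return ret;

    frame->format = enc->pix_fmt;
    frame->width  = enc->width;
    frame->height = enc->height;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* ... fill frame->data[] with picture content here ... */

    if ((ret = avcodec_send_frame(enc, frame)) < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* pkt->data / pkt->size now hold one Ut Video keyframe */
        av_packet_unref(pkt);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&enc);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}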