FFmpeg
utvideoenc.c
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "put_bits.h"
36 #include "huffyuvencdsp.h"
37 #include "mathops.h"
38 #include "utvideo.h"
39 #include "huffman.h"
40 
41 /* Compare huffentry symbols */
42 static int huff_cmp_sym(const void *a, const void *b)
43 {
44  const HuffEntry *aa = a, *bb = b;
45  return aa->sym - bb->sym;
46 }
47 
48 static av_cold int utvideo_encode_close(AVCodecContext *avctx)
49 {
50  UtvideoContext *c = avctx->priv_data;
51  int i;
52 
53  av_freep(&c->slice_bits);
54  for (i = 0; i < 4; i++)
55  av_freep(&c->slice_buffer[i]);
56 
57  return 0;
58 }
59 
60 static av_cold int utvideo_encode_init(AVCodecContext *avctx)
61 {
62  UtvideoContext *c = avctx->priv_data;
63  int i, subsampled_height;
64  uint32_t original_format;
65 
66  c->avctx = avctx;
67  c->frame_info_size = 4;
68  c->slice_stride = FFALIGN(avctx->width, 32);
69 
70  switch (avctx->pix_fmt) {
71  case AV_PIX_FMT_RGB24:
72  c->planes = 3;
73  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
74  original_format = UTVIDEO_RGB;
75  break;
76  case AV_PIX_FMT_RGBA:
77  c->planes = 4;
78  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
79  original_format = UTVIDEO_RGBA;
80  avctx->bits_per_coded_sample = 32;
81  break;
82  case AV_PIX_FMT_YUV420P:
83  if (avctx->width & 1 || avctx->height & 1) {
84  av_log(avctx, AV_LOG_ERROR,
85  "4:2:0 video requires even width and height.\n");
86  return AVERROR_INVALIDDATA;
87  }
88  c->planes = 3;
89  if (avctx->colorspace == AVCOL_SPC_BT709)
90  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
91  else
92  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
93  original_format = UTVIDEO_420;
94  break;
95  case AV_PIX_FMT_YUV422P:
96  if (avctx->width & 1) {
97  av_log(avctx, AV_LOG_ERROR,
98  "4:2:2 video requires even width.\n");
99  return AVERROR_INVALIDDATA;
100  }
101  c->planes = 3;
102  if (avctx->colorspace == AVCOL_SPC_BT709)
103  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
104  else
105  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
106  original_format = UTVIDEO_422;
107  break;
108  case AV_PIX_FMT_YUV444P:
109  c->planes = 3;
110  if (avctx->colorspace == AVCOL_SPC_BT709)
111  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
112  else
113  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
114  original_format = UTVIDEO_444;
115  break;
116  default:
117  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
118  avctx->pix_fmt);
119  return AVERROR_INVALIDDATA;
120  }
121 
122  ff_bswapdsp_init(&c->bdsp);
123  ff_huffyuvencdsp_init(&c->hdsp);
124 
125 #if FF_API_PRIVATE_OPT
126 FF_DISABLE_DEPRECATION_WARNINGS
127  /* Check the prediction method, and error out if unsupported */
128  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
129  av_log(avctx, AV_LOG_WARNING,
130  "Prediction method %d is not supported in Ut Video.\n",
131  avctx->prediction_method);
132  return AVERROR_OPTION_NOT_FOUND;
133  }
134 
135  if (avctx->prediction_method == FF_PRED_PLANE) {
136  av_log(avctx, AV_LOG_ERROR,
137  "Plane prediction is not supported in Ut Video.\n");
138  return AVERROR_OPTION_NOT_FOUND;
139  }
140 
141  /* Convert from libavcodec prediction type to Ut Video's */
142  if (avctx->prediction_method)
143  c->frame_pred = ff_ut_pred_order[avctx->prediction_method];
144 FF_ENABLE_DEPRECATION_WARNINGS
145 #endif
146 
147  if (c->frame_pred == PRED_GRADIENT) {
148  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
149  return AVERROR_OPTION_NOT_FOUND;
150  }
151 
152  /*
153  * Check the asked slice count for obviously invalid
154  * values (> 256 or negative).
155  */
156  if (avctx->slices > 256 || avctx->slices < 0) {
157  av_log(avctx, AV_LOG_ERROR,
158  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
159  avctx->slices);
160  return AVERROR(EINVAL);
161  }
162 
163  /* Check that the slice count is not larger than the subsampled height */
164  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
165  if (avctx->slices > subsampled_height) {
166  av_log(avctx, AV_LOG_ERROR,
167  "Slice count %d is larger than the subsampling-applied height %d.\n",
168  avctx->slices, subsampled_height);
169  return AVERROR(EINVAL);
170  }
171 
172  /* extradata size is 4 * 32 bits */
173  avctx->extradata_size = 16;
174 
175  avctx->extradata = av_mallocz(avctx->extradata_size +
176  AV_INPUT_BUFFER_PADDING_SIZE);
177 
178  if (!avctx->extradata) {
179  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
180  utvideo_encode_close(avctx);
181  return AVERROR(ENOMEM);
182  }
183 
184  for (i = 0; i < c->planes; i++) {
185  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
186  AV_INPUT_BUFFER_PADDING_SIZE);
187  if (!c->slice_buffer[i]) {
188  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
189  utvideo_encode_close(avctx);
190  return AVERROR(ENOMEM);
191  }
192  }
193 
194  /*
195  * Set the version of the encoder.
196  * Last byte is "implementation ID", which is
197  * obtained from the creator of the format.
198  * Libavcodec has been assigned the ID 0xF0.
199  */
200  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
201 
202  /*
203  * Set the "original format"
204  * Not used for anything during decoding.
205  */
206  AV_WL32(avctx->extradata + 4, original_format);
207 
208  /* Write 4 as the 'frame info size' */
209  AV_WL32(avctx->extradata + 8, c->frame_info_size);
210 
211  /*
212  * Set how many slices are going to be used.
213  * By default uses multiple slices depending on the subsampled height.
214  * This enables multithreading in the official decoder.
215  */
216  if (!avctx->slices) {
217  c->slices = subsampled_height / 120;
218 
219  if (!c->slices)
220  c->slices = 1;
221  else if (c->slices > 256)
222  c->slices = 256;
223  } else {
224  c->slices = avctx->slices;
225  }
226 
227  /* Set compression mode */
228  c->compression = COMP_HUFF;
229 
230  /*
231  * Set the encoding flags:
232  * - Slice count minus 1
233  * - Interlaced encoding mode flag, set to zero for now.
234  * - Compression mode (none/huff)
235  * And write the flags.
236  */
237  c->flags = (c->slices - 1) << 24;
238  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
239  c->flags |= c->compression;
240 
241  AV_WL32(avctx->extradata + 12, c->flags);
242 
243  return 0;
244 }
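/*
 * Annotation (not part of utvideoenc.c): a minimal sketch of reading back the
 * 16-byte extradata block assembled in utvideo_encode_init() above.  Field
 * offsets and bit positions are taken from the comments in that function;
 * the helper names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rl32(const uint8_t *p)               /* little-endian 32-bit read */
{
    return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void dump_utvideo_extradata(const uint8_t ed[16])
{
    /* bytes 0-3: encoder version / implementation ID, written above
     * big-endian as MKTAG(1, 0, 0, 0xF0) */
    printf("version bytes: %02X %02X %02X %02X\n", ed[0], ed[1], ed[2], ed[3]);

    /* bytes 4-7: "original format" tag, informational only */
    printf("original format: 0x%08X\n", (unsigned)rl32(ed + 4));

    /* bytes 8-11: frame info size, always 4 for this encoder */
    printf("frame info size: %u\n", (unsigned)rl32(ed + 8));

    /* bytes 12-15: flags -- slice count minus one in the top byte,
     * interlace flag in bit 11, compression mode in the low bits */
    uint32_t flags = rl32(ed + 12);
    printf("slices: %u, interlaced: %u, compression: %u\n",
           (unsigned)(flags >> 24) + 1, (unsigned)((flags >> 11) & 1),
           (unsigned)(flags & 0xFF));
}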
245 
246 static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
247  int step, int stride, int width, int height)
248 {
249  int i, j;
250  int k = 2 * dst_stride;
251  unsigned int g;
252 
253  for (j = 0; j < height; j++) {
254  if (step == 3) {
255  for (i = 0; i < width * step; i += step) {
256  g = src[i + 1];
257  dst[0][k] = g;
258  g += 0x80;
259  dst[1][k] = src[i + 2] - g;
260  dst[2][k] = src[i + 0] - g;
261  k++;
262  }
263  } else {
264  for (i = 0; i < width * step; i += step) {
265  g = src[i + 1];
266  dst[0][k] = g;
267  g += 0x80;
268  dst[1][k] = src[i + 2] - g;
269  dst[2][k] = src[i + 0] - g;
270  dst[3][k] = src[i + 3];
271  k++;
272  }
273  }
274  k += dst_stride - width;
275  src += stride;
276  }
277 }
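/*
 * Annotation (not part of the original file): undoing the RGB plane mangling
 * performed by mangle_rgb_planes() above, shown for a single pixel.  The
 * forward transform stores G, (B - G - 0x80), (R - G - 0x80) and, for RGBA,
 * the raw alpha value; all arithmetic is modulo 256.
 */
#include <stdint.h>

static void unmangle_rgb_pixel(uint8_t p0, uint8_t p1, uint8_t p2,
                               uint8_t *r, uint8_t *g, uint8_t *b)
{
    *g = p0;                              /* plane 0 carries G directly     */
    *b = (uint8_t)(p1 + p0 + 0x80);       /* plane 1 carried B - G - 0x80   */
    *r = (uint8_t)(p2 + p0 + 0x80);       /* plane 2 carried R - G - 0x80   */
}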
278 
279 /* Write data to a plane with left prediction */
280 static void left_predict(uint8_t *src, uint8_t *dst, int stride,
281  int width, int height)
282 {
283  int i, j;
284  uint8_t prev;
285 
286  prev = 0x80; /* Set the initial value */
287  for (j = 0; j < height; j++) {
288  for (i = 0; i < width; i++) {
289  *dst++ = src[i] - prev;
290  prev = src[i];
291  }
292  src += stride;
293  }
294 }
295 
296 /* Write data to a plane with median prediction */
297 static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride,
298  int width, int height)
299 {
300  int i, j;
301  int A, B;
302  uint8_t prev;
303 
304  /* First line uses left neighbour prediction */
305  prev = 0x80; /* Set the initial value */
306  for (i = 0; i < width; i++) {
307  *dst++ = src[i] - prev;
308  prev = src[i];
309  }
310 
311  if (height == 1)
312  return;
313 
314  src += stride;
315 
316  /*
317  * Second line uses top prediction for the first sample,
318  * and median for the rest.
319  */
320  A = B = 0;
321 
322  /* Rest of the coded part uses median prediction */
323  for (j = 1; j < height; j++) {
324  c->hdsp.sub_hfyu_median_pred(dst, src - stride, src, width, &A, &B);
325  dst += width;
326  src += stride;
327  }
328 }
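/*
 * Annotation (not part of the original file): a plain-C sketch of the step
 * that c->hdsp.sub_hfyu_median_pred() performs above, along the lines of the
 * scalar fallback in huffyuvencdsp.  For each sample the predictor is the
 * median of left, top and (left + top - topleft), and the residual
 * cur - pred is written; *left and *left_top carry state across rows.
 */
#include <stdint.h>

static int med3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b        */
    return c < a ? a : (c > b ? b : c);       /* clamp c into [a, b]  */
}

static void sub_median_pred_ref(uint8_t *dst, const uint8_t *top,
                                const uint8_t *cur, int w,
                                int *left, int *left_top)
{
    int l = *left, lt = *left_top;

    for (int i = 0; i < w; i++) {
        int pred = med3(l, top[i], (l + top[i] - lt) & 0xFF);
        lt     = top[i];
        l      = cur[i];
        dst[i] = l - pred;
    }
    *left     = l;
    *left_top = lt;
}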
329 
330 /* Count the usage of values in a plane */
331 static void count_usage(uint8_t *src, int width,
332  int height, uint64_t *counts)
333 {
334  int i, j;
335 
336  for (j = 0; j < height; j++) {
337  for (i = 0; i < width; i++) {
338  counts[src[i]]++;
339  }
340  src += width;
341  }
342 }
343 
344 /* Calculate the actual huffman codes from the code lengths */
345 static void calculate_codes(HuffEntry *he)
346 {
347  int last, i;
348  uint32_t code;
349 
350  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
351 
352  last = 255;
353  while (he[last].len == 255 && last)
354  last--;
355 
356  code = 1;
357  for (i = last; i >= 0; i--) {
358  he[i].code = code >> (32 - he[i].len);
359  code += 0x80000000u >> (he[i].len - 1);
360  }
361 
362  qsort(he, 256, sizeof(*he), huff_cmp_sym);
363 }
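/*
 * Worked example (illustration only, not part of the file): the same
 * assignment run on a toy 4-entry table.  With sorted lengths {1, 2, 3, 3}
 * the loop above yields the canonical codes 1, 01, 001 and 000 -- shorter
 * codes take the numerically higher values, which is the ordering the
 * Ut Video decoder rebuilds from the transmitted length table.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  len[4] = { 1, 2, 3, 3 };    /* already sorted by length */
    uint32_t codes[4];
    uint32_t code = 1;

    for (int i = 3; i >= 0; i--) {       /* same walk as calculate_codes() */
        codes[i] = code >> (32 - len[i]);
        code    += 0x80000000u >> (len[i] - 1);
    }

    for (int i = 0; i < 4; i++) {
        printf("len %d -> ", len[i]);
        for (int b = len[i] - 1; b >= 0; b--)
            putchar('0' + ((codes[i] >> b) & 1));
        putchar('\n');
    }
    return 0;
}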
364 
365 /* Write huffman bit codes to a memory block */
366 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
367  int width, int height, HuffEntry *he)
368 {
369  PutBitContext pb;
370  int i, j;
371  int count;
372 
373  init_put_bits(&pb, dst, dst_size);
374 
375  /* Write the codes */
376  for (j = 0; j < height; j++) {
377  for (i = 0; i < width; i++)
378  put_bits(&pb, he[src[i]].len, he[src[i]].code);
379 
380  src += width;
381  }
382 
383  /* Pad output to a 32-bit boundary */
384  count = put_bits_count(&pb) & 0x1F;
385 
386  if (count)
387  put_bits(&pb, 32 - count, 0);
388 
389  /* Get the amount of bits written */
390  count = put_bits_count(&pb);
391 
392  /* Flush the rest with zeroes */
393  flush_put_bits(&pb);
394 
395  return count;
396 }
397 
398 static int encode_plane(AVCodecContext *avctx, uint8_t *src,
399  uint8_t *dst, int stride, int plane_no,
400  int width, int height, PutByteContext *pb)
401 {
402  UtvideoContext *c = avctx->priv_data;
403  uint8_t lengths[256];
404  uint64_t counts[256] = { 0 };
405 
406  HuffEntry he[256];
407 
408  uint32_t offset = 0, slice_len = 0;
409  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
410  int i, sstart, send = 0;
411  int symbol;
412  int ret;
413 
414  /* Do prediction / make planes */
415  switch (c->frame_pred) {
416  case PRED_NONE:
417  for (i = 0; i < c->slices; i++) {
418  sstart = send;
419  send = height * (i + 1) / c->slices & cmask;
420  av_image_copy_plane(dst + sstart * width, width,
421  src + sstart * stride, stride,
422  width, send - sstart);
423  }
424  break;
425  case PRED_LEFT:
426  for (i = 0; i < c->slices; i++) {
427  sstart = send;
428  send = height * (i + 1) / c->slices & cmask;
429  left_predict(src + sstart * stride, dst + sstart * width,
430  stride, width, send - sstart);
431  }
432  break;
433  case PRED_MEDIAN:
434  for (i = 0; i < c->slices; i++) {
435  sstart = send;
436  send = height * (i + 1) / c->slices & cmask;
437  median_predict(c, src + sstart * stride, dst + sstart * width,
438  stride, width, send - sstart);
439  }
440  break;
441  default:
442  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
443  c->frame_pred);
444  return AVERROR_OPTION_NOT_FOUND;
445  }
446 
447  /* Count the usage of values */
448  count_usage(dst, width, height, counts);
449 
450  /* Check for a special case where only one symbol was used */
451  for (symbol = 0; symbol < 256; symbol++) {
452  /* If non-zero count is found, see if it matches width * height */
453  if (counts[symbol]) {
454  /* Special case if only one symbol was used */
455  if (counts[symbol] == width * (int64_t)height) {
456  /*
457  * Write a zero for the single symbol
458  * used in the plane, else 0xFF.
459  */
460  for (i = 0; i < 256; i++) {
461  if (i == symbol)
462  bytestream2_put_byte(pb, 0);
463  else
464  bytestream2_put_byte(pb, 0xFF);
465  }
466 
467  /* Write zeroes for lengths */
468  for (i = 0; i < c->slices; i++)
469  bytestream2_put_le32(pb, 0);
470 
471  /* And that's all for that plane folks */
472  return 0;
473  }
474  break;
475  }
476  }
477 
478  /* Calculate huffman lengths */
479  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
480  return ret;
481 
482  /*
483  * Write the plane's header into the output packet:
484  * - huffman code lengths (256 bytes)
485  * - slice end offsets (gotten from the slice lengths)
486  */
487  for (i = 0; i < 256; i++) {
488  bytestream2_put_byte(pb, lengths[i]);
489 
490  he[i].len = lengths[i];
491  he[i].sym = i;
492  }
493 
494  /* Calculate the huffman codes themselves */
495  calculate_codes(he);
496 
497  send = 0;
498  for (i = 0; i < c->slices; i++) {
499  sstart = send;
500  send = height * (i + 1) / c->slices & cmask;
501 
502  /*
503  * Write the huffman codes to a buffer,
504  * get the offset in bits and convert to bytes.
505  */
506  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
507  width * height + 4, width,
508  send - sstart, he) >> 3;
509 
510  slice_len = offset - slice_len;
511 
512  /* Byteswap the written huffman codes */
513  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
514  (uint32_t *) c->slice_bits,
515  slice_len >> 2);
516 
517  /* Write the offset to the stream */
518  bytestream2_put_le32(pb, offset);
519 
520  /* Seek to the data part of the packet */
521  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
522  offset - slice_len, SEEK_CUR);
523 
524  /* Write the slices' data into the output packet */
525  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
526 
527  /* Seek back to the slice offsets */
528  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
529  SEEK_CUR);
530 
531  slice_len = offset;
532  }
533 
534  /* And at the end seek to the end of written slice(s) */
535  bytestream2_seek_p(pb, offset, SEEK_CUR);
536 
537  return 0;
538 }
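/*
 * Summary of the per-plane layout produced by encode_plane() above
 * (annotation, not original source): 256 bytes of Huffman code lengths come
 * first.  If only one symbol occurs in the plane, that symbol's length byte
 * is 0, all others are 0xFF, the per-slice dwords that follow are zero and
 * no coded data is emitted.  Otherwise the length table is followed by one
 * little-endian 32-bit value per slice holding the cumulative end offset of
 * that slice's data, and then by the slices themselves: Huffman-coded
 * samples padded to a 32-bit boundary and byte-swapped 32 bits at a time.
 */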
539 
540 static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
541  const AVFrame *pic, int *got_packet)
542 {
543  UtvideoContext *c = avctx->priv_data;
544  PutByteContext pb;
545 
546  uint32_t frame_info;
547 
548  uint8_t *dst;
549 
550  int width = avctx->width, height = avctx->height;
551  int i, ret = 0;
552 
553  /* Allocate a new packet if needed, and set it to the pointer dst */
554  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
555  c->planes + 4, 0);
556 
557  if (ret < 0)
558  return ret;
559 
560  dst = pkt->data;
561 
562  bytestream2_init_writer(&pb, dst, pkt->size);
563 
564  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
565 
566  if (!c->slice_bits) {
567  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
568  return AVERROR(ENOMEM);
569  }
570 
571  /* In case of RGB, mangle the planes to Ut Video's format */
572  if (avctx->pix_fmt == AV_PIX_FMT_RGBA || avctx->pix_fmt == AV_PIX_FMT_RGB24)
573  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data[0],
574  c->planes, pic->linesize[0], width, height);
575 
576  /* Deal with the planes */
577  switch (avctx->pix_fmt) {
578  case AV_PIX_FMT_RGB24:
579  case AV_PIX_FMT_RGBA:
580  for (i = 0; i < c->planes; i++) {
581  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
582  c->slice_buffer[i], c->slice_stride, i,
583  width, height, &pb);
584 
585  if (ret) {
586  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
587  return ret;
588  }
589  }
590  break;
591  case AV_PIX_FMT_YUV444P:
592  for (i = 0; i < c->planes; i++) {
593  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
594  pic->linesize[i], i, width, height, &pb);
595 
596  if (ret) {
597  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
598  return ret;
599  }
600  }
601  break;
602  case AV_PIX_FMT_YUV422P:
603  for (i = 0; i < c->planes; i++) {
604  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
605  pic->linesize[i], i, width >> !!i, height, &pb);
606 
607  if (ret) {
608  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
609  return ret;
610  }
611  }
612  break;
613  case AV_PIX_FMT_YUV420P:
614  for (i = 0; i < c->planes; i++) {
615  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
616  pic->linesize[i], i, width >> !!i, height >> !!i,
617  &pb);
618 
619  if (ret) {
620  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
621  return ret;
622  }
623  }
624  break;
625  default:
626  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
627  avctx->pix_fmt);
628  return AVERROR_INVALIDDATA;
629  }
630 
631  /*
632  * Write frame information (LE 32-bit unsigned)
633  * into the output packet.
634  * Contains the prediction method.
635  */
636  frame_info = c->frame_pred << 8;
637  bytestream2_put_le32(&pb, frame_info);
638 
639  /*
640  * At least currently Ut Video is IDR only.
641  * Set flags accordingly.
642  */
643 #if FF_API_CODED_FRAME
644 FF_DISABLE_DEPRECATION_WARNINGS
645  avctx->coded_frame->key_frame = 1;
646  avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
647 FF_ENABLE_DEPRECATION_WARNINGS
648 #endif
649 
650  pkt->size = bytestream2_tell_p(&pb);
651  pkt->flags |= AV_PKT_FLAG_KEY;
652 
653  /* Packet should be done */
654  *got_packet = 1;
655 
656  return 0;
657 }
658 
659 #define OFFSET(x) offsetof(UtvideoContext, x)
660 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
661 static const AVOption options[] = {
662 { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
663  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
664  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
665  { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
666  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },
667 
668  { NULL},
669 };
670 
671 static const AVClass utvideo_class = {
672  .class_name = "utvideo",
673  .item_name = av_default_item_name,
674  .option = options,
675  .version = LIBAVUTIL_VERSION_INT,
676 };
677 
678 AVCodec ff_utvideo_encoder = {
679  .name = "utvideo",
680  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
681  .type = AVMEDIA_TYPE_VIDEO,
682  .id = AV_CODEC_ID_UTVIDEO,
683  .priv_data_size = sizeof(UtvideoContext),
684  .priv_class = &utvideo_class,
685  .init = utvideo_encode_init,
686  .encode2 = utvideo_encode_frame,
687  .close = utvideo_encode_close,
688  .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
689  .pix_fmts = (const enum AVPixelFormat[]) {
690  AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV422P,
691  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
692  },
693 };
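/*
 * Usage note (annotation, not part of utvideoenc.c): a minimal sketch of
 * opening this encoder through the public libavcodec API of the same era as
 * this listing.  Error handling is abbreviated, the pixel format and frame
 * rate are arbitrary examples, and the function name open_utvideo_encoder is
 * hypothetical; "pred" is the private option declared in the AVOption table
 * above.
 */
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *open_utvideo_encoder(int width, int height)
{
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_UTVIDEO);
    if (!codec)
        return NULL;

    AVCodecContext *enc = avcodec_alloc_context3(codec);
    if (!enc)
        return NULL;

    enc->width     = width;
    enc->height    = height;
    enc->pix_fmt   = AV_PIX_FMT_YUV422P;          /* one of the formats in .pix_fmts */
    enc->time_base = (AVRational){ 1, 25 };

    av_opt_set(enc->priv_data, "pred", "median", 0);  /* select median prediction */

    if (avcodec_open2(enc, codec, NULL) < 0) {
        avcodec_free_context(&enc);
        return NULL;
    }
    return enc;
}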