rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "bswapdsp.h"
#include "get_bits.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;  /* size of the frame in bytes */
    int flip;
    int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    int is_mono;
    int is_pal8;
    int is_nut_mono;
    int is_nut_pal8;
    int is_yuv2;
    int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
    int tff;

    BswapDSPContext bbdsp;
    void *bitstream_buf;
    unsigned int bitstream_buf_size;
} RawVideoContext;
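
/* The single user option, "top", forces top-field-first (1) or bottom-field-first (0)
 * interlacing. The default of -1 leaves it unset, in which case raw_decode() falls
 * back to the field order signalled by the container (avctx->field_order). */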
static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};

static const AVClass rawdec_class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    ff_bswapdsp_init(&context->bbdsp);

    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
        avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & (AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
        else {
            memset(context->palette->data, 0, AVPALETTE_SIZE);
            if (avctx->bits_per_coded_sample == 1)
                memset(context->palette->data, 0xff, 4);
        }
    }

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
        context->is_mono = 1;
    else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        context->is_pal8 = 1;

    if (avctx->codec_tag == MKTAG('B','1','W','0') ||
        avctx->codec_tag == MKTAG('B','0','W','1'))
        context->is_nut_mono = 1;
    else if (avctx->codec_tag == MKTAG('P','A','L',8))
        context->is_nut_pal8 = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    /* Temporary solution until PAL8 is implemented in nut */
    if (context->is_pal8 && avctx->bits_per_coded_sample == 1)
        avctx->pix_fmt = AV_PIX_FMT_NONE;

    return 0;
}
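
/* Flip the image vertically without copying: point data[0] at the last row and
 * negate the stride so the rows are walked bottom-up. */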
static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0] += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}

/*
 * Scale sample to 16-bit resolution
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
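
/* Example: for 10-bit input, SCALE16(0x3FF, 10) = (0x3FF << 6) | (0x3FF >> 4) = 0xFFFF,
 * i.e. the high bits are replicated into the low bits so full scale maps to full scale. */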

/**
 * Scale buffer to 16 bits per coded sample resolution
 */
#define MKSCALE16(name, r16, w16) \
static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
{ \
    int i; \
    if (!packed) { \
        for (i = 0; i + 1 < buf_size; i += 2) \
            w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
    } else { \
        GetBitContext gb; \
        init_get_bits(&gb, buf, buf_size * 8); \
        for (i = 0; i < avctx->width * avctx->height; i++) { \
            int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
            w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
        } \
    } \
}

MKSCALE16(scale16be, AV_RB16, AV_WB16)
MKSCALE16(scale16le, AV_RL16, AV_WL16)
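
/* The two instantiations produce big- and little-endian 16-bit output; raw_decode()
 * picks one below based on the AV_PIX_FMT_FLAG_BE flag of the target pixel format. */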

static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc;
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf   = avpkt->data;
    int buf_size         = avpkt->size;
    int linesize_align   = 4;
    int stride;
    int res, len;
    int need_copy;

    AVFrame *frame = data;

    if (avctx->width <= 0) {
        av_log(avctx, AV_LOG_ERROR, "width is not set\n");
        return AVERROR_INVALIDDATA;
    }
    if (avctx->height <= 0) {
        av_log(avctx, AV_LOG_ERROR, "height is not set\n");
        return AVERROR_INVALIDDATA;
    }
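
    /* Derive the input stride: nut mono formats pack 8 pixels per byte, so a row
     * takes ceil(width / 8) bytes; nut PAL8 is one byte per pixel; for everything
     * else the stride is inferred from the packet size. */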
    if (context->is_nut_mono)
        stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    else if (context->is_nut_pal8)
        stride = avctx->width;
    else
        stride = avpkt->size / avctx->height;

    av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);

    if (stride == 0 || avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Temporary solution until PAL8 is implemented in nut */
    if (avctx->pix_fmt == AV_PIX_FMT_NONE &&
        avctx->bits_per_coded_sample == 1 &&
        avctx->frame_number == 0 &&
        context->palette &&
        AV_RB64(context->palette->data) == 0xFFFFFFFF00000000
    ) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
        if (!pal) {
            avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
            context->is_pal8 = 0;
            context->is_mono = 1;
        } else
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
    }
    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4
         || avctx->bits_per_coded_sample <= 2) &&
        (context->is_mono || context->is_pal8) &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
         context->is_nut_mono || context->is_nut_pal8)) {
        context->is_1_2_4_8_bpp = 1;
        if (context->is_mono) {
            int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(row_bytes, 16) * 8,
                                                           avctx->height, 1);
        } else
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(avctx->width, 16),
                                                           avctx->height, 1);
    } else {
        context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample && avctx->bits_per_coded_sample < 16;
        context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
                                                       avctx->height, 1);
    }
    if (context->frame_size < 0)
        return context->frame_size;
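
    /* A copy into a freshly allocated buffer is needed whenever the packet data
     * cannot be wrapped as-is: the packet is not reference-counted, or the pixels
     * still have to be unpacked (1/2/4/8 bpp), expanded to 16 bits, or modified
     * in place (yuv2 chroma fixup). */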
    need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    av_frame_set_pkt_pos     (frame, avctx->internal->pkt->pos);
    av_frame_set_pkt_duration(frame, avctx->internal->pkt->duration);

    if (context->tff >= 0) {
        frame->interlaced_frame = 1;
        frame->top_field_first  = context->tff;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

    if (need_copy)
        frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
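    // Rows are re-packed into a destination buffer whose stride is padded to a
    // 16-byte multiple (hence linesize_align = 16 below); 4-, 2- and 1-bpp palette
    // formats are additionally expanded to one byte per pixel, while 8 bpp and
    // bit-packed mono rows are copied as-is.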
    if (context->is_1_2_4_8_bpp) {
        int i, j, row_pix = 0;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
        if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
            int pix_per_byte = context->is_mono ? 8 : 1;
            for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
                dst[j] = buf[i];
                row_pix += pix_per_byte;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 16 - (j % 16) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 4) {
            for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
                dst[2 * j + 0] = buf[i] >> 4;
                dst[2 * j + 1] = buf[i] & 15;
                row_pix += 2;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 8 - (j % 8) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 2) {
            for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
                dst[4 * j + 0] = buf[i] >> 6;
                dst[4 * j + 1] = buf[i] >> 4 & 3;
                dst[4 * j + 2] = buf[i] >> 2 & 3;
                dst[4 * j + 3] = buf[i] & 3;
                row_pix += 4;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 4 - (j % 4) - 1;
                    row_pix = 0;
                }
            }
        } else {
            av_assert0(avctx->bits_per_coded_sample == 1);
            for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
                dst[8 * j + 0] = buf[i] >> 7;
                dst[8 * j + 1] = buf[i] >> 6 & 1;
                dst[8 * j + 2] = buf[i] >> 5 & 1;
                dst[8 * j + 3] = buf[i] >> 4 & 1;
                dst[8 * j + 4] = buf[i] >> 3 & 1;
                dst[8 * j + 5] = buf[i] >> 2 & 1;
                dst[8 * j + 6] = buf[i] >> 1 & 1;
                dst[8 * j + 7] = buf[i] & 1;
                row_pix += 8;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 2 - (j % 2) - 1;
                    row_pix = 0;
                }
            }
        }
        linesize_align = 16;
        buf = dst;
    } else if (context->is_lt_16bpp) {
        uint8_t *dst = frame->buf[0]->data;
        int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
        int swap   = avctx->codec_tag >> 24;

        if (packed && swap) {
            av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
            if (!context->bitstream_buf)
                return AVERROR(ENOMEM);
            if (swap == 16)
                context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
            else if (swap == 32)
                context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
            else
                return AVERROR_INVALIDDATA;
            buf = context->bitstream_buf;
        }

        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            scale16be(avctx, dst, buf, buf_size, packed);
        else
            scale16le(avctx, dst, buf, buf_size, packed);

        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, buf_size);
        buf = frame->buf[0]->data;
    }

    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     NULL);

        if (pal) {
            av_buffer_unref(&context->palette);
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
            if (!context->palette) {
                av_buffer_unref(&frame->buf[0]);
                return AVERROR(ENOMEM);
            }
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }
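
    /* Some sources pad each picture row; when the packet is large enough to hold
     * the padded layout, bump the frame linesize so the extra bytes are treated
     * as row padding rather than picture data. */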
    if ((avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt==AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
         avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);

    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);

    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
        frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
        frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
    }
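
    /* The QuickTime 'yuv2' variant stores chroma with the sign bit inverted relative
     * to AV_PIX_FMT_YUYV422; XORing 0x80 into every second byte (the Cb/Cr samples
     * in the YUYV packing) converts them to the expected unsigned range. */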
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt   == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->interlaced_frame = 1;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->top_field_first = 1;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
    .priv_class     = &rawdec_class,
    .capabilities   = AV_CODEC_CAP_PARAM_CHANGE,
};