FFmpeg
rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "bswapdsp.h"
#include "decode.h"
#include "get_bits.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;       /* size of the frame in bytes */
    int flip;
    int is_1_2_4_8_bpp;   // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    int is_mono;
    int is_pal8;
    int is_nut_mono;
    int is_nut_pal8;
    int is_yuv2;
    int is_lt_16bpp;      // 16bpp pixfmt and bits_per_coded_sample < 16
    int tff;

    BswapDSPContext bbdsp;
    void *bitstream_buf;
    unsigned int bitstream_buf_size;
} RawVideoContext;

static const AVOption options[]={
    {"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
    {NULL}
};

static const AVClass rawdec_class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
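
/*
 * Usage sketch: "top" is exposed as a decoder private option, so a caller can
 * set it through the options dictionary passed to avcodec_open2() (a minimal
 * example with error handling omitted; dec_ctx is a hypothetical, already
 * allocated AVCodecContext):
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "top", "1", 0);   // force top field first
 *     avcodec_open2(dec_ctx, avcodec_find_decoder(AV_CODEC_ID_RAWVIDEO), &opts);
 *     av_dict_free(&opts);
 */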

static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    ff_bswapdsp_init(&context->bbdsp);

    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_MOV,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
                                             avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        memset(context->palette->data, 0, AVPALETTE_SIZE);
        if (avctx->bits_per_coded_sample == 1)
            memset(context->palette->data, 0xff, 4);
    }

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
        context->is_mono = 1;
    else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        context->is_pal8 = 1;

    if (avctx->codec_tag == MKTAG('B','1','W','0') ||
        avctx->codec_tag == MKTAG('B','0','W','1'))
        context->is_nut_mono = 1;
    else if (avctx->codec_tag == MKTAG('P','A','L',8))
        context->is_nut_pal8 = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}

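/*
 * Present the picture bottom-up without copying: point data[0] at the last
 * row of the buffer and negate the stride, so successive rows are read
 * backwards through memory.
 */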
static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0]     += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}

/*
 * Scale sample to 16-bit resolution
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))

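/*
 * Worked example (assuming bits = 10): for the full-scale 10-bit sample
 * x = 0x3FF, SCALE16(0x3FF, 10) = (0x3FF << 6) | (0x3FF >> 4)
 *                               = 0xFFC0 | 0x3F = 0xFFFF,
 * i.e. the sample is shifted into the high bits and its top bits are
 * replicated into the low bits, so full scale maps to full scale.
 */
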
/**
 * Scale buffer to 16 bits per coded sample resolution
 */
#define MKSCALE16(name, r16, w16) \
static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
{ \
    int i; \
    if (!packed) { \
        for (i = 0; i + 1 < buf_size; i += 2) \
            w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
    } else { \
        GetBitContext gb; \
        init_get_bits(&gb, buf, buf_size * 8); \
        for (i = 0; i < avctx->width * avctx->height; i++) { \
            int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
            w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
        } \
    } \
}

MKSCALE16(scale16be, AV_RB16, AV_WB16)
MKSCALE16(scale16le, AV_RL16, AV_WL16)

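/*
 * The two instantiations above expand to scale16be() and scale16le(), used by
 * raw_decode() for 16-bit pixel formats whose coded depth is below 16 bits:
 * the "packed" path bit-unpacks samples with a GetBitContext, the other path
 * rescales samples that are already stored two bytes apart.
 */
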
static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc;
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf       = avpkt->data;
    int buf_size             = avpkt->size;
    int linesize_align       = 4;
    int stride;
    int res, len;
    int need_copy;

    AVFrame *frame = data;

    if (avctx->width <= 0) {
        av_log(avctx, AV_LOG_ERROR, "width is not set\n");
        return AVERROR_INVALIDDATA;
    }
    if (avctx->height <= 0) {
        av_log(avctx, AV_LOG_ERROR, "height is not set\n");
        return AVERROR_INVALIDDATA;
    }

    if (context->is_nut_mono)
        stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    else if (context->is_nut_pal8)
        stride = avctx->width;
    else
        stride = avpkt->size / avctx->height;
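    /*
     * Example: a 100x50 NUT mono stream gives stride = 100/8 + 1 = 13 bytes
     * per row; for anything else the stride is inferred from the packet size,
     * e.g. a 307200-byte packet at 480 rows gives stride 640.
     */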

    av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);

    if (stride == 0 || avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
         avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
        (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
        (context->is_mono || context->is_pal8) &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
         context->is_nut_mono || context->is_nut_pal8)) {
        context->is_1_2_4_8_bpp = 1;
        if (context->is_mono) {
            int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(row_bytes, 16) * 8,
                                                           avctx->height, 1);
        } else
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(avctx->width, 16),
                                                           avctx->height, 1);
    } else {
        context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
        context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
                                                       avctx->height, 1);
    }
    if (context->frame_size < 0)
        return context->frame_size;

    need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    frame->pkt_pos      = avctx->internal->last_pkt_props->pos;
    frame->pkt_duration = avctx->internal->last_pkt_props->duration;

    if (context->tff >= 0) {
        frame->interlaced_frame = 1;
        frame->top_field_first  = context->tff;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

    if (need_copy)
        frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

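    /*
     * In the low-bit-depth path below, every output row is repacked so that
     * it starts on a 16-byte boundary (matching the FFALIGN(..., 16) used for
     * frame_size above), and 4/2/1 bpp palette samples are expanded to one
     * byte per pixel; that is also why linesize_align is raised to 16 there.
     */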
    // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    if (context->is_1_2_4_8_bpp) {
        int i, j, row_pix = 0;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
        if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
            int pix_per_byte = context->is_mono ? 8 : 1;
            for (i = 0, j = 0; j < buf_size && i < avpkt->size; i++, j++) {
                dst[j] = buf[i];
                row_pix += pix_per_byte;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 16 - (j % 16) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 4) {
            for (i = 0, j = 0; 2 * j + 1 < buf_size && i < avpkt->size; i++, j++) {
                dst[2 * j + 0] = buf[i] >> 4;
                dst[2 * j + 1] = buf[i] & 15;
                row_pix += 2;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 8 - (j % 8) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 2) {
            for (i = 0, j = 0; 4 * j + 3 < buf_size && i < avpkt->size; i++, j++) {
                dst[4 * j + 0] = buf[i] >> 6;
                dst[4 * j + 1] = buf[i] >> 4 & 3;
                dst[4 * j + 2] = buf[i] >> 2 & 3;
                dst[4 * j + 3] = buf[i] & 3;
                row_pix += 4;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 4 - (j % 4) - 1;
                    row_pix = 0;
                }
            }
        } else {
            av_assert0(avctx->bits_per_coded_sample == 1);
            for (i = 0, j = 0; 8 * j + 7 < buf_size && i < avpkt->size; i++, j++) {
                dst[8 * j + 0] = buf[i] >> 7;
                dst[8 * j + 1] = buf[i] >> 6 & 1;
                dst[8 * j + 2] = buf[i] >> 5 & 1;
                dst[8 * j + 3] = buf[i] >> 4 & 1;
                dst[8 * j + 4] = buf[i] >> 3 & 1;
                dst[8 * j + 5] = buf[i] >> 2 & 1;
                dst[8 * j + 6] = buf[i] >> 1 & 1;
                dst[8 * j + 7] = buf[i] & 1;
                row_pix += 8;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 2 - (j % 2) - 1;
                    row_pix = 0;
                }
            }
        }
        linesize_align = 16;
        buf = dst;
    } else if (context->is_lt_16bpp) {
        uint8_t *dst = frame->buf[0]->data;
        int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
        int swap   = avctx->codec_tag >> 24;

        if (packed && swap) {
            av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
            if (!context->bitstream_buf)
                return AVERROR(ENOMEM);
            if (swap == 16)
                context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t *)buf, buf_size / 2);
            else if (swap == 32)
                context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t *)buf, buf_size / 4);
            else
                return AVERROR_INVALIDDATA;
            buf = context->bitstream_buf;
        }

        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            scale16be(avctx, dst, buf, buf_size, packed);
        else
            scale16le(avctx, dst, buf, buf_size, packed);

        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, buf_size);
        buf = frame->buf[0]->data;
    }

    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        int ret;

        if (!context->palette)
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        ret = av_buffer_make_writable(&context->palette);
        if (ret < 0) {
            av_buffer_unref(&frame->buf[0]);
            return ret;
        }

        if (ff_copy_palette(context->palette->data, avpkt, avctx)) {
            frame->palette_has_changed = 1;
        } else if (context->is_nut_pal8) {
            int vid_size = avctx->width * avctx->height;
            int pal_size = avpkt->size - vid_size;

            if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
                const uint8_t *pal = avpkt->data + vid_size;
                memcpy(context->palette->data, pal, pal_size);
                frame->palette_has_changed = 1;
            }
        }
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_RGB24    ||
         avctx->pix_fmt == AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt == AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt == AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt == AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt == AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt == AV_PIX_FMT_MONOBLACK ||
         avctx->pix_fmt == AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);

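    /*
     * These fourccs carry the planes in Y, V, U order, while the matching
     * FFmpeg planar formats expect Y, U, V, so the two chroma plane pointers
     * are swapped.
     */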
    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);

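    /*
     * Some I420 files with odd dimensions size their planes on
     * (width + 1) x (height + 1); when the packet size matches that layout,
     * advance the chroma plane pointers past the extra padding bytes.
     */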
    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width + 1) * (avctx->height + 1) * 3 / 2 == buf_size) {
        frame->data[1] = frame->data[1] +  (avctx->width + 1) * (avctx->height + 1) - avctx->width * avctx->height;
        frame->data[2] = frame->data[2] + ((avctx->width + 1) * (avctx->height + 1) - avctx->width * avctx->height) * 5 / 4;
    }

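    /*
     * QuickTime 'yuv2' stores chroma as signed samples; flipping bit 7 of
     * every chroma byte (the odd bytes of the Y0 Cb Y1 Cr layout) converts
     * them to the unsigned range that AV_PIX_FMT_YUYV422 expects.
     */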
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }

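    /*
     * QuickTime 'b64a' is 64-bit big-endian ARGB; rotating each 64-bit pixel
     * left by 16 bits moves the alpha component from the front to the back,
     * giving the component order of AV_PIX_FMT_RGBA64BE.
     */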
    if (avctx->codec_tag == AV_RL32("b64a") &&
        avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
        uint8_t *dst = frame->data[0];
        uint64_t v;
        int x, y;
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x >> 3 < avctx->width; x += 8) {
                v = AV_RB64(&dst[x]);
                AV_WB64(&dst[x], v << 16 | v >> 48);
            }
            dst += frame->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->interlaced_frame = 1;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->top_field_first = 1;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    av_freep(&context->bitstream_buf);
    return 0;
}

const AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
    .priv_class     = &rawdec_class,
    .capabilities   = AV_CODEC_CAP_PARAM_CHANGE,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};