int channel_offsets[4];   /* EXRContext field: offsets of the R, G, B, A channels within a pixel */
#define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP 0x38000000
#define HALF_FLOAT_MAX_BIASED_EXP_AS_SINGLE_FP_EXP 0x47800000
#define FLOAT_MAX_BIASED_EXP      (0xFF << 23)
#define HALF_FLOAT_MAX_BIASED_EXP (0x1F << 10)

/* exr_half2float(): split the half into sign, exponent and mantissa fields */
unsigned int sign     = (unsigned int) (hf >> 15);
unsigned int mantissa = (unsigned int) (hf & ((1 << 10) - 1));

if (exp == HALF_FLOAT_MAX_BIASED_EXP) {
    /* ... Inf / NaN ... */
} else if (exp == 0x0) {
    /* denormal: shift until the leading 1 reaches bit 10 */
    while (!(mantissa & (1 << 10))) {
        /* ... */
    mantissa &= ((1 << 10) - 1);
    /* ... */

f.i = (sign << 31) | exp | mantissa;
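The lines above are isolated excerpts of that conversion. Below is a self-contained sketch of the same half (binary16) to single-precision (binary32) bit manipulation, written without the FFmpeg union type; half_to_float and its locals are hypothetical names, not the decoder's own.

#include <stdint.h>
#include <string.h>

static float half_to_float(uint16_t hf)
{
    uint32_t sign     = (uint32_t)(hf >> 15);
    uint32_t exp      = (uint32_t)(hf & (0x1F << 10));     /* exponent, still at bits 10..14 */
    uint32_t mantissa = (uint32_t)(hf & ((1 << 10) - 1));
    uint32_t bits;
    float    out;

    if (exp == (0x1F << 10)) {              /* Inf / NaN: force the maximum float exponent */
        exp       = 0xFF << 23;
        mantissa <<= 13;                    /* a non-zero mantissa stays non-zero (NaN) */
    } else if (exp == 0) {                  /* signed zero or denormal */
        if (mantissa) {
            mantissa <<= 1;
            exp = 0x38000000;               /* smallest normal half exponent, in float bit position */
            while (!(mantissa & (1 << 10))) {
                mantissa <<= 1;             /* renormalize: one exponent step per shift */
                exp      -= 1 << 23;
            }
            mantissa  &= (1 << 10) - 1;     /* drop the now-implicit leading 1 */
            mantissa <<= 13;                /* widen 10-bit mantissa to 23 bits */
        }
    } else {                                /* normal number: rebias exponent, widen mantissa */
        exp       = (exp << 13) + 0x38000000;
        mantissa <<= 13;
    }

    bits = (sign << 31) | exp | mantissa;
    memcpy(&out, &bits, sizeof(out));       /* reinterpret the bit pattern as a float */
    return out;
}

The final OR mirrors the excerpt's f.i = (sign << 31) | exp | mantissa; the decoder keeps the result in a union av_intfloat32 instead of going through memcpy().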
/* zip_uncompress(): inflate into the per-thread buffer, then verify the size */
unsigned long dest_len = uncompressed_size;

if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK ||
    dest_len != uncompressed_size)
/* rle_uncompress(): signed run counts, bounds-checked against both buffers */
const int8_t *s = src;
int ssize = compressed_size;
int dsize = uncompressed_size;
/* ... */
if ((dsize -= count) < 0 ||
    (ssize -= count + 1) < 0)
    /* ... */
if ((dsize -= count) < 0 ||
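A minimal sketch of the run-length scheme those bounds checks guard, assuming the usual EXR convention: a negative count introduces literal bytes, a non-negative count repeats the next byte count + 1 times. rle_decode_sketch is a hypothetical stand-in, not the decoder's rle_uncompress().

#include <stdint.h>

static int rle_decode_sketch(uint8_t *dst, int dsize, const int8_t *src, int ssize)
{
    while (ssize > 0) {
        int count = *src++;                 /* signed run descriptor */
        ssize--;

        if (count < 0) {                    /* literal run of -count bytes */
            count = -count;
            if ((dsize -= count) < 0 || (ssize -= count) < 0)
                return -1;
            while (count--)
                *dst++ = (uint8_t)*src++;
        } else {                            /* repeat the next byte count + 1 times */
            count++;
            if ((dsize -= count) < 0 || (ssize -= 1) < 0)
                return -1;
            while (count--)
                *dst++ = (uint8_t)*src;
            src++;
        }
    }
    return dsize ? -1 : 0;                  /* the output must be filled exactly */
}

In the decoder the result additionally passes through the predictor and reorder_pixels hooks of the DSP context, which this sketch omits.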
#define USHORT_RANGE (1 << 16)
#define BITMAP_SIZE  (1 << 13)

/* reverse_lut(): keep value 0 plus every value whose bit is set in the bitmap */
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
    /* ... */
memset(lut + k, 0, (USHORT_RANGE - k) * 2);
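Roughly what reverse_lut() does with those two lines, shown as a hedged, self-contained sketch using the macros above (the real function has the same shape):

#include <stdint.h>
#include <string.h>

static uint16_t reverse_lut_sketch(const uint8_t *bitmap, uint16_t *lut)
{
    int i, k = 0;

    for (i = 0; i < USHORT_RANGE; i++)
        if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
            lut[k++] = i;                             /* the k-th code maps back to value i */

    i = k - 1;                                        /* largest code the Huffman stage may produce */

    memset(lut + k, 0, (USHORT_RANGE - k) * 2);       /* unused entries decode to zero */

    return i;
}

apply_lut() below then rewrites every decoded sample through this table.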
static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize)
{
    int i;

    for (i = 0; i < dsize; ++i)
        dst[i] = lut[dst[i]];
}
#define HUF_ENCBITS 16                       // literal (value) bit length
#define HUF_ENCSIZE ((1 << HUF_ENCBITS) + 1) // encoding table size

/* huf_canonical_code_table(): derive canonical codes from the code lengths */
uint64_t c, n[59] = { 0 };
/* ... */
for (i = 58; i > 0; --i) {
    uint64_t nc = ((c + n[i]) >> 1);
    /* ... */
freq[i] = l | (n[l]++ << 6);
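A sketch of the full canonical-code construction those fragments belong to: on entry freq[] holds one code length per symbol, on return each used entry packs its canonical code above the 6-bit length, exactly the layout the excerpt's last line produces. Treat it as a reconstruction consistent with the excerpts, not a verbatim copy.

#include <stdint.h>

static void canonical_code_table_sketch(uint64_t *freq)
{
    uint64_t c = 0, n[59] = { 0 };
    int i;

    for (i = 0; i < HUF_ENCSIZE; i++)       /* count how many symbols use each length */
        n[freq[i]] += 1;

    for (i = 58; i > 0; --i) {              /* first code of each length, longest first */
        uint64_t nc = ((c + n[i]) >> 1);
        n[i] = c;
        c    = nc;
    }

    for (i = 0; i < HUF_ENCSIZE; i++) {     /* hand out consecutive codes per length */
        int l = freq[i];
        if (l > 0)
            freq[i] = l | (n[l]++ << 6);
    }
}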
#define SHORT_ZEROCODE_RUN 59
#define LONG_ZEROCODE_RUN  63
#define SHORTEST_LONG_RUN  (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN)
#define LONGEST_LONG_RUN   (255 + SHORTEST_LONG_RUN)

/* huf_unpack_enc_table(): the code lengths are themselves run-length coded */
for (; im <= iM; im++) {
    /* ... */
    if (im + zerun > iM + 1)
        /* ... */
    if (im + zerun > iM + 1)
/* huf_build_dec_table(): collect (length, code, symbol) entries, then build a VLC */
for (int i = im; i < iM; i++) {
    /* ... */
    if (td->he[j].len > 32) {
        /* ... */
    if (td->he[j].len > 0)
        /* ... */

if (td->he[j].len > 32) {
    /* ... */

return ff_init_vlc_sparse(/* ... */
                          &td->he[0].len, sizeof(td->he[0]), sizeof(td->he[0].len),
                          /* ... */
                          &td->he[0].sym, sizeof(td->he[0]), sizeof(td->he[0].sym), 0);
static int huf_decode(VLC *vlc, GetByteContext *gb, int nbits, int run_sym,
                      int no, uint16_t *out)
/* ... */
uint16_t fill = out[oe - 1];   /* a run symbol repeats the previously decoded value */
static int huf_uncompress(EXRContext *s, EXRThreadData *td, GetByteContext *gb,
                          uint16_t *dst, int dst_size)
/* ... read the Huffman block header ... */
src_size = bytestream2_get_le32(gb);
im       = bytestream2_get_le32(gb);
iM       = bytestream2_get_le32(gb);
/* ... */
nBits    = bytestream2_get_le32(gb);
/* ... */
if (!td->freq || !td->he) {
static inline void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
{
    /* ... */
    int ai = ls + (hi & 1) + (hi >> 1);
    /* ... */
    int16_t bs = ai - hi;
    /* ... */
}

#define A_OFFSET (1 << (NBITS - 1))
#define MOD_MASK ((1 << NBITS) - 1)

static inline void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
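A sketch of the complete 14-bit inverse lifting step the two body lines above come from: (l, h) is an average/difference pair, and the two original samples are rebuilt into *a and *b.

#include <stdint.h>

static inline void wdec14_sketch(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
{
    int16_t ls = l;                         /* reinterpret both halves as signed */
    int16_t hs = h;
    int hi = hs;
    int ai = ls + (hi & 1) + (hi >> 1);     /* matches the excerpt above */
    int16_t as = ai;                        /* first sample */
    int16_t bs = ai - hi;                   /* second sample = first - difference */

    *a = as;
    *b = bs;
}

wdec16() is the same idea carried out modulo 2^16, which is what the A_OFFSET and MOD_MASK constants are for.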
static void wav_decode(uint16_t *in, int nx, int ox, int ny, int oy, uint16_t mx)
{
    int w14 = (mx < (1 << 14));      /* small ranges use wdec14, otherwise wdec16 */
    int n   = (nx > ny) ? ny : nx;
    /* ... */
    uint16_t *ey = in + oy * (ny - p2);
    uint16_t i00, i01, i10, i11;
    /* ... 2x2 butterflies over the interior of the level ... */
    for (; py <= ey; py += oy2) {
        /* ... */
        uint16_t *ex = py + ox * (nx - p2);
        /* ... */
        for (; px <= ex; px += ox2) {
            uint16_t *p01 = px + ox1;
            uint16_t *p10 = px + oy1;
            uint16_t *p11 = p10 + ox1;

            /* w14 path: */
            wdec14(*px, *p10, &i00, &i10);
            wdec14(*p01, *p11, &i01, &i11);
            wdec14(i00, i01, px, p01);
            wdec14(i10, i11, p10, p11);
            /* otherwise, the 16-bit modular path: */
            wdec16(*px, *p10, &i00, &i10);
            wdec16(*p01, *p11, &i01, &i11);
            wdec16(i00, i01, px, p01);
            wdec16(i10, i11, p10, p11);
        }

        /* leftover column: vertical-only step */
        uint16_t *p10 = px + oy1;
        wdec14(*px, *p10, &i00, p10);
        wdec16(*px, *p10, &i00, p10);
    }

    /* leftover row: horizontal-only step */
    uint16_t *ex = py + ox * (nx - p2);
    for (; px <= ex; px += ox2) {
        uint16_t *p01 = px + ox1;
        wdec14(*px, *p01, &i00, p01);
        wdec16(*px, *p01, &i00, p01);
    }
/* piz_uncompress(): bitmap + Huffman + wavelet, then reorder into scanlines */
uint16_t maxval, min_non_zero, max_non_zero;
uint16_t *tmp = (uint16_t *)td->tmp;
/* ... */
min_non_zero = bytestream2_get_le16(&gb);
max_non_zero = bytestream2_get_le16(&gb);
/* ... */
if (min_non_zero <= max_non_zero)
    /* ... */ max_non_zero - min_non_zero + 1);   /* tail of the bitmap read */
/* ... */
for (j = 0; j < pixel_half_size; j++)
    /* ... */ td->xsize * pixel_half_size, maxval);   /* tail of the per-channel wav_decode() call */
ptr += td->xsize * td->ysize * pixel_half_size;
/* ... */
tmp_offset += pixel_half_size;
/* big-endian hosts byte-swap into the output, little-endian hosts copy: */
s->bbdsp.bswap16_buf(out, in, td->xsize * pixel_half_size);
/* ... */
memcpy(out, in, td->xsize * 2 * pixel_half_size);
static int pxr24_uncompress(EXRContext *s, const uint8_t *src,
                            int compressed_size, int uncompressed_size,
                            EXRThreadData *td)
{
    unsigned long dest_len, expected_len = 0;
    /* ... */
    dest_len = expected_len;

    if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) {
        /* ... */
    } else if (dest_len != expected_len) {
        /* ... */
    }

    for (i = 0; i < td->ysize; i++)
        /* ... per scanline, per channel: */

        /* float channels: three delta-coded byte planes, low 8 mantissa bits dropped */
        ptr[1] = ptr[0] + td->xsize;
        ptr[2] = ptr[1] + td->xsize;
        in     = ptr[2] + td->xsize;
        /* ... */
        for (j = 0; j < td->xsize; ++j) {
            uint32_t diff = ((unsigned)*(ptr[0]++) << 24) |
                            (*(ptr[1]++) << 16) |
                            /* ... */
            bytestream_put_le32(&out, pixel);

        /* half channels: two byte planes */
        ptr[1] = ptr[0] + td->xsize;
        in     = ptr[1] + td->xsize;
        for (j = 0; j < td->xsize; j++) {
            uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++);
            /* ... */
            bytestream_put_le16(&out, pixel);

        /* 32-bit channels: four byte planes */
        ptr[1] = ptr[0] + s->xdelta;
        ptr[2] = ptr[1] + s->xdelta;
        ptr[3] = ptr[2] + s->xdelta;
        /* ... */
        for (j = 0; j < s->xdelta; ++j) {
            uint32_t diff = ((uint32_t)*(ptr[0]++) << 24) |
                            (*(ptr[1]++) << 16) |
                            (*(ptr[2]++) << 8 ) |
                            /* ... */
            bytestream_put_le32(&out, pixel);
/* unpack_14(): expand a 14-byte B44 block into 16 half values */
unsigned short shift = (b[ 2] >> 2) & 15;
unsigned short bias  = (0x20 << shift);
/* ... */
s[ 0] = (b[0] << 8) | b[1];

s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3f) << shift) - bias;
s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3f) << shift) - bias;
s[12] = s[ 8] + ((b[ 4] & 0x3f) << shift) - bias;

s[ 1] = s[ 0] + ((b[ 5] >> 2) << shift) - bias;
s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3f) << shift) - bias;
s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3f) << shift) - bias;
s[13] = s[12] + ((b[ 7] & 0x3f) << shift) - bias;

s[ 2] = s[ 1] + ((b[ 8] >> 2) << shift) - bias;
s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3f) << shift) - bias;
s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3f) << shift) - bias;
s[14] = s[13] + ((b[10] & 0x3f) << shift) - bias;

s[ 3] = s[ 2] + ((b[11] >> 2) << shift) - bias;
s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3f) << shift) - bias;
s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3f) << shift) - bias;
s[15] = s[14] + ((b[13] & 0x3f) << shift) - bias;

for (i = 0; i < 16; ++i) {
    /* ... */

/* unpack_3(): a flat block stores a single value for all 16 samples */
s[0] = (b[0] << 8) | b[1];
/* ... */
for (i = 1; i < 16; i++)
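For comparison, a sketch of the 3-byte flat-block case the last two lines come from: one half value is stored, the order-preserving bit transform used by the 14-byte packer is undone, and all 16 samples of the 4x4 block receive that value (hedged reconstruction, not a verbatim copy).

#include <stdint.h>

static void unpack_3_sketch(const uint8_t b[3], uint16_t s[16])
{
    int i;

    s[0] = (b[0] << 8) | b[1];              /* the single stored sample */

    if (s[0] & 0x8000)                      /* undo the order-preserving transform */
        s[0] &= 0x7fff;
    else
        s[0] = ~s[0];

    for (i = 1; i < 16; i++)                /* replicate across the whole block */
        s[i] = s[0];
}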
/* b44_uncompress(): half channels arrive as 4x4 blocks, either 14 bytes packed
 * or 3 bytes flat; 32-bit channels are stored as-is */
const int8_t *sr = src;
int stay_to_uncompress = compressed_size;
int nb_b44_block_w, nb_b44_block_h;
int index_tl_x, index_tl_y, index_out, index_tmp;
uint16_t tmp_buffer[16];
int target_channel_offset = 0;
/* ... */
nb_b44_block_w = td->xsize / 4;
if ((td->xsize % 4) != 0)
    /* ... */
nb_b44_block_h = td->ysize / 4;
if ((td->ysize % 4) != 0)
    /* ... */

for (iY = 0; iY < nb_b44_block_h; iY++) {
    for (iX = 0; iX < nb_b44_block_w; iX++) {
        if (stay_to_uncompress < 3) {
            /* ... */
        if (src[compressed_size - stay_to_uncompress + 2] == 0xfc) {
            /* ... flat block ... */
            stay_to_uncompress -= 3;
        /* ... */
        if (stay_to_uncompress < 14) {
            /* ... */
        stay_to_uncompress -= 14;
        /* ... scatter the 4x4 block, clipped at the tile border ... */
        for (y = index_tl_y; y < FFMIN(index_tl_y + 4, td->ysize); y++) {
            for (x = index_tl_x; x < FFMIN(index_tl_x + 4, td->xsize); x++) {
                index_tmp = (y - index_tl_y) * 4 + (x - index_tl_x);
                /* ... */

target_channel_offset += 2;

/* 32-bit channels */
if (stay_to_uncompress < td->ysize * td->xsize * 4) {
    av_log(s, AV_LOG_ERROR, "Not enough data for uncompress channel: %d",
           stay_to_uncompress);
    /* ... */
for (y = 0; y < td->ysize; y++) {
    /* ... */
target_channel_offset += 4;

stay_to_uncompress -= td->ysize * td->xsize * 4;
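The 0xfc test and the -= 3 / -= 14 bookkeeping above amount to the following per-block dispatch, sketched with the names from the excerpt (the real loop then copies tmp_buffer into the output with the clipped double loop shown):

const uint8_t *block = src + compressed_size - stay_to_uncompress;

if (block[2] == 0xfc) {                     /* flat 4x4 block: one value, 3 bytes */
    unpack_3(block, tmp_buffer);
    stay_to_uncompress -= 3;
} else {                                    /* fully coded 4x4 block: 14 bytes */
    unpack_14(block, tmp_buffer);
    stay_to_uncompress -= 14;
}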
static int decode_block(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
{
    const uint8_t *channel_buffer[4] = { 0 };
    uint64_t line_offset, uncompressed_size;
    uint64_t tile_x, tile_y, tile_level_x, tile_level_y;
    int bxmin = 0, axmax = 0, window_xoffset = 0;
    int window_xmin, window_xmax, window_ymin, window_ymax;
    int data_xoffset, data_yoffset, data_window_offset, xsize, ysize;
    int c, rgb_channel_count;
    float one_gamma = 1.0f / s->gamma;
    /* ... */

    /* tiled files: each block starts with a 20-byte tile header */
    if (buf_size < 20 || line_offset > buf_size - 20)
        /* ... */
    src = buf + line_offset + 20;
    /* ... */
    tile_level_x = AV_RL32(src - 12);
    tile_level_y = AV_RL32(src - 8);
    /* ... */
    if (data_size <= 0 || data_size > buf_size - line_offset - 20)
        /* ... */
    if (tile_level_x || tile_level_y) {
        /* ... */

    /* scanline files: each block starts with an 8-byte header */
    if (buf_size < 8 || line_offset > buf_size - 8)
        /* ... */
    src = buf + line_offset + 8;
    /* ... */
    if (data_size <= 0 || data_size > buf_size - line_offset - 8)
        /* ... */
        line_offset > buf_size - uncompressed_size)) ||
        /* ... */
        line_offset > buf_size - data_size))) {
        /* ... */

    xsize = window_xmax - window_xmin;
    ysize = window_ymax - window_ymin;
    /* ... */
    if (xsize <= 0 || ysize <= 0)
        /* ... */

    bxmin = window_xoffset * step;
    /* ... */
    window_xmax = avctx->width;
    /* ... */
    if (data_size < uncompressed_size || s->is_tile) {
        /* ... run the per-compression uncompress function ... */
    if (data_size < uncompressed_size) {
        /* ... */

    rgb_channel_count = 3;
    /* ... */
    rgb_channel_count = 1;
    /* ... */
    int channel_count = s->channel_offsets[3] >= 0 ? 4 : rgb_channel_count;
    /* grayscale input: reuse the single channel for all three planes */
    channel_buffer[1] = channel_buffer[0];
    channel_buffer[2] = channel_buffer[0];

    /* planar float output */
    for (c = 0; c < channel_count; c++) {
        /* ... */
        ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * 4);

        for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) {
            /* ... */
            src = channel_buffer[c];
            /* ... */
            memset(ptr_x, 0, bxmin);          /* clear pixels left of the data window */
            ptr_x += window_xoffset;

            /* 32-bit float samples, with optional transfer characteristic or gamma */
            if (trc_func && c < 3) {
                for (x = 0; x < xsize; x++) {
                    t.i = bytestream_get_le32(&src);
                    t.f = trc_func(t.f);
                    /* ... */
            /* ... */
                for (x = 0; x < xsize; x++) {
                    t.i = bytestream_get_le32(&src);
                    if (t.f > 0.0f && c < 3)  /* avoid negative inputs to powf() */
                        t.f = powf(t.f, one_gamma);
                    /* ... */

            /* 16-bit half samples go through the precomputed gamma_table */
            if (c < 3 || !trc_func) {
                for (x = 0; x < xsize; x++) {
                    *ptr_x++ = s->gamma_table[bytestream_get_le16(&src)];
                    /* ... */
            /* ... */
                for (x = 0; x < xsize; x++) {
                    /* ... */

            memset(ptr_x, 0, axmax);          /* clear pixels right of the data window */

    /* packed 16-bit output (RGBA64 / GRAY16) */
    for (i = 0; i < ysize; i++, ptr += p->linesize[0]) {
        /* ... */
        for (c = 0; c < rgb_channel_count; c++) {
            rgb[c] = channel_buffer[c];
            /* ... */
        if (channel_buffer[3])
            a = channel_buffer[3];
        /* ... */
        ptr_x = (uint16_t *) ptr;
        /* ... */
        memset(ptr_x, 0, bxmin);
        /* ... */
        for (x = 0; x < xsize; x++) {
            for (c = 0; c < rgb_channel_count; c++) {
                *ptr_x++ = bytestream_get_le32(&rgb[c]) >> 16;
            /* ... */
            if (channel_buffer[3])
                *ptr_x++ = bytestream_get_le32(&a) >> 16;
        /* ... */
        memset(ptr_x, 0, axmax);

    if (channel_buffer[3])
        /* ... advance the alpha channel buffer as well ... */
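The + 8 / + 20 offsets and the AV_RL32(src - 12) / AV_RL32(src - 8) reads above correspond to the per-block headers of the two file layouts, shown here as hypothetical structs for illustration only (the decoder reads the fields with bytestream macros):

#include <stdint.h>

struct exr_scanline_block_header {          /* scanline files: 8 bytes before the payload */
    int32_t y;                              /* first scanline covered by the block */
    int32_t data_size;                      /* size of the (possibly compressed) payload */
};

struct exr_tile_block_header {              /* tiled files: 20 bytes before the payload */
    int32_t tile_x, tile_y;                 /* tile coordinates */
    int32_t level_x, level_y;               /* mip/rip level */
    int32_t data_size;                      /* size of the (possibly compressed) payload */
};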
/* skip_header_chunk(): skip an attribute's name and type strings */
if (!bytestream2_peek_byte(gb))
    /* ... */
for (int i = 0; i < 2; i++)
    while (bytestream2_get_byte(gb) != 0);
static int check_header_variable(EXRContext *s, const char *value_name,
                                 const char *value_type,
                                 unsigned int minimum_length)
{
    /* ... */
    if (/* ... */ !strcmp(gb->buffer, value_name)) {
        /* ... */
        gb->buffer += strlen(value_name) + 1;
        if (!strcmp(gb->buffer, value_type)) {
            gb->buffer += strlen(value_type) + 1;
            var_size = bytestream2_get_le32(gb);
            /* ... */
        }
        /* type mismatch: put the name back for the next handler */
        gb->buffer -= strlen(value_name) + 1;
        /* ... */ "Unknown data type %s for header variable %s.\n",
                  value_type, value_name);
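Every header attribute the helper above matches is laid out on disk as a NUL-terminated name, a NUL-terminated type string, a little-endian 32-bit size, and then size bytes of payload. A standalone sketch of that layout (parse_exr_attribute and its parameters are hypothetical, not part of the decoder):

#include <stdint.h>

static long parse_exr_attribute(const uint8_t *p, long avail,
                                const char **name, const char **type,
                                uint32_t *size, const uint8_t **data)
{
    const uint8_t *q   = p;
    const uint8_t *end = p + avail;

    *name = (const char *)q;                   /* NUL-terminated attribute name */
    while (q < end && *q) q++;
    if (q++ == end) return -1;

    *type = (const char *)q;                   /* NUL-terminated attribute type */
    while (q < end && *q) q++;
    if (q++ == end) return -1;

    if (end - q < 4) return -1;
    *size = q[0] | (q[1] << 8) | ((uint32_t)q[2] << 16) | ((uint32_t)q[3] << 24);
    q += 4;

    if ((uint32_t)(end - q) < *size) return -1;
    *data = q;                                 /* payload, interpreted according to *type */
    return (q + *size) - p;
}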
/* decode_header(): magic number, version byte, 24-bit flag field */
int layer_match = 0;
/* ... */
int dup_channels = 0;
/* ... */
magic_number = bytestream2_get_le32(gb);
if (magic_number != 20000630) {
    /* ... */
version = bytestream2_get_byte(gb);
/* ... */
flags = bytestream2_get_le24(gb);
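For reference, 20000630 is 0x01312F76, so the first four bytes of every EXR file are 0x76 0x2F 0x31 0x01 in storage order; the version byte and the 24-bit flag field read above follow immediately.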
/* the header is a sequence of attributes; an empty name byte terminates it */
if (bytestream2_peek_byte(gb)) {
    /* ... */
if (!bytestream2_peek_byte(gb))
    /* ... */
if (!bytestream2_peek_byte(gb)) {
    /* ... */
if (bytestream2_peek_byte(gb)) {
    /* ... */
if (!bytestream2_peek_byte(gb))
    /* ... */
if (!bytestream2_peek_byte(gb))
    /* ... */

/* "channels" attribute */
if ((var_size = check_header_variable(s, /* ... */ "chlist", 38)) >= 0) {
    /* ... */
    int channel_index = -1;
    /* ... */
    if (strcmp(s->layer, "") != 0) {
        /* ... */ "Channel match layer : %s.\n", ch_gb.buffer);
        if (*ch_gb.buffer == '.')
            /* ... */
        /* ... */ "Channel doesn't match layer : %s.\n", ch_gb.buffer);
    /* ... */ "Unsupported channel %.256s.\n", ch_gb.buffer);
    /* ... */
    bytestream2_get_byte(&ch_gb))
    /* ... */
    current_pixel_type = bytestream2_get_le32(&ch_gb);
    /* ... */ current_pixel_type);
    /* ... */
    xsub = bytestream2_get_le32(&ch_gb);
    ysub = bytestream2_get_le32(&ch_gb);
    /* ... */
    if (xsub != 1 || ysub != 1) {
        /* ... */ "Subsampling %dx%d",
    /* ... */ "RGB channels not of the same depth.\n");
    } else if (channel_index >= 0) {
        /* ... */ "Multiple channels with index %d.\n", channel_index);
        if (++dup_channels > 10) {
            /* ... */
    channel->xsub = xsub;
    channel->ysub = ysub;
    /* ... */
    if (current_pixel_type == EXR_HALF) {
/* dataWindow ("box2i") */
int xmin, ymin, xmax, ymax;
/* ... */
xmin = bytestream2_get_le32(gb);
ymin = bytestream2_get_le32(gb);
xmax = bytestream2_get_le32(gb);
ymax = bytestream2_get_le32(gb);

if (xmin > xmax || ymin > ymax ||
    (unsigned)xmax - xmin >= INT_MAX ||
    (unsigned)ymax - ymin >= INT_MAX) {
1534 "box2i", 34)) >= 0) {
1542 sx = bytestream2_get_le32(gb);
1543 sy = bytestream2_get_le32(gb);
1544 dx = bytestream2_get_le32(gb);
1545 dy = bytestream2_get_le32(gb);
1552 "lineOrder", 25)) >= 0) {
1559 line_order = bytestream2_get_byte(gb);
1561 if (line_order > 2) {
1569 "float", 31)) >= 0) {
1575 s->
sar = bytestream2_get_le32(gb);
1579 "compression", 29)) >= 0) {
1590 "Found more than one compression attribute.\n");
1595 "tiledesc", 22)) >= 0) {
1600 "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n");
1605 tileLevel = bytestream2_get_byte(gb);
1625 "string", 1)) >= 0) {
1633 "rational", 33)) >= 0) {
1650 "string", 16)) >= 0) {
1654 if (strncmp(
"scanlineimage", key, var_size) &&
1655 strncmp(
"tiledimage", key, var_size))
1660 "preview", 16)) >= 0) {
1661 uint32_t pw = bytestream2_get_le32(gb);
1662 uint32_t ph = bytestream2_get_le32(gb);
1663 int64_t psize = 4LL * pw * ph;
/* unknown attributes: read name, type and size, store strings as metadata */
while (/* ... */ bytestream2_peek_byte(gb) && i < 255) {
    name[i++] = bytestream2_get_byte(gb);
/* ... */
while (/* ... */ bytestream2_peek_byte(gb) && i < 255) {
    type[i++] = bytestream2_get_byte(gb);
/* ... */
size = bytestream2_get_le32(gb);

if (!strcmp(type, "string"))
/* decode_frame() */
int i, y, ret, ymax;
/* ... */
uint64_t start_offset_table;
uint64_t start_next_scanline;
/* ... */
out_line_size = avctx->width * 4;
/* ... */

/* some files ship an all-zero offset table; rebuild it by walking the blocks */
if (!s->is_tile && bytestream2_peek_le64(gb) == 0) {
    /* ... */
    start_next_scanline = start_offset_table + nb_blocks * 8;
    /* ... */
    for (y = 0; y < nb_blocks; y++) {
        /* ... */
        bytestream2_put_le64(&offset_table_writer, start_next_scanline);
        /* ... */
        start_next_scanline += (bytestream2_get_le32(gb) + 8);

/* clear the picture lines that no block covers */
for (i = 0; i < planes; i++) {
    ptr = picture->data[i];
    /* ... */
    memset(ptr, 0, out_line_size);

if (ymax < avctx->height)
    for (i = 0; i < planes; i++) {
        /* ... */
        for (y = ymax; y < avctx->height; y++) {
            memset(ptr, 0, out_line_size);
/* decode_init(): precompute the 65536-entry half -> float gamma_table */
float one_gamma = 1.0f / s->gamma;
/* ... */
for (i = 0; i < 65536; ++i) {
    /* ... */
    t.f = trc_func(t.f);
    /* ... */
if (one_gamma > 0.9999f && one_gamma < 1.0001f) {
    for (i = 0; i < 65536; ++i) {
        /* ... plain half -> float ... */
    for (i = 0; i < 65536; ++i) {
        /* ... */
        t.f = powf(t.f, one_gamma);
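A sketch of the table build for the plain-gamma case, reusing the half_to_float() helper sketched earlier (both names are hypothetical; the decoder stores union av_intfloat32 entries and has an extra transfer-characteristic branch):

#include <math.h>
#include <stdint.h>

static void build_gamma_table_sketch(float *table, float gamma)
{
    float one_gamma = 1.0f / gamma;
    int i;

    if (one_gamma > 0.9999f && one_gamma < 1.0001f) {
        for (i = 0; i < 65536; i++)
            table[i] = half_to_float((uint16_t)i);            /* plain half -> float */
    } else {
        for (i = 0; i < 65536; i++) {
            float f = half_to_float((uint16_t)i);
            table[i] = f > 0.0f ? powf(f, one_gamma) : f;     /* leave zero and negatives alone */
        }
    }
}

decode_block() then converts every 16-bit half sample with a single table lookup instead of calling powf() per pixel.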
#define OFFSET(x) offsetof(EXRContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "layer", "Set the decoding layer",                  OFFSET(layer),         /* ... */ },
    { "part",  "Set the decoding part",                   OFFSET(selected_part), /* ... */ },
    { "gamma", "Set the float gamma value when decoding", OFFSET(gamma),         /* ... */ },

    { "apply_trc", "color transfer characteristics to apply to EXR linear input",
                   OFFSET(apply_trc_type), /* ... */ },
    { "bt709",        "BT.709",           0, /* ... */ },
    { "gamma",        "gamma",            0, /* ... */ },
    { "gamma22",      "BT.470 M",         0, /* ... */ },
    { "gamma28",      "BT.470 BG",        0, /* ... */ },
    { "smpte170m",    "SMPTE 170 M",      0, /* ... */ },
    { "smpte240m",    "SMPTE 240 M",      0, /* ... */ },
    { "linear",       "Linear",           0, /* ... */ },
    { "log_sqrt",     "Log square root",  0, /* ... */ },
    { "iec61966_2_4", "IEC 61966-2-4",    0, /* ... */ },
    { "bt1361",       "BT.1361",          0, /* ... */ },
    { "iec61966_2_1", "IEC 61966-2-1",    0, /* ... */ },
    { "bt2020_10bit", "BT.2020 - 10 bit", 0, /* ... */ },
    { "bt2020_12bit", "BT.2020 - 12 bit", 0, /* ... */ },
    { "smpte2084",    "SMPTE ST 2084",    0, /* ... */ },
    { "smpte428_1",   "SMPTE ST 428-1",   0, /* ... */ },
    /* ... */
};

/* ... */
.priv_class = &exr_class,
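Usage note (hedged, assuming a stock ffmpeg build): these decoder-private options are passed as input options, for example ffmpeg -layer diffuse -gamma 2.2 -i input.exr output.png, or ffmpeg -apply_trc iec61966_2_1 -i input.exr output.png to bake an sRGB transfer into the otherwise linear EXR data.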