41 { 1, 3, 1, 3, 1, 3, 1, 3, },
42 { 2, 0, 2, 0, 2, 0, 2, 0, },
43 { 1, 3, 1, 3, 1, 3, 1, 3, },
47 { 6, 2, 6, 2, 6, 2, 6, 2, },
48 { 0, 4, 0, 4, 0, 4, 0, 4, },
49 { 6, 2, 6, 2, 6, 2, 6, 2, },
53 { 8, 4, 11, 7, 8, 4, 11, 7, },
54 { 2, 14, 1, 13, 2, 14, 1, 13, },
55 { 10, 6, 9, 5, 10, 6, 9, 5, },
56 { 0, 12, 3, 15, 0, 12, 3, 15, },
57 { 8, 4, 11, 7, 8, 4, 11, 7, },
61 { 17, 9, 23, 15, 16, 8, 22, 14, },
62 { 5, 29, 3, 27, 4, 28, 2, 26, },
63 { 21, 13, 19, 11, 20, 12, 18, 10, },
64 { 0, 24, 6, 30, 1, 25, 7, 31, },
65 { 16, 8, 22, 14, 17, 9, 23, 15, },
66 { 4, 28, 2, 26, 5, 29, 3, 27, },
67 { 20, 12, 18, 10, 21, 13, 19, 11, },
68 { 1, 25, 7, 31, 0, 24, 6, 30, },
69 { 17, 9, 23, 15, 16, 8, 22, 14, },
73 { 0, 55, 14, 68, 3, 58, 17, 72, },
74 { 37, 18, 50, 32, 40, 22, 54, 35, },
75 { 9, 64, 5, 59, 13, 67, 8, 63, },
76 { 46, 27, 41, 23, 49, 31, 44, 26, },
77 { 2, 57, 16, 71, 1, 56, 15, 70, },
78 { 39, 21, 52, 34, 38, 19, 51, 33, },
79 { 11, 66, 7, 62, 10, 65, 6, 60, },
80 { 48, 30, 43, 25, 47, 29, 42, 24, },
81 { 0, 55, 14, 68, 3, 58, 17, 72, },
86 {117, 62, 158, 103, 113, 58, 155, 100, },
87 { 34, 199, 21, 186, 31, 196, 17, 182, },
88 {144, 89, 131, 76, 141, 86, 127, 72, },
89 { 0, 165, 41, 206, 10, 175, 52, 217, },
90 {110, 55, 151, 96, 120, 65, 162, 107, },
91 { 28, 193, 14, 179, 38, 203, 24, 189, },
92 {138, 83, 124, 69, 148, 93, 134, 79, },
93 { 7, 172, 48, 213, 3, 168, 45, 210, },
94 {117, 62, 158, 103, 113, 58, 155, 100, },
99 { 0, 143, 18, 200, 2, 156, 25, 215, },
100 { 78, 28, 125, 64, 89, 36, 138, 74, },
101 { 10, 180, 3, 161, 16, 195, 8, 175, },
102 {109, 51, 93, 38, 121, 60, 105, 47, },
103 { 1, 152, 23, 210, 0, 147, 20, 205, },
104 { 85, 33, 134, 71, 81, 30, 130, 67, },
105 { 14, 190, 6, 171, 12, 185, 5, 166, },
106 {117, 57, 101, 44, 113, 54, 97, 41, },
107 { 0, 143, 18, 200, 2, 156, 25, 215, },
112 { 0, 124, 8, 193, 0, 140, 12, 213, },
113 { 55, 14, 104, 42, 66, 19, 119, 52, },
114 { 3, 168, 1, 145, 6, 187, 3, 162, },
115 { 86, 31, 70, 21, 99, 39, 82, 28, },
116 { 0, 134, 11, 206, 0, 129, 9, 200, },
117 { 62, 17, 114, 48, 58, 16, 109, 45, },
118 { 5, 181, 2, 157, 4, 175, 1, 151, },
119 { 95, 36, 78, 26, 90, 34, 74, 24, },
120 { 0, 124, 8, 193, 0, 140, 12, 213, },
125 { 0, 107, 3, 187, 0, 125, 6, 212, },
126 { 39, 7, 86, 28, 49, 11, 102, 36, },
127 { 1, 158, 0, 131, 3, 180, 1, 151, },
128 { 68, 19, 52, 12, 81, 25, 64, 17, },
129 { 0, 119, 5, 203, 0, 113, 4, 195, },
130 { 45, 9, 96, 33, 42, 8, 91, 30, },
131 { 2, 172, 1, 144, 2, 165, 0, 137, },
132 { 77, 23, 60, 15, 72, 21, 56, 14, },
133 { 0, 107, 3, 187, 0, 125, 6, 212, },
137 #define output_pixel(pos, val, bias, signedness) \ 139 AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ 141 AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ 146 int big_endian,
int output_bits)
152 for (i = 0; i < dstW; i++) {
153 int val = src[
i] + (1 << (shift - 1));
160 const int32_t **
src, uint16_t *dest,
int dstW,
161 int big_endian,
int output_bits)
167 for (i = 0; i < dstW; i++) {
168 int val = 1 << (shift - 1);
177 for (j = 0; j < filterSize; j++)
178 val += src[j][i] * (
unsigned)filter[j];
185 const int16_t *chrFilter,
int chrFilterSize,
186 const int16_t **chrUSrc,
const int16_t **chrVSrc,
189 uint16_t *dest = (uint16_t*)dest8;
196 for (i = 0; i < chrDstW; i++) {
197 int u = 1 << (shift - 1);
198 int v = 1 << (shift - 1);
203 for (j = 0; j < chrFilterSize; j++) {
204 u += uSrc[j][
i] * (unsigned)chrFilter[j];
205 v += vSrc[j][
i] * (unsigned)chrFilter[j];
216 static const int big_endian = HAVE_BIGENDIAN;
217 static const int shift = 3;
218 static const float float_mult = 1.0f / 65535.0f;
222 for (i = 0; i < dstW; ++
i){
223 val = src[
i] + (1 << (shift - 1));
225 dest[
i] = float_mult * (float)val_uint;
232 static const int big_endian = HAVE_BIGENDIAN;
233 static const int shift = 3;
234 static const float float_mult = 1.0f / 65535.0f;
238 for (i = 0; i < dstW; ++
i){
239 val = src[
i] + (1 << (shift - 1));
247 float *dest,
int dstW)
249 static const int big_endian = HAVE_BIGENDIAN;
250 static const int shift = 15;
251 static const float float_mult = 1.0f / 65535.0f;
255 for (i = 0; i < dstW; ++
i){
256 val = (1 << (shift - 1)) - 0x40000000;
257 for (j = 0; j < filterSize; ++j){
258 val += src[j][
i] * (unsigned)filter[j];
261 dest[
i] = float_mult * (float)val_uint;
267 uint32_t *dest,
int dstW)
269 static const int big_endian = HAVE_BIGENDIAN;
270 static const int shift = 15;
271 static const float float_mult = 1.0f / 65535.0f;
275 for (i = 0; i < dstW; ++
i){
276 val = (1 << (shift - 1)) - 0x40000000;
277 for (j = 0; j < filterSize; ++j){
278 val += src[j][
i] * (unsigned)filter[j];
285 #define yuv2plane1_float(template, dest_type, BE_LE) \ 286 static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \ 287 const uint8_t *dither, int offset) \ 289 template((const int32_t *)src, (dest_type *)dest, dstW); \ 292 #define yuv2planeX_float(template, dest_type, BE_LE) \ 293 static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \ 294 const int16_t **src, uint8_t *dest, int dstW, \ 295 const uint8_t *dither, int offset) \ 297 template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \ 314 #define output_pixel(pos, val) \ 316 AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \ 318 AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \ 322 yuv2plane1_10_c_template(
const int16_t *
src, uint16_t *dest,
int dstW,
323 int big_endian,
int output_bits)
326 int shift = 15 - output_bits;
328 for (i = 0; i < dstW; i++) {
329 int val = src[
i] + (1 << (shift - 1));
336 const int16_t **
src, uint16_t *dest,
int dstW,
337 int big_endian,
int output_bits)
340 int shift = 11 + 16 - output_bits;
342 for (i = 0; i < dstW; i++) {
343 int val = 1 << (shift - 1);
346 for (j = 0; j < filterSize; j++)
347 val += src[j][i] * filter[j];
355 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \ 356 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \ 357 uint8_t *dest, int dstW, \ 358 const uint8_t *dither, int offset)\ 360 yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \ 361 (uint16_t *) dest, dstW, is_be, bits); \ 363 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \ 364 const int16_t **src, uint8_t *dest, int dstW, \ 365 const uint8_t *dither, int offset)\ 367 yuv2planeX_## template_size ## _c_template(filter, \ 368 filterSize, (const typeX_t **) src, \ 369 (uint16_t *) dest, dstW, is_be, bits); \ 373 yuv2NBPS(10, BE, 1, 10, int16_t)
374 yuv2NBPS(10, LE, 0, 10, int16_t)
375 yuv2NBPS(12, BE, 1, 10, int16_t)
376 yuv2NBPS(12, LE, 0, 10, int16_t)
377 yuv2NBPS(14, BE, 1, 10, int16_t)
378 yuv2NBPS(14, LE, 0, 10, int16_t)
379 yuv2NBPS(16, BE, 1, 16,
int32_t)
380 yuv2NBPS(16, LE, 0, 16,
int32_t)
382 static
void yuv2planeX_8_c(const int16_t *
filter,
int filterSize,
387 for (i=0; i<dstW; i++) {
388 int val = dither[(i +
offset) & 7] << 12;
390 for (j=0; j<filterSize; j++)
391 val += src[j][i] * filter[j];
401 for (i=0; i<dstW; i++) {
402 int val = (src[
i] + dither[(i +
offset) & 7]) >> 7;
408 const int16_t *chrFilter,
int chrFilterSize,
409 const int16_t **chrUSrc,
const int16_t **chrVSrc,
416 for (i=0; i<chrDstW; i++) {
417 int u = chrDither[i & 7] << 12;
418 int v = chrDither[(i + 3) & 7] << 12;
420 for (j=0; j<chrFilterSize; j++) {
421 u += chrUSrc[j][
i] * chrFilter[j];
422 v += chrVSrc[j][
i] * chrFilter[j];
429 for (i=0; i<chrDstW; i++) {
430 int u = chrDither[i & 7] << 12;
431 int v = chrDither[(i + 3) & 7] << 12;
433 for (j=0; j<chrFilterSize; j++) {
434 u += chrUSrc[j][
i] * chrFilter[j];
435 v += chrVSrc[j][
i] * chrFilter[j];
444 #define output_pixel(pos, val) \ 446 AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \ 448 AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \ 452 uint16_t *dest,
int dstW,
458 for (i = 0; i < dstW; i++) {
459 int val = src[
i] + (1 << (shift - 1));
465 const int16_t **
src, uint16_t *dest,
int dstW,
471 for (i = 0; i < dstW; i++) {
472 int val = 1 << (shift - 1);
474 for (j = 0; j < filterSize; j++)
475 val += src[j][i] * filter[j];
482 const int16_t *chrFilter,
int chrFilterSize,
483 const int16_t **chrUSrc,
const int16_t **chrVSrc,
486 uint16_t *dest = (uint16_t*)dest8;
491 for (i = 0; i < chrDstW; i++) {
492 int u = 1 << (shift - 1);
493 int v = 1 << (shift - 1);
495 for (j = 0; j < chrFilterSize; j++) {
496 u += chrUSrc[j][
i] * chrFilter[j];
497 v += chrVSrc[j][
i] * chrFilter[j];
523 yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0);
530 yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1);
536 #define accumulate_bit(acc, val) \ 539 #define output_pixel(pos, acc) \ 540 if (target == AV_PIX_FMT_MONOBLACK) { \ 548 const int16_t **lumSrc,
int lumFilterSize,
549 const int16_t *chrFilter,
const int16_t **chrUSrc,
550 const int16_t **chrVSrc,
int chrFilterSize,
551 const int16_t **alpSrc,
uint8_t *dest,
int dstW,
559 for (i = 0; i < dstW; i += 2) {
564 for (j = 0; j < lumFilterSize; j++) {
565 Y1 += lumSrc[j][
i] * lumFilter[j];
566 Y2 += lumSrc[j][i+1] * lumFilter[j];
570 if ((Y1 | Y2) & 0x100) {
577 acc = 2*acc + (Y1 >= 128);
582 acc = 2*acc + (err >= 128);
601 const int16_t *ubuf[2],
const int16_t *vbuf[2],
602 const int16_t *abuf[2],
uint8_t *dest,
int dstW,
603 int yalpha,
int uvalpha,
int y,
606 const int16_t *buf0 = buf[0], *buf1 = buf[1];
608 int yalpha1 = 4096 - yalpha;
615 for (i = 0; i < dstW; i +=2) {
618 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
621 acc = 2*acc + (Y >= 128);
624 err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
627 acc = 2*acc + (err >= 128);
635 for (i = 0; i < dstW; i += 8) {
638 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
640 Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
642 Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
644 Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
646 Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
648 Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
650 Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
652 Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
662 const int16_t *ubuf[2],
const int16_t *vbuf[2],
663 const int16_t *abuf0,
uint8_t *dest,
int dstW,
672 for (i = 0; i < dstW; i +=2) {
675 Y = ((buf0[i + 0] + 64) >> 7);
678 acc = 2*acc + (Y >= 128);
681 err = ((buf0[i + 1] + 64) >> 7);
684 acc = 2*acc + (err >= 128);
692 for (i = 0; i < dstW; i += 8) {
709 #undef accumulate_bit 711 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ 712 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 713 const int16_t **lumSrc, int lumFilterSize, \ 714 const int16_t *chrFilter, const int16_t **chrUSrc, \ 715 const int16_t **chrVSrc, int chrFilterSize, \ 716 const int16_t **alpSrc, uint8_t *dest, int dstW, \ 719 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 720 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 721 alpSrc, dest, dstW, y, fmt); \ 724 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ 725 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 726 const int16_t *abuf[2], uint8_t *dest, int dstW, \ 727 int yalpha, int uvalpha, int y) \ 729 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 730 dest, dstW, yalpha, uvalpha, y, fmt); \ 733 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ 734 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 735 const int16_t *abuf0, uint8_t *dest, int dstW, \ 736 int uvalpha, int y) \ 738 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \ 739 abuf0, dest, dstW, uvalpha, \ 746 #define output_pixels(pos, Y1, U, Y2, V) \ 747 if (target == AV_PIX_FMT_YUYV422) { \ 748 dest[pos + 0] = Y1; \ 750 dest[pos + 2] = Y2; \ 752 } else if (target == AV_PIX_FMT_YVYU422) { \ 753 dest[pos + 0] = Y1; \ 755 dest[pos + 2] = Y2; \ 759 dest[pos + 1] = Y1; \ 761 dest[pos + 3] = Y2; \ 766 const int16_t **lumSrc,
int lumFilterSize,
767 const int16_t *chrFilter,
const int16_t **chrUSrc,
768 const int16_t **chrVSrc,
int chrFilterSize,
769 const int16_t **alpSrc,
uint8_t *dest,
int dstW,
774 for (i = 0; i < ((dstW + 1) >> 1); i++) {
781 for (j = 0; j < lumFilterSize; j++) {
782 Y1 += lumSrc[j][i * 2] * lumFilter[j];
783 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
785 for (j = 0; j < chrFilterSize; j++) {
786 U += chrUSrc[j][
i] * chrFilter[j];
787 V += chrVSrc[j][
i] * chrFilter[j];
793 if ((Y1 | Y2 | U | V) & 0x100) {
805 const int16_t *ubuf[2],
const int16_t *vbuf[2],
806 const int16_t *abuf[2],
uint8_t *dest,
int dstW,
807 int yalpha,
int uvalpha,
int y,
810 const int16_t *buf0 = buf[0], *buf1 = buf[1],
811 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
812 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
813 int yalpha1 = 4096 - yalpha;
814 int uvalpha1 = 4096 - uvalpha;
819 for (i = 0; i < ((dstW + 1) >> 1); i++) {
820 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
821 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
822 int U = (ubuf0[
i] * uvalpha1 + ubuf1[
i] * uvalpha) >> 19;
823 int V = (vbuf0[
i] * uvalpha1 + vbuf1[
i] * uvalpha) >> 19;
825 if ((Y1 | Y2 | U | V) & 0x100) {
838 const int16_t *ubuf[2],
const int16_t *vbuf[2],
839 const int16_t *abuf0,
uint8_t *dest,
int dstW,
842 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
845 if (uvalpha < 2048) {
846 for (i = 0; i < ((dstW + 1) >> 1); i++) {
847 int Y1 = (buf0[i * 2 ]+64) >> 7;
848 int Y2 = (buf0[i * 2 + 1]+64) >> 7;
849 int U = (ubuf0[
i] +64) >> 7;
850 int V = (vbuf0[
i] +64) >> 7;
852 if ((Y1 | Y2 | U | V) & 0x100) {
862 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
863 for (i = 0; i < ((dstW + 1) >> 1); i++) {
864 int Y1 = (buf0[i * 2 ] + 64) >> 7;
865 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
866 int U = (ubuf0[
i] + ubuf1[
i]+128) >> 8;
867 int V = (vbuf0[
i] + vbuf1[
i]+128) >> 8;
869 if ((Y1 | Y2 | U | V) & 0x100) {
887 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B) 888 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R) 889 #define output_pixel(pos, val) \ 890 if (isBE(target)) { \ 898 const int32_t **lumSrc,
int lumFilterSize,
899 const int16_t *chrFilter,
const int32_t **unused_chrUSrc,
900 const int32_t **unused_chrVSrc,
int unused_chrFilterSize,
901 const int32_t **alpSrc, uint16_t *dest,
int dstW,
902 int y,
enum AVPixelFormat target,
int unused_hasAlpha,
int unused_eightbytes)
904 int hasAlpha = !!alpSrc;
907 for (i = 0; i < dstW; i++) {
912 for (j = 0; j < lumFilterSize; j++)
913 Y += lumSrc[j][i] * lumFilter[j];
916 Y += (1<<3) + 0x8000;
920 A = -0x40000000 + (1<<14);
921 for (j = 0; j < lumFilterSize; j++)
922 A += alpSrc[j][i] * lumFilter[j];
937 const int32_t *abuf[2], uint16_t *dest,
int dstW,
938 int yalpha,
int unused_uvalpha,
int y,
939 enum AVPixelFormat target,
int unused_hasAlpha,
int unused_eightbytes)
941 int hasAlpha = abuf && abuf[0] && abuf[1];
942 const int32_t *buf0 = buf[0], *buf1 = buf[1],
943 *abuf0 = hasAlpha ? abuf[0] :
NULL,
944 *abuf1 = hasAlpha ? abuf[1] :
NULL;
945 int yalpha1 = 4096 - yalpha;
950 for (i = 0; i < dstW; i++) {
951 int Y = (buf0[
i] * yalpha1 + buf1[
i] * yalpha) >> 15;
957 A = (abuf0[
i] * yalpha1 + abuf1[
i] * yalpha) >> 15;
969 const int32_t *abuf0, uint16_t *dest,
int dstW,
970 int unused_uvalpha,
int y,
enum AVPixelFormat target,
int unused_hasAlpha,
int unused_eightbytes)
972 int hasAlpha = !!abuf0;
975 for (i = 0; i < dstW; i++) {
976 int Y = buf0[
i] >> 3;
994 const int32_t **lumSrc,
int lumFilterSize,
995 const int16_t *chrFilter,
const int32_t **chrUSrc,
996 const int32_t **chrVSrc,
int chrFilterSize,
997 const int32_t **alpSrc, uint16_t *dest,
int dstW,
998 int y,
enum AVPixelFormat target,
int hasAlpha,
int eightbytes)
1001 int A1 = 0xffff<<14,
A2 = 0xffff<<14;
1003 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1005 int Y1 = -0x40000000;
1006 int Y2 = -0x40000000;
1007 int U = -(128 << 23);
1008 int V = -(128 << 23);
1011 for (j = 0; j < lumFilterSize; j++) {
1012 Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
1013 Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1015 for (j = 0; j < chrFilterSize; j++) {;
1016 U += chrUSrc[j][
i] * (unsigned)chrFilter[j];
1017 V += chrVSrc[j][
i] * (unsigned)chrFilter[j];
1023 for (j = 0; j < lumFilterSize; j++) {
1024 A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
1025 A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1077 const int32_t *abuf[2], uint16_t *dest,
int dstW,
1078 int yalpha,
int uvalpha,
int y,
1081 const int32_t *buf0 = buf[0], *buf1 = buf[1],
1082 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1083 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1084 *abuf0 = hasAlpha ? abuf[0] :
NULL,
1085 *abuf1 = hasAlpha ? abuf[1] :
NULL;
1086 int yalpha1 = 4096 - yalpha;
1087 int uvalpha1 = 4096 - uvalpha;
1089 int A1 = 0xffff<<14,
A2 = 0xffff<<14;
1094 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1095 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1096 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1097 int U = (ubuf0[
i] * uvalpha1 + ubuf1[
i] * uvalpha - (128 << 23)) >> 14;
1098 int V = (vbuf0[
i] * uvalpha1 + vbuf1[
i] * uvalpha - (128 << 23)) >> 14;
1113 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1114 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1142 const int32_t *abuf0, uint16_t *dest,
int dstW,
1143 int uvalpha,
int y,
enum AVPixelFormat target,
int hasAlpha,
int eightbytes)
1145 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1147 int A1 = 0xffff<<14,
A2= 0xffff<<14;
1149 if (uvalpha < 2048) {
1150 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1151 int Y1 = (buf0[i * 2] ) >> 2;
1152 int Y2 = (buf0[i * 2 + 1]) >> 2;
1153 int U = (ubuf0[
i] - (128 << 11)) >> 2;
1154 int V = (vbuf0[
i] - (128 << 11)) >> 2;
1165 A1 = abuf0[i * 2 ] << 11;
1166 A2 = abuf0[i * 2 + 1] << 11;
1194 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1195 int A1 = 0xffff<<14,
A2 = 0xffff<<14;
1196 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1197 int Y1 = (buf0[i * 2] ) >> 2;
1198 int Y2 = (buf0[i * 2 + 1]) >> 2;
1199 int U = (ubuf0[
i] + ubuf1[
i] - (128 << 12)) >> 3;
1200 int V = (vbuf0[
i] + vbuf1[
i] - (128 << 12)) >> 3;
1211 A1 = abuf0[i * 2 ] << 11;
1212 A2 = abuf0[i * 2 + 1] << 11;
1244 const int32_t **lumSrc,
int lumFilterSize,
1245 const int16_t *chrFilter,
const int32_t **chrUSrc,
1246 const int32_t **chrVSrc,
int chrFilterSize,
1247 const int32_t **alpSrc, uint16_t *dest,
int dstW,
1248 int y,
enum AVPixelFormat target,
int hasAlpha,
int eightbytes)
1253 for (i = 0; i < dstW; i++) {
1255 int Y = -0x40000000;
1256 int U = -(128 << 23);
1257 int V = -(128 << 23);
1260 for (j = 0; j < lumFilterSize; j++) {
1261 Y += lumSrc[j][
i] * (unsigned)lumFilter[j];
1263 for (j = 0; j < chrFilterSize; j++) {;
1264 U += chrUSrc[j][
i] * (unsigned)chrFilter[j];
1265 V += chrVSrc[j][
i] * (unsigned)chrFilter[j];
1270 for (j = 0; j < lumFilterSize; j++) {
1271 A += alpSrc[j][
i] * (unsigned)lumFilter[j];
1309 const int32_t *abuf[2], uint16_t *dest,
int dstW,
1310 int yalpha,
int uvalpha,
int y,
1313 const int32_t *buf0 = buf[0], *buf1 = buf[1],
1314 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1315 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1316 *abuf0 = hasAlpha ? abuf[0] :
NULL,
1317 *abuf1 = hasAlpha ? abuf[1] :
NULL;
1318 int yalpha1 = 4096 - yalpha;
1319 int uvalpha1 = 4096 - uvalpha;
1326 for (i = 0; i < dstW; i++) {
1327 int Y = (buf0[
i] * yalpha1 + buf1[
i] * yalpha) >> 14;
1328 int U = (ubuf0[
i] * uvalpha1 + ubuf1[
i] * uvalpha - (128 << 23)) >> 14;
1329 int V = (vbuf0[
i] * uvalpha1 + vbuf1[
i] * uvalpha - (128 << 23)) >> 14;
1341 A = (abuf0[
i] * yalpha1 + abuf1[
i] * yalpha) >> 1;
1361 const int32_t *abuf0, uint16_t *dest,
int dstW,
1362 int uvalpha,
int y,
enum AVPixelFormat target,
int hasAlpha,
int eightbytes)
1364 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1368 if (uvalpha < 2048) {
1369 for (i = 0; i < dstW; i++) {
1370 int Y = (buf0[
i]) >> 2;
1371 int U = (ubuf0[
i] - (128 << 11)) >> 2;
1372 int V = (vbuf0[
i] - (128 << 11)) >> 2;
1400 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1402 for (i = 0; i < dstW; i++) {
1403 int Y = (buf0[
i] ) >> 2;
1404 int U = (ubuf0[
i] + ubuf1[
i] - (128 << 12)) >> 3;
1405 int V = (vbuf0[
i] + vbuf1[
i] - (128 << 12)) >> 3;
1439 #define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \ 1440 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 1441 const int16_t **_lumSrc, int lumFilterSize, \ 1442 const int16_t *chrFilter, const int16_t **_chrUSrc, \ 1443 const int16_t **_chrVSrc, int chrFilterSize, \ 1444 const int16_t **_alpSrc, uint8_t *_dest, int dstW, \ 1447 const int32_t **lumSrc = (const int32_t **) _lumSrc, \ 1448 **chrUSrc = (const int32_t **) _chrUSrc, \ 1449 **chrVSrc = (const int32_t **) _chrVSrc, \ 1450 **alpSrc = (const int32_t **) _alpSrc; \ 1451 uint16_t *dest = (uint16_t *) _dest; \ 1452 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 1453 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 1454 alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \ 1457 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \ 1458 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ 1459 const int16_t *_abuf[2], uint8_t *_dest, int dstW, \ 1460 int yalpha, int uvalpha, int y) \ 1462 const int32_t **buf = (const int32_t **) _buf, \ 1463 **ubuf = (const int32_t **) _ubuf, \ 1464 **vbuf = (const int32_t **) _vbuf, \ 1465 **abuf = (const int32_t **) _abuf; \ 1466 uint16_t *dest = (uint16_t *) _dest; \ 1467 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 1468 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \ 1471 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \ 1472 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ 1473 const int16_t *_abuf0, uint8_t *_dest, int dstW, \ 1474 int uvalpha, int y) \ 1476 const int32_t *buf0 = (const int32_t *) _buf0, \ 1477 **ubuf = (const int32_t **) _ubuf, \ 1478 **vbuf = (const int32_t **) _vbuf, \ 1479 *abuf0 = (const int32_t *) _abuf0; \ 1480 uint16_t *dest = (uint16_t *) _dest; \ 1481 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ 1482 dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \ 1523 unsigned A1,
unsigned A2,
1524 const
void *_r, const
void *_g, const
void *_b,
int y,
1529 uint32_t *dest = (uint32_t *) _dest;
1530 const uint32_t *
r = (
const uint32_t *) _r;
1531 const uint32_t *
g = (
const uint32_t *) _g;
1532 const uint32_t *
b = (
const uint32_t *) _b;
1537 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
1538 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
1543 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
1544 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
1545 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
1547 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 1550 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
1552 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1553 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1562 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b) 1563 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r) 1565 dest[i * 6 + 0] =
r_b[Y1];
1566 dest[i * 6 + 1] = g[Y1];
1567 dest[i * 6 + 2] =
b_r[Y1];
1568 dest[i * 6 + 3] =
r_b[Y2];
1569 dest[i * 6 + 4] = g[Y2];
1570 dest[i * 6 + 5] =
b_r[Y2];
1576 uint16_t *dest = (uint16_t *) _dest;
1577 const uint16_t *
r = (
const uint16_t *) _r;
1578 const uint16_t *
g = (
const uint16_t *) _g;
1579 const uint16_t *
b = (
const uint16_t *) _b;
1580 int dr1, dg1, db1, dr2, dg2, db2;
1605 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1606 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1608 uint32_t *dest = (uint32_t *) _dest;
1609 const uint32_t *
r = (
const uint32_t *) _r;
1610 const uint32_t *
g = (
const uint32_t *) _g;
1611 const uint32_t *
b = (
const uint32_t *) _b;
1612 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1613 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1619 int dr1, dg1, db1, dr2, dg2, db2;
1624 dr1 = dg1 = d32[(i * 2 + 0) & 7];
1625 db1 = d64[(i * 2 + 0) & 7];
1626 dr2 = dg2 = d32[(i * 2 + 1) & 7];
1627 db2 = d64[(i * 2 + 1) & 7];
1631 dr1 = db1 = d128[(i * 2 + 0) & 7];
1632 dg1 = d64[(i * 2 + 0) & 7];
1633 dr2 = db2 = d128[(i * 2 + 1) & 7];
1634 dg2 = d64[(i * 2 + 1) & 7];
1638 dest[
i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
1639 ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
1641 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1642 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1649 const int16_t **lumSrc,
int lumFilterSize,
1650 const int16_t *chrFilter,
const int16_t **chrUSrc,
1651 const int16_t **chrVSrc,
int chrFilterSize,
1652 const int16_t **alpSrc,
uint8_t *dest,
int dstW,
1657 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1663 const void *
r, *
g, *
b;
1665 for (j = 0; j < lumFilterSize; j++) {
1666 Y1 += lumSrc[j][i * 2] * lumFilter[j];
1667 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1669 for (j = 0; j < chrFilterSize; j++) {
1670 U += chrUSrc[j][
i] * chrFilter[j];
1671 V += chrVSrc[j][
i] * chrFilter[j];
1680 for (j = 0; j < lumFilterSize; j++) {
1681 A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1682 A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1686 if ((A1 | A2) & 0x100) {
1696 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1697 r, g, b, y, target, hasAlpha);
1703 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1704 const int16_t *abuf[2],
uint8_t *dest,
int dstW,
1705 int yalpha,
int uvalpha,
int y,
1708 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1709 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1710 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1711 *abuf0 = hasAlpha ? abuf[0] :
NULL,
1712 *abuf1 = hasAlpha ? abuf[1] :
NULL;
1713 int yalpha1 = 4096 - yalpha;
1714 int uvalpha1 = 4096 - uvalpha;
1719 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1720 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1721 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1722 int U = (ubuf0[
i] * uvalpha1 + ubuf1[
i] * uvalpha) >> 19;
1723 int V = (vbuf0[
i] * uvalpha1 + vbuf1[
i] * uvalpha) >> 19;
1730 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1731 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1736 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1737 r,
g,
b, y, target, hasAlpha);
1743 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1744 const int16_t *abuf0,
uint8_t *dest,
int dstW,
1748 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1751 if (uvalpha < 2048) {
1752 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1753 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1754 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1755 int U = (ubuf0[
i] + 64) >> 7;
1756 int V = (vbuf0[
i] + 64) >> 7;
1763 A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1764 A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1769 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1770 r,
g,
b, y, target, hasAlpha);
1773 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1774 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1775 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1776 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1777 int U = (ubuf0[
i] + ubuf1[
i] + 128) >> 8;
1778 int V = (vbuf0[
i] + vbuf1[
i] + 128) >> 8;
1785 A1 = (abuf0[i * 2 ] + 64) >> 7;
1786 A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1791 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1792 r,
g,
b, y, target, hasAlpha);
1797 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ 1798 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ 1799 const int16_t **lumSrc, int lumFilterSize, \ 1800 const int16_t *chrFilter, const int16_t **chrUSrc, \ 1801 const int16_t **chrVSrc, int chrFilterSize, \ 1802 const int16_t **alpSrc, uint8_t *dest, int dstW, \ 1805 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ 1806 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ 1807 alpSrc, dest, dstW, y, fmt, hasAlpha); \ 1810 #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \ 1811 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ 1812 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ 1813 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 1814 const int16_t *abuf[2], uint8_t *dest, int dstW, \ 1815 int yalpha, int uvalpha, int y) \ 1817 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ 1818 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \ 1821 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \ 1822 YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \ 1823 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ 1824 const int16_t *ubuf[2], const int16_t *vbuf[2], \ 1825 const int16_t *abuf0, uint8_t *dest, int dstW, \ 1826 int uvalpha, int y) \ 1828 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ 1829 dstW, uvalpha, y, fmt, hasAlpha); \ 1836 #if CONFIG_SWSCALE_ALPHA 1860 Y -= c->yuv2rgb_y_offset;
1861 Y *= c->yuv2rgb_y_coeff;
1863 R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
1864 G = (
unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1865 B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
1866 if ((R | G | B) & 0xC0000000) {
1874 dest[0] = hasAlpha ? A : 255;
1888 dest[3] = hasAlpha ? A : 255;
1891 dest[0] = hasAlpha ? A : 255;
1896 case AV_PIX_FMT_BGR24:
1905 dest[3] = hasAlpha ? A : 255;
1908 case AV_PIX_FMT_RGB4_BYTE:
1910 case AV_PIX_FMT_RGB8:
1914 switch (c->dither) {
1921 R += (7*err[0] + 1*c->dither_error[0][
i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1922 G += (7*err[1] + 1*c->dither_error[1][
i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1923 B += (7*err[2] + 1*c->dither_error[2][
i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
1924 c->dither_error[0][
i] = err[0];
1925 c->dither_error[1][
i] = err[1];
1926 c->dither_error[2][
i] = err[2];
1927 r = R >> (isrgb8 ? 5 : 7);
1928 g = G >> (isrgb8 ? 5 : 6);
1929 b = B >> (isrgb8 ? 6 : 7);
1930 r =
av_clip(r, 0, isrgb8 ? 7 : 1);
1931 g =
av_clip(g, 0, isrgb8 ? 7 : 3);
1932 b =
av_clip(b, 0, isrgb8 ? 3 : 1);
1933 err[0] = R - r*(isrgb8 ? 36 : 255);
1934 err[1] = G - g*(isrgb8 ? 36 : 85);
1935 err[2] = B - b*(isrgb8 ? 85 : 255);
1940 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff)) 1941 r = (((R >> 19) +
A_DITHER(i,y) -96)>>8);
1942 g = (((G >> 19) +
A_DITHER(i + 17,y) - 96)>>8);
1943 b = (((B >> 20) +
A_DITHER(i + 17*2,y) -96)>>8);
1948 r = (((R >> 21) +
A_DITHER(i,y)-256)>>8);
1949 g = (((G >> 19) +
A_DITHER(i + 17,y)-256)>>8);
1950 b = (((B >> 21) +
A_DITHER(i + 17*2,y)-256)>>8);
1959 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2) 1960 r = (((R >> 19) +
X_DITHER(i,y) - 96)>>8);
1961 g = (((G >> 19) +
X_DITHER(i + 17,y) - 96)>>8);
1962 b = (((B >> 20) +
X_DITHER(i + 17*2,y) - 96)>>8);
1967 r = (((R >> 21) +
X_DITHER(i,y)-256)>>8);
1968 g = (((G >> 19) +
X_DITHER(i + 17,y)-256)>>8);
1969 b = (((B >> 21) +
X_DITHER(i + 17*2,y)-256)>>8);
1979 dest[0] = r + 2*g + 8*
b;
1980 }
else if(target == AV_PIX_FMT_RGB4_BYTE) {
1981 dest[0] = b + 2*g + 8*
r;
1983 dest[0] = r + 8*g + 64*
b;
1984 }
else if(target == AV_PIX_FMT_RGB8) {
1985 dest[0] = b + 4*g + 32*
r;
1994 const int16_t **lumSrc,
int lumFilterSize,
1995 const int16_t *chrFilter,
const int16_t **chrUSrc,
1996 const int16_t **chrVSrc,
int chrFilterSize,
1997 const int16_t **alpSrc,
uint8_t *dest,
2009 for (i = 0; i < dstW; i++) {
2012 int U = (1<<9)-(128 << 19);
2013 int V = (1<<9)-(128 << 19);
2015 for (j = 0; j < lumFilterSize; j++) {
2016 Y += lumSrc[j][
i] * lumFilter[j];
2018 for (j = 0; j < chrFilterSize; j++) {
2019 U += chrUSrc[j][
i] * chrFilter[j];
2020 V += chrVSrc[j][
i] * chrFilter[j];
2027 for (j = 0; j < lumFilterSize; j++) {
2028 A += alpSrc[j][
i] * lumFilter[j];
2034 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2044 const int16_t *ubuf[2],
const int16_t *vbuf[2],
2045 const int16_t *abuf[2],
uint8_t *dest,
int dstW,
2046 int yalpha,
int uvalpha,
int y,
2049 const int16_t *buf0 = buf[0], *buf1 = buf[1],
2050 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
2051 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
2052 *abuf0 = hasAlpha ? abuf[0] :
NULL,
2053 *abuf1 = hasAlpha ? abuf[1] :
NULL;
2054 int yalpha1 = 4096 - yalpha;
2055 int uvalpha1 = 4096 - uvalpha;
2068 for (i = 0; i < dstW; i++) {
2069 int Y = ( buf0[
i] * yalpha1 + buf1[
i] * yalpha ) >> 10;
2070 int U = (ubuf0[
i] * uvalpha1 + ubuf1[
i] * uvalpha-(128 << 19)) >> 10;
2071 int V = (vbuf0[
i] * uvalpha1 + vbuf1[
i] * uvalpha-(128 << 19)) >> 10;
2074 A = (abuf0[
i] * yalpha1 + abuf1[
i] * yalpha + (1<<18)) >> 19;
2079 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2089 const int16_t *ubuf[2],
const int16_t *vbuf[2],
2090 const int16_t *abuf0,
uint8_t *dest,
int dstW,
2094 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
2103 if (uvalpha < 2048) {
2105 for (i = 0; i < dstW; i++) {
2106 int Y = buf0[
i] * 4;
2107 int U = (ubuf0[
i] - (128<<7)) * 4;
2108 int V = (vbuf0[
i] - (128<<7)) * 4;
2111 A = (abuf0[
i] + 64) >> 7;
2116 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2120 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
2122 for (i = 0; i < dstW; i++) {
2123 int Y = buf0[
i] * 4;
2124 int U = (ubuf0[
i] + ubuf1[
i] - (128<<8)) * 2;
2125 int V = (vbuf0[
i] + vbuf1[
i] - (128<<8)) * 2;
2128 A = (abuf0[
i] + 64) >> 7;
2133 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2149 #if CONFIG_SWSCALE_ALPHA 2170 const int16_t **lumSrc,
int lumFilterSize,
2171 const int16_t *chrFilter,
const int16_t **chrUSrc,
2172 const int16_t **chrVSrc,
int chrFilterSize,
2173 const int16_t **alpSrc,
uint8_t **dest,
2179 uint16_t **dest16 = (uint16_t**)dest;
2183 for (i = 0; i < dstW; i++) {
2186 int U = (1 << 9) - (128 << 19);
2187 int V = (1 << 9) - (128 << 19);
2190 for (j = 0; j < lumFilterSize; j++)
2191 Y += lumSrc[j][i] * lumFilter[j];
2193 for (j = 0; j < chrFilterSize; j++) {
2194 U += chrUSrc[j][
i] * chrFilter[j];
2195 V += chrVSrc[j][
i] * chrFilter[j];
2205 for (j = 0; j < lumFilterSize; j++)
2206 A += alpSrc[j][i] * lumFilter[j];
2219 if ((R | G | B) & 0xC0000000) {
2226 dest16[0][
i] = G >>
SH;
2227 dest16[1][
i] = B >>
SH;
2228 dest16[2][
i] = R >>
SH;
2230 dest16[3][
i] = A >> (SH - 3);
2232 dest[0][
i] = G >> 22;
2233 dest[1][
i] = B >> 22;
2234 dest[2][
i] = R >> 22;
2236 dest[3][
i] = A >> 19;
2239 if (SH != 22 && (!
isBE(c->
dstFormat)) != (!HAVE_BIGENDIAN)) {
2240 for (i = 0; i < dstW; i++) {
2252 const int16_t **lumSrcx,
int lumFilterSize,
2253 const int16_t *chrFilter,
const int16_t **chrUSrcx,
2254 const int16_t **chrVSrcx,
int chrFilterSize,
2255 const int16_t **alpSrcx,
uint8_t **dest,
2261 uint16_t **dest16 = (uint16_t**)dest;
2267 for (i = 0; i < dstW; i++) {
2269 int Y = -0x40000000;
2270 int U = -(128 << 23);
2271 int V = -(128 << 23);
2274 for (j = 0; j < lumFilterSize; j++)
2275 Y += lumSrc[j][i] * (
unsigned)lumFilter[j];
2277 for (j = 0; j < chrFilterSize; j++) {
2278 U += chrUSrc[j][
i] * (unsigned)chrFilter[j];
2279 V += chrVSrc[j][
i] * (unsigned)chrFilter[j];
2290 for (j = 0; j < lumFilterSize; j++)
2291 A += alpSrc[j][i] * (
unsigned)lumFilter[j];
2308 dest16[0][
i] = G >> 14;
2309 dest16[1][
i] = B >> 14;
2310 dest16[2][
i] = R >> 14;
2315 for (i = 0; i < dstW; i++) {
2327 const int16_t **lumSrcx,
int lumFilterSize,
2328 const int16_t *chrFilter,
const int16_t **chrUSrcx,
2329 const int16_t **chrVSrcx,
int chrFilterSize,
2330 const int16_t **alpSrcx,
uint8_t **dest,
2336 uint32_t **dest32 = (uint32_t**)dest;
2341 static const float float_mult = 1.0f / 65535.0f;
2343 for (i = 0; i < dstW; i++) {
2345 int Y = -0x40000000;
2346 int U = -(128 << 23);
2347 int V = -(128 << 23);
2350 for (j = 0; j < lumFilterSize; j++)
2351 Y += lumSrc[j][i] * (
unsigned)lumFilter[j];
2353 for (j = 0; j < chrFilterSize; j++) {
2354 U += chrUSrc[j][
i] * (unsigned)chrFilter[j];
2355 V += chrVSrc[j][
i] * (unsigned)chrFilter[j];
2366 for (j = 0; j < lumFilterSize; j++)
2367 A += alpSrc[j][i] * (
unsigned)lumFilter[j];
2391 for (i = 0; i < dstW; i++) {
2403 const int16_t *ubuf[2],
const int16_t *vbuf[2],
2404 const int16_t *abuf0,
uint8_t *dest,
int dstW,
2407 int hasAlpha = !!abuf0;
2410 for (i = 0; i < dstW; i++) {
2411 int Y = (buf0[
i] + 64) >> 7;
2417 A = (abuf0[
i] + 64) >> 7;
2423 dest[i * 2 + 1] = hasAlpha ? A : 255;
2429 const int16_t *ubuf[2],
const int16_t *vbuf[2],
2430 const int16_t *abuf[2],
uint8_t *dest,
int dstW,
2431 int yalpha,
int uvalpha,
int y)
2433 int hasAlpha = abuf && abuf[0] && abuf[1];
2434 const int16_t *buf0 = buf[0], *buf1 = buf[1],
2435 *abuf0 = hasAlpha ? abuf[0] :
NULL,
2436 *abuf1 = hasAlpha ? abuf[1] :
NULL;
2437 int yalpha1 = 4096 - yalpha;
2442 for (i = 0; i < dstW; i++) {
2443 int Y = (buf0[
i] * yalpha1 + buf1[
i] * yalpha) >> 19;
2449 A = (abuf0[
i] * yalpha1 + abuf1[
i] * yalpha) >> 19;
2454 dest[i * 2 + 1] = hasAlpha ? A : 255;
2460 const int16_t **lumSrc,
int lumFilterSize,
2461 const int16_t *chrFilter,
const int16_t **chrUSrc,
2462 const int16_t **chrVSrc,
int chrFilterSize,
2463 const int16_t **alpSrc,
uint8_t *dest,
int dstW,
int y)
2465 int hasAlpha = !!alpSrc;
2468 for (i = 0; i < dstW; i++) {
2470 int Y = 1 << 18,
A = 1 << 18;
2472 for (j = 0; j < lumFilterSize; j++)
2473 Y += lumSrc[j][i] * lumFilter[j];
2480 for (j = 0; j < lumFilterSize; j++)
2481 A += alpSrc[j][i] * lumFilter[j];
2490 dest[2 * i + 1] = hasAlpha ?
A : 255;
2496 const int16_t **_lumSrc,
int lumFilterSize,
2497 const int16_t *chrFilter,
const int16_t **_chrUSrc,
2498 const int16_t **_chrVSrc,
int chrFilterSize,
2499 const int16_t **_alpSrc,
uint8_t *dest,
int dstW,
int y)
2502 **chrUSrc = (
const int32_t **) _chrUSrc,
2503 **chrVSrc = (
const int32_t **) _chrVSrc,
2504 **alpSrc = (
const int32_t **) _alpSrc;
2505 int hasAlpha = !!alpSrc;
2508 for (i = 0; i < dstW; i++) {
2509 int Y = 1 << 14,
U = 1 << 14;
2510 int V = 1 << 14,
A = 1 << 14;
2518 for (j = 0; j < lumFilterSize; j++)
2519 Y += lumSrc[j][i] * (
unsigned)lumFilter[j];
2521 for (j = 0; j < chrFilterSize; j++)
2522 U += chrUSrc[j][i] * (
unsigned)chrFilter[j];
2524 for (j = 0; j < chrFilterSize; j++)
2525 V += chrVSrc[j][i] * (
unsigned)chrFilter[j];
2528 for (j = 0; j < lumFilterSize; j++)
2529 A += alpSrc[j][i] * (
unsigned)lumFilter[j];
2536 AV_WL16(dest + 8 * i, hasAlpha ?
A : 65535);
2559 }
else if (
is16BPS(dstFormat)) {
2560 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2561 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2565 }
else if (
isNBPS(dstFormat)) {
2567 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2568 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2570 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2571 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2573 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2574 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2576 *yuv2planeX =
isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2577 *yuv2plane1 =
isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2581 *yuv2planeX = yuv2planeX_floatBE_c;
2582 *yuv2plane1 = yuv2plane1_floatBE_c;
2584 *yuv2planeX = yuv2planeX_floatLE_c;
2585 *yuv2plane1 = yuv2plane1_floatLE_c;
2588 *yuv2planeX = yuv2planeX_8_c;
2595 switch (dstFormat) {
2598 *yuv2packedX = yuv2rgba32_full_X_c;
2599 *yuv2packed2 = yuv2rgba32_full_2_c;
2600 *yuv2packed1 = yuv2rgba32_full_1_c;
2602 #if CONFIG_SWSCALE_ALPHA 2604 *yuv2packedX = yuv2rgba32_full_X_c;
2605 *yuv2packed2 = yuv2rgba32_full_2_c;
2606 *yuv2packed1 = yuv2rgba32_full_1_c;
2610 *yuv2packedX = yuv2rgbx32_full_X_c;
2611 *yuv2packed2 = yuv2rgbx32_full_2_c;
2612 *yuv2packed1 = yuv2rgbx32_full_1_c;
2618 *yuv2packedX = yuv2argb32_full_X_c;
2619 *yuv2packed2 = yuv2argb32_full_2_c;
2620 *yuv2packed1 = yuv2argb32_full_1_c;
2622 #if CONFIG_SWSCALE_ALPHA 2624 *yuv2packedX = yuv2argb32_full_X_c;
2625 *yuv2packed2 = yuv2argb32_full_2_c;
2626 *yuv2packed1 = yuv2argb32_full_1_c;
2630 *yuv2packedX = yuv2xrgb32_full_X_c;
2631 *yuv2packed2 = yuv2xrgb32_full_2_c;
2632 *yuv2packed1 = yuv2xrgb32_full_1_c;
2638 *yuv2packedX = yuv2bgra32_full_X_c;
2639 *yuv2packed2 = yuv2bgra32_full_2_c;
2640 *yuv2packed1 = yuv2bgra32_full_1_c;
2642 #if CONFIG_SWSCALE_ALPHA 2644 *yuv2packedX = yuv2bgra32_full_X_c;
2645 *yuv2packed2 = yuv2bgra32_full_2_c;
2646 *yuv2packed1 = yuv2bgra32_full_1_c;
2650 *yuv2packedX = yuv2bgrx32_full_X_c;
2651 *yuv2packed2 = yuv2bgrx32_full_2_c;
2652 *yuv2packed1 = yuv2bgrx32_full_1_c;
2658 *yuv2packedX = yuv2abgr32_full_X_c;
2659 *yuv2packed2 = yuv2abgr32_full_2_c;
2660 *yuv2packed1 = yuv2abgr32_full_1_c;
2662 #if CONFIG_SWSCALE_ALPHA 2664 *yuv2packedX = yuv2abgr32_full_X_c;
2665 *yuv2packed2 = yuv2abgr32_full_2_c;
2666 *yuv2packed1 = yuv2abgr32_full_1_c;
2670 *yuv2packedX = yuv2xbgr32_full_X_c;
2671 *yuv2packed2 = yuv2xbgr32_full_2_c;
2672 *yuv2packed1 = yuv2xbgr32_full_1_c;
2677 #if CONFIG_SWSCALE_ALPHA 2679 *yuv2packedX = yuv2rgba64le_full_X_c;
2680 *yuv2packed2 = yuv2rgba64le_full_2_c;
2681 *yuv2packed1 = yuv2rgba64le_full_1_c;
2685 *yuv2packedX = yuv2rgbx64le_full_X_c;
2686 *yuv2packed2 = yuv2rgbx64le_full_2_c;
2687 *yuv2packed1 = yuv2rgbx64le_full_1_c;
2691 #if CONFIG_SWSCALE_ALPHA 2693 *yuv2packedX = yuv2rgba64be_full_X_c;
2694 *yuv2packed2 = yuv2rgba64be_full_2_c;
2695 *yuv2packed1 = yuv2rgba64be_full_1_c;
2699 *yuv2packedX = yuv2rgbx64be_full_X_c;
2700 *yuv2packed2 = yuv2rgbx64be_full_2_c;
2701 *yuv2packed1 = yuv2rgbx64be_full_1_c;
2705 #if CONFIG_SWSCALE_ALPHA 2707 *yuv2packedX = yuv2bgra64le_full_X_c;
2708 *yuv2packed2 = yuv2bgra64le_full_2_c;
2709 *yuv2packed1 = yuv2bgra64le_full_1_c;
2713 *yuv2packedX = yuv2bgrx64le_full_X_c;
2714 *yuv2packed2 = yuv2bgrx64le_full_2_c;
2715 *yuv2packed1 = yuv2bgrx64le_full_1_c;
2719 #if CONFIG_SWSCALE_ALPHA 2721 *yuv2packedX = yuv2bgra64be_full_X_c;
2722 *yuv2packed2 = yuv2bgra64be_full_2_c;
2723 *yuv2packed1 = yuv2bgra64be_full_1_c;
2727 *yuv2packedX = yuv2bgrx64be_full_X_c;
2728 *yuv2packed2 = yuv2bgrx64be_full_2_c;
2729 *yuv2packed1 = yuv2bgrx64be_full_1_c;
2734 *yuv2packedX = yuv2rgb24_full_X_c;
2735 *yuv2packed2 = yuv2rgb24_full_2_c;
2736 *yuv2packed1 = yuv2rgb24_full_1_c;
2739 *yuv2packedX = yuv2bgr24_full_X_c;
2740 *yuv2packed2 = yuv2bgr24_full_2_c;
2741 *yuv2packed1 = yuv2bgr24_full_1_c;
2744 *yuv2packedX = yuv2rgb48le_full_X_c;
2745 *yuv2packed2 = yuv2rgb48le_full_2_c;
2746 *yuv2packed1 = yuv2rgb48le_full_1_c;
2749 *yuv2packedX = yuv2bgr48le_full_X_c;
2750 *yuv2packed2 = yuv2bgr48le_full_2_c;
2751 *yuv2packed1 = yuv2bgr48le_full_1_c;
2754 *yuv2packedX = yuv2rgb48be_full_X_c;
2755 *yuv2packed2 = yuv2rgb48be_full_2_c;
2756 *yuv2packed1 = yuv2rgb48be_full_1_c;
2759 *yuv2packedX = yuv2bgr48be_full_X_c;
2760 *yuv2packed2 = yuv2bgr48be_full_2_c;
2761 *yuv2packed1 = yuv2bgr48be_full_1_c;
2764 *yuv2packedX = yuv2bgr4_byte_full_X_c;
2765 *yuv2packed2 = yuv2bgr4_byte_full_2_c;
2766 *yuv2packed1 = yuv2bgr4_byte_full_1_c;
2769 *yuv2packedX = yuv2rgb4_byte_full_X_c;
2770 *yuv2packed2 = yuv2rgb4_byte_full_2_c;
2771 *yuv2packed1 = yuv2rgb4_byte_full_1_c;
2774 *yuv2packedX = yuv2bgr8_full_X_c;
2775 *yuv2packed2 = yuv2bgr8_full_2_c;
2776 *yuv2packed1 = yuv2bgr8_full_1_c;
2779 *yuv2packedX = yuv2rgb8_full_X_c;
2780 *yuv2packed2 = yuv2rgb8_full_2_c;
2781 *yuv2packed1 = yuv2rgb8_full_1_c;
2812 if (!*yuv2packedX && !*yuv2anyX)
2816 switch (dstFormat) {
2818 #if CONFIG_SWSCALE_ALPHA 2820 *yuv2packed1 = yuv2rgba64le_1_c;
2821 *yuv2packed2 = yuv2rgba64le_2_c;
2822 *yuv2packedX = yuv2rgba64le_X_c;
2826 *yuv2packed1 = yuv2rgbx64le_1_c;
2827 *yuv2packed2 = yuv2rgbx64le_2_c;
2828 *yuv2packedX = yuv2rgbx64le_X_c;
2832 #if CONFIG_SWSCALE_ALPHA 2834 *yuv2packed1 = yuv2rgba64be_1_c;
2835 *yuv2packed2 = yuv2rgba64be_2_c;
2836 *yuv2packedX = yuv2rgba64be_X_c;
2840 *yuv2packed1 = yuv2rgbx64be_1_c;
2841 *yuv2packed2 = yuv2rgbx64be_2_c;
2842 *yuv2packedX = yuv2rgbx64be_X_c;
2846 #if CONFIG_SWSCALE_ALPHA 2848 *yuv2packed1 = yuv2bgra64le_1_c;
2849 *yuv2packed2 = yuv2bgra64le_2_c;
2850 *yuv2packedX = yuv2bgra64le_X_c;
2854 *yuv2packed1 = yuv2bgrx64le_1_c;
2855 *yuv2packed2 = yuv2bgrx64le_2_c;
2856 *yuv2packedX = yuv2bgrx64le_X_c;
2860 #if CONFIG_SWSCALE_ALPHA 2862 *yuv2packed1 = yuv2bgra64be_1_c;
2863 *yuv2packed2 = yuv2bgra64be_2_c;
2864 *yuv2packedX = yuv2bgra64be_X_c;
2868 *yuv2packed1 = yuv2bgrx64be_1_c;
2869 *yuv2packed2 = yuv2bgrx64be_2_c;
2870 *yuv2packedX = yuv2bgrx64be_X_c;
2874 *yuv2packed1 = yuv2rgb48le_1_c;
2875 *yuv2packed2 = yuv2rgb48le_2_c;
2876 *yuv2packedX = yuv2rgb48le_X_c;
2879 *yuv2packed1 = yuv2rgb48be_1_c;
2880 *yuv2packed2 = yuv2rgb48be_2_c;
2881 *yuv2packedX = yuv2rgb48be_X_c;
2884 *yuv2packed1 = yuv2bgr48le_1_c;
2885 *yuv2packed2 = yuv2bgr48le_2_c;
2886 *yuv2packedX = yuv2bgr48le_X_c;
2889 *yuv2packed1 = yuv2bgr48be_1_c;
2890 *yuv2packed2 = yuv2bgr48be_2_c;
2891 *yuv2packedX = yuv2bgr48be_X_c;
2893 case AV_PIX_FMT_RGB32:
2896 *yuv2packed1 = yuv2rgb32_1_c;
2897 *yuv2packed2 = yuv2rgb32_2_c;
2898 *yuv2packedX = yuv2rgb32_X_c;
2900 #if CONFIG_SWSCALE_ALPHA 2902 *yuv2packed1 = yuv2rgba32_1_c;
2903 *yuv2packed2 = yuv2rgba32_2_c;
2904 *yuv2packedX = yuv2rgba32_X_c;
2908 *yuv2packed1 = yuv2rgbx32_1_c;
2909 *yuv2packed2 = yuv2rgbx32_2_c;
2910 *yuv2packedX = yuv2rgbx32_X_c;
2917 *yuv2packed1 = yuv2rgb32_1_1_c;
2918 *yuv2packed2 = yuv2rgb32_1_2_c;
2919 *yuv2packedX = yuv2rgb32_1_X_c;
2921 #if CONFIG_SWSCALE_ALPHA 2923 *yuv2packed1 = yuv2rgba32_1_1_c;
2924 *yuv2packed2 = yuv2rgba32_1_2_c;
2925 *yuv2packedX = yuv2rgba32_1_X_c;
2929 *yuv2packed1 = yuv2rgbx32_1_1_c;
2930 *yuv2packed2 = yuv2rgbx32_1_2_c;
2931 *yuv2packedX = yuv2rgbx32_1_X_c;
2936 *yuv2packed1 = yuv2rgb24_1_c;
2937 *yuv2packed2 = yuv2rgb24_2_c;
2938 *yuv2packedX = yuv2rgb24_X_c;
2941 *yuv2packed1 = yuv2bgr24_1_c;
2942 *yuv2packed2 = yuv2bgr24_2_c;
2943 *yuv2packedX = yuv2bgr24_X_c;
2949 *yuv2packed1 = yuv2rgb16_1_c;
2950 *yuv2packed2 = yuv2rgb16_2_c;
2951 *yuv2packedX = yuv2rgb16_X_c;
2957 *yuv2packed1 = yuv2rgb15_1_c;
2958 *yuv2packed2 = yuv2rgb15_2_c;
2959 *yuv2packedX = yuv2rgb15_X_c;
2965 *yuv2packed1 = yuv2rgb12_1_c;
2966 *yuv2packed2 = yuv2rgb12_2_c;
2967 *yuv2packedX = yuv2rgb12_X_c;
2971 *yuv2packed1 = yuv2rgb8_1_c;
2972 *yuv2packed2 = yuv2rgb8_2_c;
2973 *yuv2packedX = yuv2rgb8_X_c;
2977 *yuv2packed1 = yuv2rgb4_1_c;
2978 *yuv2packed2 = yuv2rgb4_2_c;
2979 *yuv2packedX = yuv2rgb4_X_c;
2983 *yuv2packed1 = yuv2rgb4b_1_c;
2984 *yuv2packed2 = yuv2rgb4b_2_c;
2985 *yuv2packedX = yuv2rgb4b_X_c;
2989 *yuv2packed1 = yuv2x2rgb10_1_c;
2990 *yuv2packed2 = yuv2x2rgb10_2_c;
2991 *yuv2packedX = yuv2x2rgb10_X_c;
2995 switch (dstFormat) {
2997 *yuv2packed1 = yuv2monowhite_1_c;
2998 *yuv2packed2 = yuv2monowhite_2_c;
2999 *yuv2packedX = yuv2monowhite_X_c;
3002 *yuv2packed1 = yuv2monoblack_1_c;
3003 *yuv2packed2 = yuv2monoblack_2_c;
3004 *yuv2packedX = yuv2monoblack_X_c;
3007 *yuv2packed1 = yuv2yuyv422_1_c;
3008 *yuv2packed2 = yuv2yuyv422_2_c;
3009 *yuv2packedX = yuv2yuyv422_X_c;
3012 *yuv2packed1 = yuv2yvyu422_1_c;
3013 *yuv2packed2 = yuv2yvyu422_2_c;
3014 *yuv2packedX = yuv2yvyu422_X_c;
3017 *yuv2packed1 = yuv2uyvy422_1_c;
3018 *yuv2packed2 = yuv2uyvy422_2_c;
3019 *yuv2packedX = yuv2uyvy422_X_c;
3027 *yuv2packed1 = yuv2ya16le_1_c;
3028 *yuv2packed2 = yuv2ya16le_2_c;
3029 *yuv2packedX = yuv2ya16le_X_c;
3032 *yuv2packed1 = yuv2ya16be_1_c;
3033 *yuv2packed2 = yuv2ya16be_2_c;
3034 *yuv2packedX = yuv2ya16be_X_c;
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
IEEE-754 single precision Y, 32bpp, big-endian.
planar GBR 4:4:4:4 40bpp, little-endian
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
static void yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
static int shift(int a, int b)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define YUVRGB_TABLE_HEADROOM
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
8 bits gray, 8 bits alpha
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined ...
packed RGB 8:8:8, 24bpp, RGBRGB...
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
#define accumulate_bit(acc, val)
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
const uint8_t ff_dither_2x2_8[][8]
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined ...
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
static void yuv2p010l1_LE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
uint8_t * table_bU[256+2 *YUVRGB_TABLE_HEADROOM]
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Convenience header that includes libavutil's core.
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
planar GBR 4:4:4 36bpp, little-endian
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
#define AV_PIX_FMT_RGB444
planar GBR 4:4:4 36bpp, big-endian
const uint8_t ff_dither_8x8_220[][8]
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
#define output_pixels(pos, Y1, U, Y2, V)
Macro definitions for various function/variable attributes.
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
const uint8_t ff_dither_8x8_32[][8]
planar GBRA 4:4:4:4 64bpp, big-endian
#define av_assert0(cond)
assert() equivalent, that is always enabled.
const uint8_t ff_dither_2x2_4[][8]
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
const uint8_t ff_dither_4x4_16[][8]
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
static av_always_inline void yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined ...
#define SWS_FULL_CHR_H_INT
#define u(width, name, range_min, range_max)
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
planar GBR 4:4:4 48bpp, big-endian
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
enum AVPixelFormat dstFormat
Destination pixel format.
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
uint8_t * table_gU[256+2 *YUVRGB_TABLE_HEADROOM]
static void yuv2p010l1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian)
#define yuv2planeX_float(template, dest_type, BE_LE)
planar GBR 4:4:4 27bpp, big-endian
#define AV_PIX_FMT_BGR32_1
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
16 bits gray, 16 bits alpha (big-endian)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
like NV12, with 16bpp per component, big-endian
static void yuv2p010l1_BE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
static const uint8_t dither[8][8]
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
simple assert() macros that are a bit more flexible than ISO C assert().
like NV12, with 16bpp per component, little-endian
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
static av_always_inline void yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
as above, but U and V bytes are swapped
planar GBR 4:4:4:4 48bpp, big-endian
static av_always_inline void yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src, float *dest, int dstW)
planar GBR 4:4:4:4 40bpp, big-endian
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
static av_always_inline void yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
int table_gV[256+2 *YUVRGB_TABLE_HEADROOM]
#define AV_PIX_FMT_X2RGB10
static void yuv2p010cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
packed RGB 8:8:8, 24bpp, BGRBGR...
uint8_t * table_rV[256+2 *YUVRGB_TABLE_HEADROOM]
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
static av_always_inline void yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **unused_chrUSrc, const int32_t **unused_chrVSrc, int unused_chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined ...
planar GBR 4:4:4:4 48bpp, little-endian
#define AV_PIX_FMT_BGR555
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes)
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
planar GBR 4:4:4 30bpp, big-endian
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
planar GBR 4:4:4 42bpp, little-endian
const uint8_t ff_dither_8x8_73[][8]
as above, but U and V bytes are swapped
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
planar GBR 4:4:4 42bpp, big-endian
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined ...
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
static void yuv2p010lX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian)
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
static av_always_inline void yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint32_t *dest, int dstW)
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
#define AV_PIX_FMT_BGR565
static av_always_inline void yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
static void yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb...
planar GBRA 4:4:4:4 32bpp
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
planar GBR 4:4:4 27bpp, little-endian
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
#define AV_PIX_FMT_BGR444
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
#define output_pixel(pos, val, bias, signedness)
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
#define AV_PIX_FMT_RGB555
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
#define AV_PIX_FMT_RGB32_1
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
16 bits gray, 16 bits alpha (little-endian)
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
#define AV_PIX_FMT_RGB565
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
planar GBR 4:4:4 48bpp, little-endian
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined ...
#define yuv2plane1_float(template, dest_type, BE_LE)
static void yuv2p016cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
int depth
Number of bits in the component.
IEEE-754 single precision Y, 32bpp, little-endian.
planar GBRA 4:4:4:4 64bpp, little-endian
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
int flags
Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
AVPixelFormat
Pixel format.
static double val(void *priv, double ch)
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
planar GBR 4:4:4 30bpp, little-endian
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
A small problem here is that trying all byte sequences one megabyte in length and selecting the "best looking" sequence will yield far too many cases to try. But first, a word about quality, which is also called distortion. Distortion can be quantified by almost any quality measurement one chooses. Commonly, the sum of squared differences is used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step:
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian