#undef PROFILE_THE_BEAST

/* Short aliases for plain byte types used throughout this file. */
typedef unsigned char ubyte;
typedef signed char   sbyte;
146 static const vector
unsigned char 147 perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
148 0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
149 perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
150 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
151 perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
152 0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
153 perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
154 0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
156 #define vec_merge3(x2, x1, x0, y0, y1, y2) \ 158 __typeof__(x0) o0, o2, o3; \ 159 o0 = vec_mergeh(x0, x1); \ 160 y0 = vec_perm(o0, x2, perm_rgb_0); \ 161 o2 = vec_perm(o0, x2, perm_rgb_1); \ 162 o3 = vec_mergel(x0, x1); \ 163 y1 = vec_perm(o3, o2, perm_rgb_2); \ 164 y2 = vec_perm(o3, o2, perm_rgb_3); \ 167 #define vec_mstbgr24(x0, x1, x2, ptr) \ 169 __typeof__(x0) _0, _1, _2; \ 170 vec_merge3(x0, x1, x2, _0, _1, _2); \ 171 vec_st(_0, 0, ptr++); \ 172 vec_st(_1, 0, ptr++); \ 173 vec_st(_2, 0, ptr++); \ 176 #define vec_mstrgb24(x0, x1, x2, ptr) \ 178 __typeof__(x0) _0, _1, _2; \ 179 vec_merge3(x2, x1, x0, _0, _1, _2); \ 180 vec_st(_0, 0, ptr++); \ 181 vec_st(_1, 0, ptr++); \ 182 vec_st(_2, 0, ptr++); \ 189 #define vec_mstrgb32(T, x0, x1, x2, x3, ptr) \ 192 _0 = vec_mergeh(x0, x1); \ 193 _1 = vec_mergeh(x2, x3); \ 194 _2 = (T) vec_mergeh((vector unsigned short) _0, \ 195 (vector unsigned short) _1); \ 196 _3 = (T) vec_mergel((vector unsigned short) _0, \ 197 (vector unsigned short) _1); \ 198 vec_st(_2, 0 * 16, (T *) ptr); \ 199 vec_st(_3, 1 * 16, (T *) ptr); \ 200 _0 = vec_mergel(x0, x1); \ 201 _1 = vec_mergel(x2, x3); \ 202 _2 = (T) vec_mergeh((vector unsigned short) _0, \ 203 (vector unsigned short) _1); \ 204 _3 = (T) vec_mergel((vector unsigned short) _0, \ 205 (vector unsigned short) _1); \ 206 vec_st(_2, 2 * 16, (T *) ptr); \ 207 vec_st(_3, 3 * 16, (T *) ptr); \ 226 (vector signed short) \ 227 vec_perm(x, (__typeof__(x)) { 0 }, \ 228 ((vector unsigned char) { \ 229 0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03, \ 230 0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 })) 233 (vector signed short) \ 234 vec_perm(x, (__typeof__(x)) { 0 }, \ 235 ((vector unsigned char) { \ 236 0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B, \ 237 0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F })) 239 #define vec_unh(x)(vector signed short) vec_mergeh(x,(__typeof__(x)) { 0 }) 240 #define vec_unl(x)(vector signed short) vec_mergel(x,(__typeof__(x)) { 0 }) 243 #define 
vec_clip_s16(x) \ 244 vec_max(vec_min(x, ((vector signed short) { \ 245 235, 235, 235, 235, 235, 235, 235, 235 })), \ 246 ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 })) 248 #define vec_packclp(x, y) \ 249 (vector unsigned char) \ 250 vec_packs((vector unsigned short) \ 251 vec_max(x, ((vector signed short) { 0 })), \ 252 (vector unsigned short) \ 253 vec_max(y, ((vector signed short) { 0 }))) 255 static inline void cvtyuvtoRGB(
SwsContext *
c, vector
signed short Y,
256 vector
signed short U, vector
signed short V,
257 vector
signed short *
R, vector
signed short *
G,
258 vector
signed short *
B)
260 vector
signed short vx, ux, uvx;
262 Y = vec_mradds(Y, c->CY, c->OY);
263 U = vec_sub(U, (vector
signed short)
264 vec_splat((vector
signed short) { 128 }, 0));
265 V = vec_sub(V, (vector
signed short)
266 vec_splat((vector
signed short) { 128 }, 0));
269 ux = vec_sl(U, c->CSHIFT);
270 *B = vec_mradds(ux, c->CBU, Y);
273 vx = vec_sl(V, c->CSHIFT);
274 *R = vec_mradds(vx, c->CRV, Y);
277 uvx = vec_mradds(U, c->CGU, Y);
278 *G = vec_mradds(V, c->CGV, uvx);
287 #define DEFCSP420_CVT(name, out_pixels) \ 288 static int altivec_ ## name(SwsContext *c, const unsigned char **in, \ 289 int *instrides, int srcSliceY, int srcSliceH, \ 290 unsigned char **oplanes, int *outstrides) \ 295 int instrides_scl[3]; \ 296 vector unsigned char y0, y1; \ 298 vector signed char u, v; \ 300 vector signed short Y0, Y1, Y2, Y3; \ 301 vector signed short U, V; \ 302 vector signed short vx, ux, uvx; \ 303 vector signed short vx0, ux0, uvx0; \ 304 vector signed short vx1, ux1, uvx1; \ 305 vector signed short R0, G0, B0; \ 306 vector signed short R1, G1, B1; \ 307 vector unsigned char R, G, B; \ 309 vector signed short lCY = c->CY; \ 310 vector signed short lOY = c->OY; \ 311 vector signed short lCRV = c->CRV; \ 312 vector signed short lCBU = c->CBU; \ 313 vector signed short lCGU = c->CGU; \ 314 vector signed short lCGV = c->CGV; \ 315 vector unsigned short lCSHIFT = c->CSHIFT; \ 317 const ubyte *y1i = in[0]; \ 318 const ubyte *y2i = in[0] + instrides[0]; \ 319 const ubyte *ui = in[1]; \ 320 const ubyte *vi = in[2]; \ 322 vector unsigned char *oute, *outo; \ 325 instrides_scl[0] = instrides[0] * 2 - w; \ 327 instrides_scl[1] = instrides[1] - w / 2; \ 329 instrides_scl[2] = instrides[2] - w / 2; \ 331 for (i = 0; i < h / 2; i++) { \ 332 oute = (vector unsigned char *)(oplanes[0] + outstrides[0] * \ 333 (srcSliceY + i * 2)); \ 334 outo = oute + (outstrides[0] >> 4); \ 335 vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0); \ 336 vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1); \ 338 for (j = 0; j < w / 16; j++) { \ 339 y0 = vec_xl(0, y1i); \ 341 y1 = vec_xl(0, y2i); \ 343 u = (vector signed char) vec_xl(0, ui); \ 345 v = (vector signed char) vec_xl(0, vi); \ 347 u = (vector signed char) \ 349 (vector signed char) \ 350 vec_splat((vector signed char) { 128 }, 0)); \ 351 v = (vector signed char) \ 353 (vector signed char) \ 354 vec_splat((vector signed char) { 128 }, 0)); \ 356 U = vec_unpackh(u); \ 357 V = 
vec_unpackh(v); \ 364 Y0 = vec_mradds(Y0, lCY, lOY); \ 365 Y1 = vec_mradds(Y1, lCY, lOY); \ 366 Y2 = vec_mradds(Y2, lCY, lOY); \ 367 Y3 = vec_mradds(Y3, lCY, lOY); \ 370 ux = vec_sl(U, lCSHIFT); \ 371 ux = vec_mradds(ux, lCBU, (vector signed short) { 0 }); \ 372 ux0 = vec_mergeh(ux, ux); \ 373 ux1 = vec_mergel(ux, ux); \ 376 vx = vec_sl(V, lCSHIFT); \ 377 vx = vec_mradds(vx, lCRV, (vector signed short) { 0 }); \ 378 vx0 = vec_mergeh(vx, vx); \ 379 vx1 = vec_mergel(vx, vx); \ 382 uvx = vec_mradds(U, lCGU, (vector signed short) { 0 }); \ 383 uvx = vec_mradds(V, lCGV, uvx); \ 384 uvx0 = vec_mergeh(uvx, uvx); \ 385 uvx1 = vec_mergel(uvx, uvx); \ 387 R0 = vec_add(Y0, vx0); \ 388 G0 = vec_add(Y0, uvx0); \ 389 B0 = vec_add(Y0, ux0); \ 390 R1 = vec_add(Y1, vx1); \ 391 G1 = vec_add(Y1, uvx1); \ 392 B1 = vec_add(Y1, ux1); \ 394 R = vec_packclp(R0, R1); \ 395 G = vec_packclp(G0, G1); \ 396 B = vec_packclp(B0, B1); \ 398 out_pixels(R, G, B, oute); \ 400 R0 = vec_add(Y2, vx0); \ 401 G0 = vec_add(Y2, uvx0); \ 402 B0 = vec_add(Y2, ux0); \ 403 R1 = vec_add(Y3, vx1); \ 404 G1 = vec_add(Y3, uvx1); \ 405 B1 = vec_add(Y3, ux1); \ 406 R = vec_packclp(R0, R1); \ 407 G = vec_packclp(G0, G1); \ 408 B = vec_packclp(B0, B1); \ 411 out_pixels(R, G, B, outo); \ 419 ui += instrides_scl[1]; \ 420 vi += instrides_scl[2]; \ 421 y1i += instrides_scl[0]; \ 422 y2i += instrides_scl[0]; \ 427 #define out_abgr(a, b, c, ptr) \ 428 vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr) 429 #define out_bgra(a, b, c, ptr) \ 430 vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr) 431 #define out_rgba(a, b, c, ptr) \ 432 vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr) 433 #define out_argb(a, b, c, ptr) \ 434 vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr) 435 #define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr) 436 #define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr) 438 DEFCSP420_CVT(yuv2_abgr, out_abgr)
439 DEFCSP420_CVT(yuv2_bgra, out_bgra)
440 DEFCSP420_CVT(yuv2_rgba, out_rgba)
441 DEFCSP420_CVT(yuv2_argb, out_argb)
442 DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
443 DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
447 static const vector
unsigned char 448 demux_u = { 0x10, 0x00, 0x10, 0x00,
449 0x10, 0x04, 0x10, 0x04,
450 0x10, 0x08, 0x10, 0x08,
451 0x10, 0x0c, 0x10, 0x0c },
452 demux_v = { 0x10, 0x02, 0x10, 0x02,
453 0x10, 0x06, 0x10, 0x06,
454 0x10, 0x0A, 0x10, 0x0A,
455 0x10, 0x0E, 0x10, 0x0E },
456 demux_y = { 0x10, 0x01, 0x10, 0x03,
457 0x10, 0x05, 0x10, 0x07,
458 0x10, 0x09, 0x10, 0x0B,
459 0x10, 0x0D, 0x10, 0x0F };
464 static int altivec_uyvy_rgb32(
SwsContext *c,
const unsigned char **
in,
465 int *instrides,
int srcSliceY,
int srcSliceH,
466 unsigned char **oplanes,
int *outstrides)
471 vector
unsigned char uyvy;
472 vector
signed short Y,
U,
V;
473 vector
signed short R0, G0,
B0,
R1, G1,
B1;
474 vector
unsigned char R,
G,
B;
475 vector
unsigned char *
out;
479 out = (vector
unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);
481 for (i = 0; i <
h; i++)
482 for (j = 0; j < w / 16; j++) {
483 uyvy = vec_ld(0, img);
485 U = (vector
signed short)
486 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_u);
487 V = (vector
signed short)
488 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_v);
489 Y = (vector
signed short)
490 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_y);
492 cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);
494 uyvy = vec_ld(16, img);
496 U = (vector
signed short)
497 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_u);
498 V = (vector
signed short)
499 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_v);
500 Y = (vector
signed short)
501 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_y);
503 cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);
505 R = vec_packclp(R0, R1);
506 G = vec_packclp(G0, G1);
507 B = vec_packclp(B0, B1);
510 out_rgba(R, G, B, out);
548 if ((c->
srcH & 0x1) != 0)
554 return altivec_yuv2_rgb24;
557 return altivec_yuv2_bgr24;
560 return altivec_yuv2_argb;
563 return altivec_yuv2_abgr;
566 return altivec_yuv2_rgba;
569 return altivec_yuv2_bgra;
570 default:
return NULL;
578 return altivec_uyvy_rgb32;
579 default:
return NULL;
589 const int inv_table[4],
597 vector
signed short vec;
603 buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;
604 buf.tmp[1] = -256 * brightness;
605 buf.tmp[2] = (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);
606 buf.tmp[3] = (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);
607 buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16));
608 buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16));
610 c->CSHIFT = (vector
unsigned short) vec_splat_u16(2);
611 c->CY = vec_splat((vector
signed short) buf.vec, 0);
612 c->OY = vec_splat((vector
signed short) buf.vec, 1);
613 c->CRV = vec_splat((vector
signed short) buf.vec, 2);
614 c->CBU = vec_splat((vector
signed short) buf.vec, 3);
615 c->CGU = vec_splat((vector
signed short) buf.vec, 4);
616 c->CGV = vec_splat((vector
signed short) buf.vec, 5);
624 const int16_t *lumFilter,
625 const int16_t **lumSrc,
627 const int16_t *chrFilter,
628 const int16_t **chrUSrc,
629 const int16_t **chrVSrc,
631 const int16_t **alpSrc,
637 vector
signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1,
U,
V;
638 vector
signed short R0, G0,
B0,
R1, G1,
B1;
640 vector
unsigned char R,
G,
B;
641 vector
unsigned char *
out, *nout;
643 vector
signed short RND = vec_splat_s16(1 << 3);
644 vector
unsigned short SCL = vec_splat_u16(4);
647 vector
signed short *YCoeffs, *CCoeffs;
649 YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
650 CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;
652 out = (vector
unsigned char *) dest;
654 for (i = 0; i < dstW; i += 16) {
658 for (j = 0; j < lumFilterSize; j++) {
659 X0 = vec_ld(0, &lumSrc[j][i]);
660 X1 = vec_ld(16, &lumSrc[j][i]);
661 Y0 = vec_mradds(X0, YCoeffs[j], Y0);
662 Y1 = vec_mradds(X1, YCoeffs[j], Y1);
668 for (j = 0; j < chrFilterSize; j++) {
669 X = vec_ld(0, &chrUSrc[j][i / 2]);
670 U = vec_mradds(X, CCoeffs[j], U);
671 X = vec_ld(0, &chrVSrc[j][i / 2]);
672 V = vec_mradds(X, CCoeffs[j], V);
676 Y0 = vec_sra(Y0, SCL);
677 Y1 = vec_sra(Y1, SCL);
681 Y0 = vec_clip_s16(Y0);
682 Y1 = vec_clip_s16(Y1);
695 U0 = vec_mergeh(U, U);
696 V0 = vec_mergeh(V, V);
698 U1 = vec_mergel(U, U);
699 V1 = vec_mergel(V, V);
701 cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
702 cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);
704 R = vec_packclp(R0, R1);
705 G = vec_packclp(G0, G1);
706 B = vec_packclp(B0, B1);
710 out_abgr(R, G, B, out);
713 out_bgra(R, G, B, out);
716 out_rgba(R, G, B, out);
719 out_argb(R, G, B, out);
722 out_rgb24(R, G, B, out);
725 out_bgr24(R, G, B, out);
731 static int printed_error_message;
732 if (!printed_error_message) {
734 "altivec_yuv2packedX doesn't support %s output\n",
736 printed_error_message = 1;
749 for (j = 0; j < lumFilterSize; j++) {
750 X0 = vec_ld(0, &lumSrc[j][i]);
751 X1 = vec_ld(16, &lumSrc[j][i]);
752 Y0 = vec_mradds(X0, YCoeffs[j], Y0);
753 Y1 = vec_mradds(X1, YCoeffs[j], Y1);
759 for (j = 0; j < chrFilterSize; j++) {
760 X = vec_ld(0, &chrUSrc[j][i / 2]);
761 U = vec_mradds(X, CCoeffs[j], U);
762 X = vec_ld(0, &chrVSrc[j][i / 2]);
763 V = vec_mradds(X, CCoeffs[j], V);
767 Y0 = vec_sra(Y0, SCL);
768 Y1 = vec_sra(Y1, SCL);
772 Y0 = vec_clip_s16(Y0);
773 Y1 = vec_clip_s16(Y1);
786 U0 = vec_mergeh(U, U);
787 V0 = vec_mergeh(V, V);
789 U1 = vec_mergel(U, U);
790 V1 = vec_mergel(V, V);
792 cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
793 cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);
795 R = vec_packclp(R0, R1);
796 G = vec_packclp(G0, G1);
797 B = vec_packclp(B0, B1);
799 nout = (vector
unsigned char *) scratch;
802 out_abgr(R, G, B, nout);
805 out_bgra(R, G, B, nout);
808 out_rgba(R, G, B, nout);
811 out_argb(R, G, B, nout);
814 out_rgb24(R, G, B, nout);
817 out_bgr24(R, G, B, nout);
822 "altivec_yuv2packedX doesn't support %s output\n",
827 memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
/* Generate a thin per-pixel-format entry point that forwards to
 * yuv2packedX_altivec() with the format baked in.
 * NOTE(review): the lumFilterSize/chrFilterSize parameter lines and the
 * body braces were lost in extraction and are reconstructed here —
 * confirm against the upstream file. */
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}
#define AV_CPU_FLAG_ALTIVEC
standard
av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c, const int inv_table[4], int brightness, int contrast, int saturation)
#define AV_LOG_WARNING
Something somehow does not look correct.
packed RGB 8:8:8, 24bpp, RGBRGB...
Macro definitions for various function/variable attributes.
int srcH
Height of source luma/alpha planes.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
enum AVPixelFormat dstFormat
Destination pixel format.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define R0(v, w, x, y, z, i)
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
as above, but U and V bytes are swapped
packed RGB 8:8:8, 24bpp, BGRBGR...
int(* SwsFunc)(struct SwsContext *context, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[])
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
enum AVPixelFormat srcFormat
Source pixel format.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are short
#define xf(width, name, var, range_min, range_max, subs,...)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
int srcW
Width of source luma/alpha planes.
AVPixelFormat
Pixel format.