FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
output.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "rgb2rgb.h"
36 #include "swscale.h"
37 #include "swscale_internal.h"
38 
40 { 1, 3, 1, 3, 1, 3, 1, 3, },
41 { 2, 0, 2, 0, 2, 0, 2, 0, },
42 { 1, 3, 1, 3, 1, 3, 1, 3, },
43 };
44 
46 { 6, 2, 6, 2, 6, 2, 6, 2, },
47 { 0, 4, 0, 4, 0, 4, 0, 4, },
48 { 6, 2, 6, 2, 6, 2, 6, 2, },
49 };
50 
52 { 8, 4, 11, 7, 8, 4, 11, 7, },
53 { 2, 14, 1, 13, 2, 14, 1, 13, },
54 { 10, 6, 9, 5, 10, 6, 9, 5, },
55 { 0, 12, 3, 15, 0, 12, 3, 15, },
56 { 8, 4, 11, 7, 8, 4, 11, 7, },
57 };
58 
60 { 17, 9, 23, 15, 16, 8, 22, 14, },
61 { 5, 29, 3, 27, 4, 28, 2, 26, },
62 { 21, 13, 19, 11, 20, 12, 18, 10, },
63 { 0, 24, 6, 30, 1, 25, 7, 31, },
64 { 16, 8, 22, 14, 17, 9, 23, 15, },
65 { 4, 28, 2, 26, 5, 29, 3, 27, },
66 { 20, 12, 18, 10, 21, 13, 19, 11, },
67 { 1, 25, 7, 31, 0, 24, 6, 30, },
68 { 17, 9, 23, 15, 16, 8, 22, 14, },
69 };
70 
72 { 0, 55, 14, 68, 3, 58, 17, 72, },
73 { 37, 18, 50, 32, 40, 22, 54, 35, },
74 { 9, 64, 5, 59, 13, 67, 8, 63, },
75 { 46, 27, 41, 23, 49, 31, 44, 26, },
76 { 2, 57, 16, 71, 1, 56, 15, 70, },
77 { 39, 21, 52, 34, 38, 19, 51, 33, },
78 { 11, 66, 7, 62, 10, 65, 6, 60, },
79 { 48, 30, 43, 25, 47, 29, 42, 24, },
80 { 0, 55, 14, 68, 3, 58, 17, 72, },
81 };
82 
83 #if 1
85 {117, 62, 158, 103, 113, 58, 155, 100, },
86 { 34, 199, 21, 186, 31, 196, 17, 182, },
87 {144, 89, 131, 76, 141, 86, 127, 72, },
88 { 0, 165, 41, 206, 10, 175, 52, 217, },
89 {110, 55, 151, 96, 120, 65, 162, 107, },
90 { 28, 193, 14, 179, 38, 203, 24, 189, },
91 {138, 83, 124, 69, 148, 93, 134, 79, },
92 { 7, 172, 48, 213, 3, 168, 45, 210, },
93 {117, 62, 158, 103, 113, 58, 155, 100, },
94 };
95 #elif 1
96 // tries to correct a gamma of 1.5
97 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
98 { 0, 143, 18, 200, 2, 156, 25, 215, },
99 { 78, 28, 125, 64, 89, 36, 138, 74, },
100 { 10, 180, 3, 161, 16, 195, 8, 175, },
101 {109, 51, 93, 38, 121, 60, 105, 47, },
102 { 1, 152, 23, 210, 0, 147, 20, 205, },
103 { 85, 33, 134, 71, 81, 30, 130, 67, },
104 { 14, 190, 6, 171, 12, 185, 5, 166, },
105 {117, 57, 101, 44, 113, 54, 97, 41, },
106 { 0, 143, 18, 200, 2, 156, 25, 215, },
107 };
108 #elif 1
109 // tries to correct a gamma of 2.0
110 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
111 { 0, 124, 8, 193, 0, 140, 12, 213, },
112 { 55, 14, 104, 42, 66, 19, 119, 52, },
113 { 3, 168, 1, 145, 6, 187, 3, 162, },
114 { 86, 31, 70, 21, 99, 39, 82, 28, },
115 { 0, 134, 11, 206, 0, 129, 9, 200, },
116 { 62, 17, 114, 48, 58, 16, 109, 45, },
117 { 5, 181, 2, 157, 4, 175, 1, 151, },
118 { 95, 36, 78, 26, 90, 34, 74, 24, },
119 { 0, 124, 8, 193, 0, 140, 12, 213, },
120 };
121 #else
122 // tries to correct a gamma of 2.5
123 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
124 { 0, 107, 3, 187, 0, 125, 6, 212, },
125 { 39, 7, 86, 28, 49, 11, 102, 36, },
126 { 1, 158, 0, 131, 3, 180, 1, 151, },
127 { 68, 19, 52, 12, 81, 25, 64, 17, },
128 { 0, 119, 5, 203, 0, 113, 4, 195, },
129 { 45, 9, 96, 33, 42, 8, 91, 30, },
130 { 2, 172, 1, 144, 2, 165, 0, 137, },
131 { 77, 23, 60, 15, 72, 21, 56, 14, },
132 { 0, 107, 3, 187, 0, 125, 6, 212, },
133 };
134 #endif
135 
/* Store one 16-bit sample: shift down by `shift`, clip to the signed or
 * unsigned 16-bit range (selected by `signedness`), add `bias`, and write
 * with the endianness selected by the enclosing function's `big_endian`. */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
142 
143 static av_always_inline void
144 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
145  int big_endian, int output_bits)
146 {
147  int i;
148  int shift = 3;
149  av_assert0(output_bits == 16);
150 
151  for (i = 0; i < dstW; i++) {
152  int val = src[i] + (1 << (shift - 1));
153  output_pixel(&dest[i], val, 0, uint);
154  }
155 }
156 
static av_always_inline void
yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    /* Vertically filter `filterSize` high-bit-depth lines down to one
     * 16-bit output line. */
    int i;
    int shift = 15;
    av_assert0(output_bits == 16);

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding constant for the final shift */
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs, the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            /* unsigned multiply: avoids signed-overflow UB in the accumulation */
            val += src[j][i] * (unsigned)filter[j];

        /* 0x8000 bias restores the 0x40000000 subtracted above (after >> 15) */
        output_pixel(&dest[i], val, 0x8000, int);
    }
}
182 
183 #undef output_pixel
184 
/* Store one sample of `output_bits` significant bits (9..14) in a 16-bit
 * container: shift down, clip to the unsigned output_bits range, write with
 * the endianness selected by the enclosing function's `big_endian`. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
191 
192 static av_always_inline void
193 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
194  int big_endian, int output_bits)
195 {
196  int i;
197  int shift = 15 - output_bits;
198 
199  for (i = 0; i < dstW; i++) {
200  int val = src[i] + (1 << (shift - 1));
201  output_pixel(&dest[i], val);
202  }
203 }
204 
205 static av_always_inline void
206 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
207  const int16_t **src, uint16_t *dest, int dstW,
208  int big_endian, int output_bits)
209 {
210  int i;
211  int shift = 11 + 16 - output_bits;
212 
213  for (i = 0; i < dstW; i++) {
214  int val = 1 << (shift - 1);
215  int j;
216 
217  for (j = 0; j < filterSize; j++)
218  val += src[j][i] * filter[j];
219 
220  output_pixel(&dest[i], val);
221  }
222 }
223 
224 #undef output_pixel
225 
/* Expand one pair of endianness-specific wrappers around the bit-depth
 * templates above: plane1 = unfiltered copy, planeX = vertical filtering.
 * `dither` and `offset` exist to match the generic planar-output signature
 * and are unused at these bit depths. */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}

/* Depths 9..14 share the "10" template over int16_t data; 16-bit data uses
 * the int32_t template. */
yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
252 
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    /* Vertically filter `filterSize` 15-bit lines to one 8-bit line,
     * seeding each accumulator with an ordered-dither value. */
    int i, j;

    for (i = 0; i < dstW; i++) {
        /* dither value placed in the fractional bits below the >> 19 */
        int acc = dither[(i + offset) & 7] << 12;

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(acc >> 19);
    }
}
267 
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    /* Unfiltered 15-bit -> 8-bit plane copy with ordered dithering:
     * the dither value acts as a per-pixel rounding offset. */
    int i;

    for (i = 0; i < dstW; i++)
        dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7);
}
277 
278 static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
279  const int16_t **chrUSrc, const int16_t **chrVSrc,
280  uint8_t *dest, int chrDstW)
281 {
282  enum AVPixelFormat dstFormat = c->dstFormat;
283  const uint8_t *chrDither = c->chrDither8;
284  int i;
285 
286  if (dstFormat == AV_PIX_FMT_NV12)
287  for (i=0; i<chrDstW; i++) {
288  int u = chrDither[i & 7] << 12;
289  int v = chrDither[(i + 3) & 7] << 12;
290  int j;
291  for (j=0; j<chrFilterSize; j++) {
292  u += chrUSrc[j][i] * chrFilter[j];
293  v += chrVSrc[j][i] * chrFilter[j];
294  }
295 
296  dest[2*i]= av_clip_uint8(u>>19);
297  dest[2*i+1]= av_clip_uint8(v>>19);
298  }
299  else
300  for (i=0; i<chrDstW; i++) {
301  int u = chrDither[i & 7] << 12;
302  int v = chrDither[(i + 3) & 7] << 12;
303  int j;
304  for (j=0; j<chrFilterSize; j++) {
305  u += chrUSrc[j][i] * chrFilter[j];
306  v += chrVSrc[j][i] * chrFilter[j];
307  }
308 
309  dest[2*i]= av_clip_uint8(v>>19);
310  dest[2*i+1]= av_clip_uint8(u>>19);
311  }
312 }
313 
314 
/* Store one P010 sample: 10 significant bits kept in the high bits of a
 * 16-bit word (clip to 10 bits, then << 6), endianness per `big_endian`. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    }
321 
static void yuv2p010l1_c(const int16_t *src,
                         uint16_t *dest, int dstW,
                         int big_endian)
{
    /* Unfiltered copy of one 15-bit plane to P010 samples (10 bits in the
     * high bits of each 16-bit word). */
    const int shift = 5;
    const int rnd   = 1 << (shift - 1); /* round-to-nearest before the shift */
    int i;

    for (i = 0; i < dstW; i++) {
        int v = src[i] + rnd;
        output_pixel(&dest[i], v);
    }
}
334 
static void yuv2p010lX_c(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian)
{
    /* Vertically filter `filterSize` 15-bit lines down to one P010 line. */
    const int shift = 17;
    int i, j;

    for (i = 0; i < dstW; i++) {
        int acc = 1 << (shift - 1); /* rounding constant */

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        output_pixel(&dest[i], acc);
    }
}
351 
static void yuv2p010cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW)
{
    /* Vertically filter chroma and write interleaved U/V P010 samples. */
    uint16_t *dest = (uint16_t*)dest8;
    int shift = 17;
    /* output_pixel() picks byte order from this flag */
    int big_endian = c->dstFormat == AV_PIX_FMT_P010BE;
    int i, j;

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding constants */
        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }

        /* U first, then V: P010 semi-planar chroma order */
        output_pixel(&dest[2*i] , u);
        output_pixel(&dest[2*i+1], v);
    }
}
374 
/* Endianness-specific entry points for the P010 writers above.  The
 * `dither`/`offset` parameters exist to match the generic planar-output
 * signature and are unused here. */
static void yuv2p010l1_LE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 0);
}

static void yuv2p010l1_BE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 1);
}

static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0);
}

static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1);
}
402 
403 #undef output_pixel
404 
405 
/* Shift the next 1-bit pixel into the accumulator: values >= 234 become a
 * set bit, everything below becomes a clear bit. */
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= 234
/* Emit 8 accumulated bits as one byte; MONOBLACK stores them as-is,
 * the other target (MONOWHITE) stores them inverted. */
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
415 
static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum AVPixelFormat target)
{
    /* Vertically filter luma and emit 1-bit monochrome output: two pixels
     * per iteration, one packed byte written every 8 pixels.  Chroma and
     * alpha inputs are ignored. */
    const uint8_t * const d128 = ff_dither_8x8_220[y&7];
    int i;
    unsigned acc = 0; /* bit accumulator for the output byte */
    int err = 0;      /* running error for the SWS_DITHER_ED path */

    for (i = 0; i < dstW; i += 2) {
        int j;
        int Y1 = 1 << 18; /* rounding constant for the >> 19 below */
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i] * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        if ((Y1 | Y2) & 0x100) { /* left the 8-bit range -> clip */
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        if (c->dither == SWS_DITHER_ED) {
            /* Error diffusion: mixes the running error with the previous
             * line's stored errors (7/1/5/3 weights, /16), thresholds at
             * 128 and keeps the residual for the next pixels/line. */
            Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y1 >= 128);
            Y1 -= 220*(acc&1); /* subtract the level just emitted */

            err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
            c->dither_error[0][i+1] = Y1;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);
        } else {
            /* Ordered dithering against the 8x8 220-level table. */
            accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
            accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
        }
        if ((i & 7) == 6) { /* 8 bits collected -> flush one byte */
            output_pixel(*dest++, acc);
        }
    }
    c->dither_error[0][i] = err;

    if (i & 6) { /* flush a partial trailing byte, if any */
        output_pixel(*dest, acc);
    }
}
468 
static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum AVPixelFormat target)
{
    /* Bilinear blend of two luma lines (weights yalpha/4096) emitted as
     * 1-bit monochrome.  Chroma and alpha inputs are ignored. */
    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int yalpha1 = 4096 - yalpha; /* complementary blend weight */
    int i;
    av_assert2(yalpha <= 4096U);

    if (c->dither == SWS_DITHER_ED) {
        /* Error-diffusion path: two pixels per iteration, errors carried
         * in `err` and the per-line c->dither_error[0] array. */
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128); /* threshold -> next output bit */
            Y -= 220*(acc&1);         /* subtract the emitted level */

            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6) /* 8 bits collected -> flush one byte */
                output_pixel(*dest++, acc);
        }
        c->dither_error[0][i] = err;
    } else {
        /* Ordered-dither path: 8 pixels per iteration, one byte out. */
        for (i = 0; i < dstW; i += 8) {
            int Y, acc = 0;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[0]);
            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[1]);
            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[2]);
            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[3]);
            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[4]);
            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[5]);
            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[6]);
            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
529 
530 static av_always_inline void
531 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
532  const int16_t *ubuf[2], const int16_t *vbuf[2],
533  const int16_t *abuf0, uint8_t *dest, int dstW,
534  int uvalpha, int y, enum AVPixelFormat target)
535 {
536  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
537  int i;
538 
539  if (c->dither == SWS_DITHER_ED) {
540  int err = 0;
541  int acc = 0;
542  for (i = 0; i < dstW; i +=2) {
543  int Y;
544 
545  Y = ((buf0[i + 0] + 64) >> 7);
546  Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
547  c->dither_error[0][i] = err;
548  acc = 2*acc + (Y >= 128);
549  Y -= 220*(acc&1);
550 
551  err = ((buf0[i + 1] + 64) >> 7);
552  err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
553  c->dither_error[0][i+1] = Y;
554  acc = 2*acc + (err >= 128);
555  err -= 220*(acc&1);
556 
557  if ((i & 7) == 6)
558  output_pixel(*dest++, acc);
559  }
560  c->dither_error[0][i] = err;
561  } else {
562  for (i = 0; i < dstW; i += 8) {
563  int acc = 0;
564  accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
565  accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
566  accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
567  accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
568  accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
569  accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
570  accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
571  accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);
572 
573  output_pixel(*dest++, acc);
574  }
575  }
576 }
577 
578 #undef output_pixel
579 #undef accumulate_bit
580 
/* Generate the three standard entry points for a packed output format —
 * _X (multi-tap vertical filter), _2 (bilinear blend of two lines) and
 * _1 (single source line) — by delegating to the shared `base` template
 * with the target pixel format as a compile-time constant. */
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}

/* Monochrome writers: same template, inverted vs. non-inverted output. */
YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
615 
/* Write one packed 4:2:2 macropixel (two luma samples sharing one U/V pair)
 * in the byte order of the compile-time `target` format. */
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
633 
634 static av_always_inline void
635 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
636  const int16_t **lumSrc, int lumFilterSize,
637  const int16_t *chrFilter, const int16_t **chrUSrc,
638  const int16_t **chrVSrc, int chrFilterSize,
639  const int16_t **alpSrc, uint8_t *dest, int dstW,
640  int y, enum AVPixelFormat target)
641 {
642  int i;
643 
644  for (i = 0; i < ((dstW + 1) >> 1); i++) {
645  int j;
646  int Y1 = 1 << 18;
647  int Y2 = 1 << 18;
648  int U = 1 << 18;
649  int V = 1 << 18;
650 
651  for (j = 0; j < lumFilterSize; j++) {
652  Y1 += lumSrc[j][i * 2] * lumFilter[j];
653  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
654  }
655  for (j = 0; j < chrFilterSize; j++) {
656  U += chrUSrc[j][i] * chrFilter[j];
657  V += chrVSrc[j][i] * chrFilter[j];
658  }
659  Y1 >>= 19;
660  Y2 >>= 19;
661  U >>= 19;
662  V >>= 19;
663  if ((Y1 | Y2 | U | V) & 0x100) {
664  Y1 = av_clip_uint8(Y1);
665  Y2 = av_clip_uint8(Y2);
666  U = av_clip_uint8(U);
667  V = av_clip_uint8(V);
668  }
669  output_pixels(4*i, Y1, U, Y2, V);
670  }
671 }
672 
673 static av_always_inline void
674 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
675  const int16_t *ubuf[2], const int16_t *vbuf[2],
676  const int16_t *abuf[2], uint8_t *dest, int dstW,
677  int yalpha, int uvalpha, int y,
678  enum AVPixelFormat target)
679 {
680  const int16_t *buf0 = buf[0], *buf1 = buf[1],
681  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
682  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
683  int yalpha1 = 4096 - yalpha;
684  int uvalpha1 = 4096 - uvalpha;
685  int i;
686  av_assert2(yalpha <= 4096U);
687  av_assert2(uvalpha <= 4096U);
688 
689  for (i = 0; i < ((dstW + 1) >> 1); i++) {
690  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
691  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
692  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
693  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
694 
695  if ((Y1 | Y2 | U | V) & 0x100) {
696  Y1 = av_clip_uint8(Y1);
697  Y2 = av_clip_uint8(Y2);
698  U = av_clip_uint8(U);
699  V = av_clip_uint8(V);
700  }
701 
702  output_pixels(i * 4, Y1, U, Y2, V);
703  }
704 }
705 
706 static av_always_inline void
707 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
708  const int16_t *ubuf[2], const int16_t *vbuf[2],
709  const int16_t *abuf0, uint8_t *dest, int dstW,
710  int uvalpha, int y, enum AVPixelFormat target)
711 {
712  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
713  int i;
714 
715  if (uvalpha < 2048) {
716  for (i = 0; i < ((dstW + 1) >> 1); i++) {
717  int Y1 = (buf0[i * 2 ]+64) >> 7;
718  int Y2 = (buf0[i * 2 + 1]+64) >> 7;
719  int U = (ubuf0[i] +64) >> 7;
720  int V = (vbuf0[i] +64) >> 7;
721 
722  if ((Y1 | Y2 | U | V) & 0x100) {
723  Y1 = av_clip_uint8(Y1);
724  Y2 = av_clip_uint8(Y2);
725  U = av_clip_uint8(U);
726  V = av_clip_uint8(V);
727  }
728 
729  Y1 = av_clip_uint8(Y1);
730  Y2 = av_clip_uint8(Y2);
731  U = av_clip_uint8(U);
732  V = av_clip_uint8(V);
733 
734  output_pixels(i * 4, Y1, U, Y2, V);
735  }
736  } else {
737  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
738  for (i = 0; i < ((dstW + 1) >> 1); i++) {
739  int Y1 = (buf0[i * 2 ] + 64) >> 7;
740  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
741  int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
742  int V = (vbuf0[i] + vbuf1[i]+128) >> 8;
743 
744  if ((Y1 | Y2 | U | V) & 0x100) {
745  Y1 = av_clip_uint8(Y1);
746  Y2 = av_clip_uint8(Y2);
747  U = av_clip_uint8(U);
748  V = av_clip_uint8(V);
749  }
750 
751  Y1 = av_clip_uint8(Y1);
752  Y2 = av_clip_uint8(Y2);
753  U = av_clip_uint8(U);
754  V = av_clip_uint8(V);
755 
756  output_pixels(i * 4, Y1, U, Y2, V);
757  }
758  }
759 }
760 
761 #undef output_pixels
762 
/* Instantiate the packed-4:2:2 writers for the three component orders. */
YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
766 
/* R_B / B_R pick the component order: the RGB48/RGBA64 targets put red
 * first, their BGR counterparts swap R and B. */
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
/* Store one 16-bit component with the target format's endianness. */
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
775 
776 static av_always_inline void
777 yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
778  const int32_t **lumSrc, int lumFilterSize,
779  const int16_t *chrFilter, const int32_t **chrUSrc,
780  const int32_t **chrVSrc, int chrFilterSize,
781  const int32_t **alpSrc, uint16_t *dest, int dstW,
782  int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
783 {
784  int i;
785  int A1 = 0xffff<<14, A2 = 0xffff<<14;
786 
787  for (i = 0; i < ((dstW + 1) >> 1); i++) {
788  int j;
789  int Y1 = -0x40000000;
790  int Y2 = -0x40000000;
791  int U = -(128 << 23); // 19
792  int V = -(128 << 23);
793  int R, G, B;
794 
795  for (j = 0; j < lumFilterSize; j++) {
796  Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
797  Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
798  }
799  for (j = 0; j < chrFilterSize; j++) {;
800  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
801  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
802  }
803 
804  if (hasAlpha) {
805  A1 = -0x40000000;
806  A2 = -0x40000000;
807  for (j = 0; j < lumFilterSize; j++) {
808  A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
809  A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
810  }
811  A1 >>= 1;
812  A1 += 0x20002000;
813  A2 >>= 1;
814  A2 += 0x20002000;
815  }
816 
817  // 8 bits: 12+15=27; 16 bits: 12+19=31
818  Y1 >>= 14; // 10
819  Y1 += 0x10000;
820  Y2 >>= 14;
821  Y2 += 0x10000;
822  U >>= 14;
823  V >>= 14;
824 
825  // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
826  Y1 -= c->yuv2rgb_y_offset;
827  Y2 -= c->yuv2rgb_y_offset;
828  Y1 *= c->yuv2rgb_y_coeff;
829  Y2 *= c->yuv2rgb_y_coeff;
830  Y1 += 1 << 13; // 21
831  Y2 += 1 << 13;
832  // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
833 
834  R = V * c->yuv2rgb_v2r_coeff;
835  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
836  B = U * c->yuv2rgb_u2b_coeff;
837 
838  // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
839  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
840  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
841  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
842  if (eightbytes) {
843  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
844  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
845  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
846  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
847  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
848  dest += 8;
849  } else {
850  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
851  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
852  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
853  dest += 6;
854  }
855  }
856 }
857 
858 static av_always_inline void
860  const int32_t *ubuf[2], const int32_t *vbuf[2],
861  const int32_t *abuf[2], uint16_t *dest, int dstW,
862  int yalpha, int uvalpha, int y,
863  enum AVPixelFormat target, int hasAlpha, int eightbytes)
864 {
865  const int32_t *buf0 = buf[0], *buf1 = buf[1],
866  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
867  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
868  *abuf0 = hasAlpha ? abuf[0] : NULL,
869  *abuf1 = hasAlpha ? abuf[1] : NULL;
870  int yalpha1 = 4096 - yalpha;
871  int uvalpha1 = 4096 - uvalpha;
872  int i;
873  int A1 = 0xffff<<14, A2 = 0xffff<<14;
874 
875  av_assert2(yalpha <= 4096U);
876  av_assert2(uvalpha <= 4096U);
877 
878  for (i = 0; i < ((dstW + 1) >> 1); i++) {
879  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
880  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
881  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
882  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
883  int R, G, B;
884 
885  Y1 -= c->yuv2rgb_y_offset;
886  Y2 -= c->yuv2rgb_y_offset;
887  Y1 *= c->yuv2rgb_y_coeff;
888  Y2 *= c->yuv2rgb_y_coeff;
889  Y1 += 1 << 13;
890  Y2 += 1 << 13;
891 
892  R = V * c->yuv2rgb_v2r_coeff;
893  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
894  B = U * c->yuv2rgb_u2b_coeff;
895 
896  if (hasAlpha) {
897  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
898  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
899 
900  A1 += 1 << 13;
901  A2 += 1 << 13;
902  }
903 
904  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
905  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
906  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
907  if (eightbytes) {
908  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
909  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
910  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
911  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
912  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
913  dest += 8;
914  } else {
915  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
916  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
917  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
918  dest += 6;
919  }
920  }
921 }
922 
923 static av_always_inline void
925  const int32_t *ubuf[2], const int32_t *vbuf[2],
926  const int32_t *abuf0, uint16_t *dest, int dstW,
927  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
928 {
929  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
930  int i;
931  int A1 = 0xffff<<14, A2= 0xffff<<14;
932 
933  if (uvalpha < 2048) {
934  for (i = 0; i < ((dstW + 1) >> 1); i++) {
935  int Y1 = (buf0[i * 2] ) >> 2;
936  int Y2 = (buf0[i * 2 + 1]) >> 2;
937  int U = (ubuf0[i] - (128 << 11)) >> 2;
938  int V = (vbuf0[i] - (128 << 11)) >> 2;
939  int R, G, B;
940 
941  Y1 -= c->yuv2rgb_y_offset;
942  Y2 -= c->yuv2rgb_y_offset;
943  Y1 *= c->yuv2rgb_y_coeff;
944  Y2 *= c->yuv2rgb_y_coeff;
945  Y1 += 1 << 13;
946  Y2 += 1 << 13;
947 
948  if (hasAlpha) {
949  A1 = abuf0[i * 2 ] << 11;
950  A2 = abuf0[i * 2 + 1] << 11;
951 
952  A1 += 1 << 13;
953  A2 += 1 << 13;
954  }
955 
956  R = V * c->yuv2rgb_v2r_coeff;
957  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
958  B = U * c->yuv2rgb_u2b_coeff;
959 
960  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
961  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
962  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
963  if (eightbytes) {
964  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
965  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
966  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
967  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
968  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
969  dest += 8;
970  } else {
971  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
972  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
973  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
974  dest += 6;
975  }
976  }
977  } else {
978  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
979  int A1 = 0xffff<<14, A2 = 0xffff<<14;
980  for (i = 0; i < ((dstW + 1) >> 1); i++) {
981  int Y1 = (buf0[i * 2] ) >> 2;
982  int Y2 = (buf0[i * 2 + 1]) >> 2;
983  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
984  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
985  int R, G, B;
986 
987  Y1 -= c->yuv2rgb_y_offset;
988  Y2 -= c->yuv2rgb_y_offset;
989  Y1 *= c->yuv2rgb_y_coeff;
990  Y2 *= c->yuv2rgb_y_coeff;
991  Y1 += 1 << 13;
992  Y2 += 1 << 13;
993 
994  if (hasAlpha) {
995  A1 = abuf0[i * 2 ] << 11;
996  A2 = abuf0[i * 2 + 1] << 11;
997 
998  A1 += 1 << 13;
999  A2 += 1 << 13;
1000  }
1001 
1002  R = V * c->yuv2rgb_v2r_coeff;
1003  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1004  B = U * c->yuv2rgb_u2b_coeff;
1005 
1006  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1007  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1008  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1009  if (eightbytes) {
1010  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1011  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1012  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1013  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1014  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1015  dest += 8;
1016  } else {
1017  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1018  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1019  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1020  dest += 6;
1021  }
1022  }
1023  }
1024 }
1025 
/**
 * Vertically scale (multi-tap filter) 16-bit luma/chroma/alpha planes and
 * write one full-chroma-resolution 48/64-bit RGB(A) pixel per output column.
 *
 * @param lumFilter/lumSrc/lumFilterSize   vertical luma (and alpha) filter taps
 * @param chrFilter/chrUSrc/chrVSrc/...    vertical chroma filter taps
 * @param alpSrc      alpha source planes (only read when hasAlpha)
 * @param dest        packed 16-bit-per-component output row
 * @param eightbytes  write 4 components (RGBA, 8 bytes) instead of 3 (6 bytes)
 *
 * R_B/B_R and output_pixel are macros defined earlier in this file and select
 * RGB vs BGR ordering and output endianness from 'target'.
 */
static av_always_inline void
yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                       const int32_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int32_t **chrUSrc,
                       const int32_t **chrVSrc, int chrFilterSize,
                       const int32_t **alpSrc, uint16_t *dest, int dstW,
                       int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
{
    int i;
    int A = 0xffff<<14; /* opaque alpha default when !hasAlpha */

    for (i = 0; i < dstW; i++) {
        int j;
        /* accumulators pre-biased: -0.5 rounding for Y, -128 chroma offset */
        int Y = -0x40000000;
        int U = -(128 << 23); // 19
        int V = -(128 << 23);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {;
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        if (hasAlpha) {
            A = -0x40000000;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * (unsigned)lumFilter[j];
            }
            A >>= 1;
            A += 0x20002000; /* undo bias, round */
        }

        // 8bit: 12+15=27; 16-bit: 12+19=31
        Y >>= 14; // 10
        Y += 0x10000; /* undo the -0x40000000 bias after the shift */
        U >>= 14;
        V >>= 14;

        // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << 13; // 21
        // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
        output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
        if (eightbytes) {
            output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
            dest += 4;
        } else {
            dest += 3;
        }
    }
}
1089 
1090 static av_always_inline void
1092  const int32_t *ubuf[2], const int32_t *vbuf[2],
1093  const int32_t *abuf[2], uint16_t *dest, int dstW,
1094  int yalpha, int uvalpha, int y,
1095  enum AVPixelFormat target, int hasAlpha, int eightbytes)
1096 {
1097  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1098  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1099  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1100  *abuf0 = hasAlpha ? abuf[0] : NULL,
1101  *abuf1 = hasAlpha ? abuf[1] : NULL;
1102  int yalpha1 = 4096 - yalpha;
1103  int uvalpha1 = 4096 - uvalpha;
1104  int i;
1105  int A = 0xffff<<14;
1106 
1107  av_assert2(yalpha <= 4096U);
1108  av_assert2(uvalpha <= 4096U);
1109 
1110  for (i = 0; i < dstW; i++) {
1111  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1112  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1113  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1114  int R, G, B;
1115 
1116  Y -= c->yuv2rgb_y_offset;
1117  Y *= c->yuv2rgb_y_coeff;
1118  Y += 1 << 13;
1119 
1120  R = V * c->yuv2rgb_v2r_coeff;
1121  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1122  B = U * c->yuv2rgb_u2b_coeff;
1123 
1124  if (hasAlpha) {
1125  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1126 
1127  A += 1 << 13;
1128  }
1129 
1130  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1131  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1132  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1133  if (eightbytes) {
1134  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1135  dest += 4;
1136  } else {
1137  dest += 3;
1138  }
1139  }
1140 }
1141 
1142 static av_always_inline void
1144  const int32_t *ubuf[2], const int32_t *vbuf[2],
1145  const int32_t *abuf0, uint16_t *dest, int dstW,
1146  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1147 {
1148  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1149  int i;
1150  int A = 0xffff<<14;
1151 
1152  if (uvalpha < 2048) {
1153  for (i = 0; i < dstW; i++) {
1154  int Y = (buf0[i]) >> 2;
1155  int U = (ubuf0[i] - (128 << 11)) >> 2;
1156  int V = (vbuf0[i] - (128 << 11)) >> 2;
1157  int R, G, B;
1158 
1159  Y -= c->yuv2rgb_y_offset;
1160  Y *= c->yuv2rgb_y_coeff;
1161  Y += 1 << 13;
1162 
1163  if (hasAlpha) {
1164  A = abuf0[i] << 11;
1165 
1166  A += 1 << 13;
1167  }
1168 
1169  R = V * c->yuv2rgb_v2r_coeff;
1170  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1171  B = U * c->yuv2rgb_u2b_coeff;
1172 
1173  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1174  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1175  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1176  if (eightbytes) {
1177  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1178  dest += 4;
1179  } else {
1180  dest += 3;
1181  }
1182  }
1183  } else {
1184  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1185  int A = 0xffff<<14;
1186  for (i = 0; i < dstW; i++) {
1187  int Y = (buf0[i] ) >> 2;
1188  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1189  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1190  int R, G, B;
1191 
1192  Y -= c->yuv2rgb_y_offset;
1193  Y *= c->yuv2rgb_y_coeff;
1194  Y += 1 << 13;
1195 
1196  if (hasAlpha) {
1197  A = abuf0[i] << 11;
1198 
1199  A += 1 << 13;
1200  }
1201 
1202  R = V * c->yuv2rgb_v2r_coeff;
1203  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1204  B = U * c->yuv2rgb_u2b_coeff;
1205 
1206  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1207  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1208  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1209  if (eightbytes) {
1210  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1211  dest += 4;
1212  } else {
1213  dest += 3;
1214  }
1215  }
1216  }
1217 }
1218 
1219 #undef output_pixel
1220 #undef r_b
1221 #undef b_r
1222 
/*
 * Instantiate the three vertical-scaler entry points (_X_c: multi-tap filter,
 * _2_c: two-row blend, _1_c: single row) for one 16-bit packed RGB(A) target.
 * The public prototypes take int16_t pointers; 16-bit output paths actually
 * carry 32-bit intermediates, hence the casts to int32_t.
 */
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
}
1268 
/* Chroma-subsampled (2 luma per chroma) 16-bit packed RGB output functions. */
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64LE, 0, 1)

/* Full-chroma-resolution variants of the same targets. */
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64LE, 0, 1)
1294 
1295 /*
1296  * Write out 2 RGB pixels in the target pixel format. This function takes a
1297  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1298  * things like endianness conversion and shifting. The caller takes care of
1299  * setting the correct offset in these tables from the chroma (U/V) values.
1300  * This function then uses the luminance (Y1/Y2) values to write out the
1301  * correct RGB values into the destination buffer.
1302  */
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    /* 32-bit targets: each LUT entry already contains the component shifted
     * into its final position, so one add of r+g+b yields the packed pixel. */
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        /* size-optimized build: one branch-free expression for both cases */
        int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            /* _1 formats carry alpha in the low byte, others in the top byte */
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
#endif
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        /* 24-bit targets: three separate byte LUT lookups per pixel */
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        /* 16-bit targets: LUT index is offset by a per-position ordered-dither
         * value; the two horizontally adjacent pixels get different offsets. */
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_4[ y & 1     ][0];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_4[ y & 1     ][1];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_8[ y & 1     ][1];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_8[ y & 1     ][0];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else /* 8/4 bits */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1       = d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2       = d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1       =  d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2       =  d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            /* two 4-bit pixels packed into one byte */
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                    ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
1421 
1422 static av_always_inline void
1423 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1424  const int16_t **lumSrc, int lumFilterSize,
1425  const int16_t *chrFilter, const int16_t **chrUSrc,
1426  const int16_t **chrVSrc, int chrFilterSize,
1427  const int16_t **alpSrc, uint8_t *dest, int dstW,
1428  int y, enum AVPixelFormat target, int hasAlpha)
1429 {
1430  int i;
1431 
1432  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1433  int j, A1, A2;
1434  int Y1 = 1 << 18;
1435  int Y2 = 1 << 18;
1436  int U = 1 << 18;
1437  int V = 1 << 18;
1438  const void *r, *g, *b;
1439 
1440  for (j = 0; j < lumFilterSize; j++) {
1441  Y1 += lumSrc[j][i * 2] * lumFilter[j];
1442  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1443  }
1444  for (j = 0; j < chrFilterSize; j++) {
1445  U += chrUSrc[j][i] * chrFilter[j];
1446  V += chrVSrc[j][i] * chrFilter[j];
1447  }
1448  Y1 >>= 19;
1449  Y2 >>= 19;
1450  U >>= 19;
1451  V >>= 19;
1452  if (hasAlpha) {
1453  A1 = 1 << 18;
1454  A2 = 1 << 18;
1455  for (j = 0; j < lumFilterSize; j++) {
1456  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1457  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1458  }
1459  A1 >>= 19;
1460  A2 >>= 19;
1461  if ((A1 | A2) & 0x100) {
1462  A1 = av_clip_uint8(A1);
1463  A2 = av_clip_uint8(A2);
1464  }
1465  }
1466 
1467  r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1469  b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1470 
1471  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1472  r, g, b, y, target, hasAlpha);
1473  }
1474 }
1475 
1476 static av_always_inline void
1477 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
1478  const int16_t *ubuf[2], const int16_t *vbuf[2],
1479  const int16_t *abuf[2], uint8_t *dest, int dstW,
1480  int yalpha, int uvalpha, int y,
1481  enum AVPixelFormat target, int hasAlpha)
1482 {
1483  const int16_t *buf0 = buf[0], *buf1 = buf[1],
1484  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1485  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1486  *abuf0 = hasAlpha ? abuf[0] : NULL,
1487  *abuf1 = hasAlpha ? abuf[1] : NULL;
1488  int yalpha1 = 4096 - yalpha;
1489  int uvalpha1 = 4096 - uvalpha;
1490  int i;
1491  av_assert2(yalpha <= 4096U);
1492  av_assert2(uvalpha <= 4096U);
1493 
1494  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1495  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1496  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1497  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1498  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1499  int A1, A2;
1500  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1502  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1503 
1504  if (hasAlpha) {
1505  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1506  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1507  A1 = av_clip_uint8(A1);
1508  A2 = av_clip_uint8(A2);
1509  }
1510 
1511  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1512  r, g, b, y, target, hasAlpha);
1513  }
1514 }
1515 
1516 static av_always_inline void
1517 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1518  const int16_t *ubuf[2], const int16_t *vbuf[2],
1519  const int16_t *abuf0, uint8_t *dest, int dstW,
1520  int uvalpha, int y, enum AVPixelFormat target,
1521  int hasAlpha)
1522 {
1523  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1524  int i;
1525 
1526  if (uvalpha < 2048) {
1527  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1528  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1529  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1530  int U = (ubuf0[i] + 64) >> 7;
1531  int V = (vbuf0[i] + 64) >> 7;
1532  int A1, A2;
1533  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1535  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1536 
1537  if (hasAlpha) {
1538  A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1539  A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1540  A1 = av_clip_uint8(A1);
1541  A2 = av_clip_uint8(A2);
1542  }
1543 
1544  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1545  r, g, b, y, target, hasAlpha);
1546  }
1547  } else {
1548  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1549  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1550  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1551  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1552  int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
1553  int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;
1554  int A1, A2;
1555  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1557  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1558 
1559  if (hasAlpha) {
1560  A1 = (abuf0[i * 2 ] + 64) >> 7;
1561  A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1562  A1 = av_clip_uint8(A1);
1563  A2 = av_clip_uint8(A2);
1564  }
1565 
1566  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1567  r, g, b, y, target, hasAlpha);
1568  }
1569  }
1570 }
1571 
/* Instantiate only the multi-tap filter (_X_c) entry point. */
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

/* _X_c plus the two-row blend (_2_c) entry point. */
#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}

/* All three entry points: _X_c, _2_c and the single-row (_1_c) path. */
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
1606 
/* Chroma-subsampled LUT-based RGB output functions. For CONFIG_SMALL a
 * single alpha-conditional instantiation is used; otherwise separate
 * with-alpha (a32*) and without-alpha (x32*) variants are generated. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2rgb,, 32_1,  AV_PIX_FMT_RGB32_1,   CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2rgb,, 32,    AV_PIX_FMT_RGB32,     CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1,   1)
YUV2RGBWRAPPER(yuv2rgb,, a32,   AV_PIX_FMT_RGB32,     1)
#endif
YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1,   0)
YUV2RGBWRAPPER(yuv2rgb,, x32,   AV_PIX_FMT_RGB32,     0)
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24,    0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24,    0)
YUV2RGBWRAPPER(yuv2rgb,, 16,    AV_PIX_FMT_RGB565,    0)
YUV2RGBWRAPPER(yuv2rgb,, 15,    AV_PIX_FMT_RGB555,    0)
YUV2RGBWRAPPER(yuv2rgb,, 12,    AV_PIX_FMT_RGB444,    0)
YUV2RGBWRAPPER(yuv2rgb,, 8,     AV_PIX_FMT_RGB8,      0)
YUV2RGBWRAPPER(yuv2rgb,, 4,     AV_PIX_FMT_RGB4,      0)
YUV2RGBWRAPPER(yuv2rgb,, 4b,    AV_PIX_FMT_RGB4_BYTE, 0)
1626 
1628  uint8_t *dest, int i, int Y, int A, int U, int V,
1629  int y, enum AVPixelFormat target, int hasAlpha, int err[4])
1630 {
1631  int R, G, B;
1632  int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;
1633 
1634  Y -= c->yuv2rgb_y_offset;
1635  Y *= c->yuv2rgb_y_coeff;
1636  Y += 1 << 21;
1637  R = Y + V*c->yuv2rgb_v2r_coeff;
1638  G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1639  B = Y + U*c->yuv2rgb_u2b_coeff;
1640  if ((R | G | B) & 0xC0000000) {
1641  R = av_clip_uintp2(R, 30);
1642  G = av_clip_uintp2(G, 30);
1643  B = av_clip_uintp2(B, 30);
1644  }
1645 
1646  switch(target) {
1647  case AV_PIX_FMT_ARGB:
1648  dest[0] = hasAlpha ? A : 255;
1649  dest[1] = R >> 22;
1650  dest[2] = G >> 22;
1651  dest[3] = B >> 22;
1652  break;
1653  case AV_PIX_FMT_RGB24:
1654  dest[0] = R >> 22;
1655  dest[1] = G >> 22;
1656  dest[2] = B >> 22;
1657  break;
1658  case AV_PIX_FMT_RGBA:
1659  dest[0] = R >> 22;
1660  dest[1] = G >> 22;
1661  dest[2] = B >> 22;
1662  dest[3] = hasAlpha ? A : 255;
1663  break;
1664  case AV_PIX_FMT_ABGR:
1665  dest[0] = hasAlpha ? A : 255;
1666  dest[1] = B >> 22;
1667  dest[2] = G >> 22;
1668  dest[3] = R >> 22;
1669  break;
1670  case AV_PIX_FMT_BGR24:
1671  dest[0] = B >> 22;
1672  dest[1] = G >> 22;
1673  dest[2] = R >> 22;
1674  break;
1675  case AV_PIX_FMT_BGRA:
1676  dest[0] = B >> 22;
1677  dest[1] = G >> 22;
1678  dest[2] = R >> 22;
1679  dest[3] = hasAlpha ? A : 255;
1680  break;
1681  case AV_PIX_FMT_BGR4_BYTE:
1682  case AV_PIX_FMT_RGB4_BYTE:
1683  case AV_PIX_FMT_BGR8:
1684  case AV_PIX_FMT_RGB8:
1685  {
1686  int r,g,b;
1687 
1688  switch (c->dither) {
1689  default:
1690  case SWS_DITHER_AUTO:
1691  case SWS_DITHER_ED:
1692  R >>= 22;
1693  G >>= 22;
1694  B >>= 22;
1695  R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1696  G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1697  B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
1698  c->dither_error[0][i] = err[0];
1699  c->dither_error[1][i] = err[1];
1700  c->dither_error[2][i] = err[2];
1701  r = R >> (isrgb8 ? 5 : 7);
1702  g = G >> (isrgb8 ? 5 : 6);
1703  b = B >> (isrgb8 ? 6 : 7);
1704  r = av_clip(r, 0, isrgb8 ? 7 : 1);
1705  g = av_clip(g, 0, isrgb8 ? 7 : 3);
1706  b = av_clip(b, 0, isrgb8 ? 3 : 1);
1707  err[0] = R - r*(isrgb8 ? 36 : 255);
1708  err[1] = G - g*(isrgb8 ? 36 : 85);
1709  err[2] = B - b*(isrgb8 ? 85 : 255);
1710  break;
1711  case SWS_DITHER_A_DITHER:
1712  if (isrgb8) {
1713  /* see http://pippin.gimp.org/a_dither/ for details/origin */
1714 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))
1715  r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
1716  g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
1717  b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
1718  r = av_clip_uintp2(r, 3);
1719  g = av_clip_uintp2(g, 3);
1720  b = av_clip_uintp2(b, 2);
1721  } else {
1722  r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
1723  g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
1724  b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
1725  r = av_clip_uintp2(r, 1);
1726  g = av_clip_uintp2(g, 2);
1727  b = av_clip_uintp2(b, 1);
1728  }
1729  break;
1730  case SWS_DITHER_X_DITHER:
1731  if (isrgb8) {
1732  /* see http://pippin.gimp.org/a_dither/ for details/origin */
1733 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)
1734  r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
1735  g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
1736  b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
1737  r = av_clip_uintp2(r, 3);
1738  g = av_clip_uintp2(g, 3);
1739  b = av_clip_uintp2(b, 2);
1740  } else {
1741  r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
1742  g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
1743  b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
1744  r = av_clip_uintp2(r, 1);
1745  g = av_clip_uintp2(g, 2);
1746  b = av_clip_uintp2(b, 1);
1747  }
1748 
1749  break;
1750  }
1751 
1752  if(target == AV_PIX_FMT_BGR4_BYTE) {
1753  dest[0] = r + 2*g + 8*b;
1754  } else if(target == AV_PIX_FMT_RGB4_BYTE) {
1755  dest[0] = b + 2*g + 8*r;
1756  } else if(target == AV_PIX_FMT_BGR8) {
1757  dest[0] = r + 8*g + 64*b;
1758  } else if(target == AV_PIX_FMT_RGB8) {
1759  dest[0] = b + 4*g + 32*r;
1760  } else
1761  av_assert2(0);
1762  break;}
1763  }
1764 }
1765 
/**
 * Vertically scale 8-bit planes (multi-tap filter) and write one
 * full-chroma-resolution RGB(A) pixel at a time via yuv2rgb_write_full().
 * Error-diffusion state is flushed back to c->dither_error at end of row.
 */
static av_always_inline void
yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,
                          int dstW, int y, enum AVPixelFormat target, int hasAlpha)
{
    int i;
    /* bytes per output pixel; overridden to 1 below for the palette-ish formats */
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};
    int A = 0; //init to silence warning

    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1<<9; /* rounding bias for the >> 10 below */
        int U = (1<<9)-(128 << 19); /* also removes the 128 chroma offset */
        int V = (1<<9)-(128 << 19);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            /* clip only when an overflow actually occurred */
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* store the trailing error-diffusion terms for the next row */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
1815 
1816 static av_always_inline void
1818  const int16_t *ubuf[2], const int16_t *vbuf[2],
1819  const int16_t *abuf[2], uint8_t *dest, int dstW,
1820  int yalpha, int uvalpha, int y,
1821  enum AVPixelFormat target, int hasAlpha)
1822 {
1823  const int16_t *buf0 = buf[0], *buf1 = buf[1],
1824  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1825  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1826  *abuf0 = hasAlpha ? abuf[0] : NULL,
1827  *abuf1 = hasAlpha ? abuf[1] : NULL;
1828  int yalpha1 = 4096 - yalpha;
1829  int uvalpha1 = 4096 - uvalpha;
1830  int i;
1831  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1832  int err[4] = {0};
1833  int A = 0; // init to silcene warning
1834 
1835  av_assert2(yalpha <= 4096U);
1836  av_assert2(uvalpha <= 4096U);
1837 
1838  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
1839  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
1840  step = 1;
1841 
1842  for (i = 0; i < dstW; i++) {
1843  int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10; //FIXME rounding
1844  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
1845  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;
1846 
1847  if (hasAlpha) {
1848  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
1849  if (A & 0x100)
1850  A = av_clip_uint8(A);
1851  }
1852 
1853  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1854  dest += step;
1855  }
1856  c->dither_error[0][i] = err[0];
1857  c->dither_error[1][i] = err[1];
1858  c->dither_error[2][i] = err[2];
1859 }
1860 
1861 static av_always_inline void
1863  const int16_t *ubuf[2], const int16_t *vbuf[2],
1864  const int16_t *abuf0, uint8_t *dest, int dstW,
1865  int uvalpha, int y, enum AVPixelFormat target,
1866  int hasAlpha)
1867 {
1868  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1869  int i;
1870  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1871  int err[4] = {0};
1872 
1873  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
1874  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
1875  step = 1;
1876 
1877  if (uvalpha < 2048) {
1878  int A = 0; //init to silence warning
1879  for (i = 0; i < dstW; i++) {
1880  int Y = buf0[i] << 2;
1881  int U = (ubuf0[i] - (128<<7)) * 4;
1882  int V = (vbuf0[i] - (128<<7)) * 4;
1883 
1884  if (hasAlpha) {
1885  A = (abuf0[i] + 64) >> 7;
1886  if (A & 0x100)
1887  A = av_clip_uint8(A);
1888  }
1889 
1890  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1891  dest += step;
1892  }
1893  } else {
1894  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1895  int A = 0; //init to silence warning
1896  for (i = 0; i < dstW; i++) {
1897  int Y = buf0[i] << 2;
1898  int U = (ubuf0[i] + ubuf1[i] - (128<<8)) << 1;
1899  int V = (vbuf0[i] + vbuf1[i] - (128<<8)) << 1;
1900 
1901  if (hasAlpha) {
1902  A = (abuf0[i] + 64) >> 7;
1903  if (A & 0x100)
1904  A = av_clip_uint8(A);
1905  }
1906 
1907  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1908  dest += step;
1909  }
1910  }
1911 
1912  c->dither_error[0][i] = err[0];
1913  c->dither_error[1][i] = err[1];
1914  c->dither_error[2][i] = err[2];
1915 }
1916 
#if CONFIG_SMALL
/* Small builds: one wrapper per 32-bit layout; whether alpha is actually
 * produced is decided at run time through c->needAlpha. */
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
/* Full builds: dedicated alpha-writing (...a32) variants, selected in
 * ff_sws_init_output_funcs() when the context needs alpha. */
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif
/* No-alpha (...x32) variants for the same 32-bit layouts. */
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif
/* 24-bit packed RGB, never has alpha. */
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)

/* Low-depth dithered formats (1 byte per pixel). */
YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
1941 
/**
 * Write one line of multi-tap vertically scaled Y/U/V/A to planar GBR(A)
 * output (GBRP at 8..16 bits per component, GBRAP), with full chroma
 * interpolation.
 *
 * dest[0..2] are the G, B and R planes (in that order, as the writes below
 * show); dest[3] is the alpha plane when the format has one. For depths
 * above 8 bits the planes are written through dest16 and byte-swapped
 * afterwards if the target endianness differs from the host's.
 */
static void
yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
                  const int16_t **lumSrc, int lumFilterSize,
                  const int16_t *chrFilter, const int16_t **chrUSrc,
                  const int16_t **chrVSrc, int chrFilterSize,
                  const int16_t **alpSrc, uint8_t **dest,
                  int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
    uint16_t **dest16 = (uint16_t**)dest;
    /* shift needed to bring the 30-bit intermediate down to the target
     * depth; SH == 22 means the 8-bit path */
    int SH = 22 + 8 - desc->comp[0].depth;
    int A = 0; // init to silence warning

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 9;                  /* rounding for the >> 10 below */
        int U = (1 << 9) - (128 << 19);  /* also removes the chroma bias */
        int V = (1 << 9) - (128 << 19);
        int R, G, B;

        /* vertical multi-tap filtering of luma and chroma */
        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        Y >>= 10;
        U >>= 10;
        V >>= 10;

        if (hasAlpha) {
            A = 1 << 18;                 /* rounding for the >> 19 below */

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 19;

            /* only clip when out of range; av_clip_uint8 also maps
             * negative values to 0 */
            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        /* YUV -> RGB using the context's conversion coefficients,
         * producing a 30-bit intermediate */
        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << (SH-1);                /* rounding for the >> SH below */
        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        /* clip to 30 bits only when some component overflowed */
        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        if (SH != 22) {
            /* > 8 bit per component: 16-bit stores */
            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;
            if (hasAlpha)
                dest16[3][i] = A;
        } else {
            /* 8 bit per component */
            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;
            if (hasAlpha)
                dest[3][i] = A;
        }
    }
    /* fix endianness in-place when target and host byte order differ */
    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}
2025 
2026 static void
2027 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2028  const int16_t *ubuf[2], const int16_t *vbuf[2],
2029  const int16_t *abuf0, uint8_t *dest, int dstW,
2030  int uvalpha, int y)
2031 {
2032  int hasAlpha = !!abuf0;
2033  int i;
2034 
2035  for (i = 0; i < dstW; i++) {
2036  int Y = (buf0[i] + 64) >> 7;
2037  int A;
2038 
2039  Y = av_clip_uint8(Y);
2040 
2041  if (hasAlpha) {
2042  A = (abuf0[i] + 64) >> 7;
2043  if (A & 0x100)
2044  A = av_clip_uint8(A);
2045  }
2046 
2047  dest[i * 2 ] = Y;
2048  dest[i * 2 + 1] = hasAlpha ? A : 255;
2049  }
2050 }
2051 
2052 static void
2053 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2054  const int16_t *ubuf[2], const int16_t *vbuf[2],
2055  const int16_t *abuf[2], uint8_t *dest, int dstW,
2056  int yalpha, int uvalpha, int y)
2057 {
2058  int hasAlpha = abuf && abuf[0] && abuf[1];
2059  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2060  *abuf0 = hasAlpha ? abuf[0] : NULL,
2061  *abuf1 = hasAlpha ? abuf[1] : NULL;
2062  int yalpha1 = 4096 - yalpha;
2063  int i;
2064 
2065  av_assert2(yalpha <= 4096U);
2066 
2067  for (i = 0; i < dstW; i++) {
2068  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2069  int A;
2070 
2071  Y = av_clip_uint8(Y);
2072 
2073  if (hasAlpha) {
2074  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2075  A = av_clip_uint8(A);
2076  }
2077 
2078  dest[i * 2 ] = Y;
2079  dest[i * 2 + 1] = hasAlpha ? A : 255;
2080  }
2081 }
2082 
2083 static void
2084 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2085  const int16_t **lumSrc, int lumFilterSize,
2086  const int16_t *chrFilter, const int16_t **chrUSrc,
2087  const int16_t **chrVSrc, int chrFilterSize,
2088  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2089 {
2090  int hasAlpha = !!alpSrc;
2091  int i;
2092 
2093  for (i = 0; i < dstW; i++) {
2094  int j;
2095  int Y = 1 << 18, A = 1 << 18;
2096 
2097  for (j = 0; j < lumFilterSize; j++)
2098  Y += lumSrc[j][i] * lumFilter[j];
2099 
2100  Y >>= 19;
2101  if (Y & 0x100)
2102  Y = av_clip_uint8(Y);
2103 
2104  if (hasAlpha) {
2105  for (j = 0; j < lumFilterSize; j++)
2106  A += alpSrc[j][i] * lumFilter[j];
2107 
2108  A >>= 19;
2109 
2110  if (A & 0x100)
2111  A = av_clip_uint8(A);
2112  }
2113 
2114  dest[2 * i ] = Y;
2115  dest[2 * i + 1] = hasAlpha ? A : 255;
2116  }
2117 }
2118 
2119 static void
2120 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2121  const int16_t **_lumSrc, int lumFilterSize,
2122  const int16_t *chrFilter, const int16_t **_chrUSrc,
2123  const int16_t **_chrVSrc, int chrFilterSize,
2124  const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2125 {
2126  const int32_t **lumSrc = (const int32_t **) _lumSrc,
2127  **chrUSrc = (const int32_t **) _chrUSrc,
2128  **chrVSrc = (const int32_t **) _chrVSrc,
2129  **alpSrc = (const int32_t **) _alpSrc;
2130  int hasAlpha = !!alpSrc;
2131  int i;
2132 
2133  for (i = 0; i < dstW; i++) {
2134  int Y = 1 << 14, U = 1 << 14;
2135  int V = 1 << 14, A = 1 << 14;
2136  int j;
2137 
2138  Y -= 0x40000000;
2139  U -= 0x40000000;
2140  V -= 0x40000000;
2141  A -= 0x40000000;
2142 
2143  for (j = 0; j < lumFilterSize; j++)
2144  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2145 
2146  for (j = 0; j < chrFilterSize; j++)
2147  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2148 
2149  for (j = 0; j < chrFilterSize; j++)
2150  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2151 
2152  if (hasAlpha)
2153  for (j = 0; j < lumFilterSize; j++)
2154  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2155 
2156  Y = 0x8000 + av_clip_int16(Y >> 15);
2157  U = 0x8000 + av_clip_int16(U >> 15);
2158  V = 0x8000 + av_clip_int16(V >> 15);
2159  A = 0x8000 + av_clip_int16(A >> 15);
2160 
2161  AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2162  AV_WL16(dest + 8 * i + 2, Y);
2163  AV_WL16(dest + 8 * i + 4, U);
2164  AV_WL16(dest + 8 * i + 6, V);
2165  }
2166 }
2167 
2169  yuv2planar1_fn *yuv2plane1,
2170  yuv2planarX_fn *yuv2planeX,
2171  yuv2interleavedX_fn *yuv2nv12cX,
2172  yuv2packed1_fn *yuv2packed1,
2173  yuv2packed2_fn *yuv2packed2,
2174  yuv2packedX_fn *yuv2packedX,
2175  yuv2anyX_fn *yuv2anyX)
2176 {
2177  enum AVPixelFormat dstFormat = c->dstFormat;
2178  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
2179 
2180  if (dstFormat == AV_PIX_FMT_P010LE || dstFormat == AV_PIX_FMT_P010BE) {
2181  *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
2182  *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
2183  *yuv2nv12cX = yuv2p010cX_c;
2184  } else if (is16BPS(dstFormat)) {
2185  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2186  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2187  } else if (is9_OR_10BPS(dstFormat)) {
2188  if (desc->comp[0].depth == 9) {
2189  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2190  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2191  } else if (desc->comp[0].depth == 10) {
2192  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2193  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2194  } else if (desc->comp[0].depth == 12) {
2195  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2196  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2197  } else if (desc->comp[0].depth == 14) {
2198  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2199  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2200  } else
2201  av_assert0(0);
2202  } else {
2203  *yuv2plane1 = yuv2plane1_8_c;
2204  *yuv2planeX = yuv2planeX_8_c;
2205  if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)
2206  *yuv2nv12cX = yuv2nv12cX_c;
2207  }
2208 
2209  if(c->flags & SWS_FULL_CHR_H_INT) {
2210  switch (dstFormat) {
2211  case AV_PIX_FMT_RGBA:
2212 #if CONFIG_SMALL
2213  *yuv2packedX = yuv2rgba32_full_X_c;
2214  *yuv2packed2 = yuv2rgba32_full_2_c;
2215  *yuv2packed1 = yuv2rgba32_full_1_c;
2216 #else
2217 #if CONFIG_SWSCALE_ALPHA
2218  if (c->needAlpha) {
2219  *yuv2packedX = yuv2rgba32_full_X_c;
2220  *yuv2packed2 = yuv2rgba32_full_2_c;
2221  *yuv2packed1 = yuv2rgba32_full_1_c;
2222  } else
2223 #endif /* CONFIG_SWSCALE_ALPHA */
2224  {
2225  *yuv2packedX = yuv2rgbx32_full_X_c;
2226  *yuv2packed2 = yuv2rgbx32_full_2_c;
2227  *yuv2packed1 = yuv2rgbx32_full_1_c;
2228  }
2229 #endif /* !CONFIG_SMALL */
2230  break;
2231  case AV_PIX_FMT_ARGB:
2232 #if CONFIG_SMALL
2233  *yuv2packedX = yuv2argb32_full_X_c;
2234  *yuv2packed2 = yuv2argb32_full_2_c;
2235  *yuv2packed1 = yuv2argb32_full_1_c;
2236 #else
2237 #if CONFIG_SWSCALE_ALPHA
2238  if (c->needAlpha) {
2239  *yuv2packedX = yuv2argb32_full_X_c;
2240  *yuv2packed2 = yuv2argb32_full_2_c;
2241  *yuv2packed1 = yuv2argb32_full_1_c;
2242  } else
2243 #endif /* CONFIG_SWSCALE_ALPHA */
2244  {
2245  *yuv2packedX = yuv2xrgb32_full_X_c;
2246  *yuv2packed2 = yuv2xrgb32_full_2_c;
2247  *yuv2packed1 = yuv2xrgb32_full_1_c;
2248  }
2249 #endif /* !CONFIG_SMALL */
2250  break;
2251  case AV_PIX_FMT_BGRA:
2252 #if CONFIG_SMALL
2253  *yuv2packedX = yuv2bgra32_full_X_c;
2254  *yuv2packed2 = yuv2bgra32_full_2_c;
2255  *yuv2packed1 = yuv2bgra32_full_1_c;
2256 #else
2257 #if CONFIG_SWSCALE_ALPHA
2258  if (c->needAlpha) {
2259  *yuv2packedX = yuv2bgra32_full_X_c;
2260  *yuv2packed2 = yuv2bgra32_full_2_c;
2261  *yuv2packed1 = yuv2bgra32_full_1_c;
2262  } else
2263 #endif /* CONFIG_SWSCALE_ALPHA */
2264  {
2265  *yuv2packedX = yuv2bgrx32_full_X_c;
2266  *yuv2packed2 = yuv2bgrx32_full_2_c;
2267  *yuv2packed1 = yuv2bgrx32_full_1_c;
2268  }
2269 #endif /* !CONFIG_SMALL */
2270  break;
2271  case AV_PIX_FMT_ABGR:
2272 #if CONFIG_SMALL
2273  *yuv2packedX = yuv2abgr32_full_X_c;
2274  *yuv2packed2 = yuv2abgr32_full_2_c;
2275  *yuv2packed1 = yuv2abgr32_full_1_c;
2276 #else
2277 #if CONFIG_SWSCALE_ALPHA
2278  if (c->needAlpha) {
2279  *yuv2packedX = yuv2abgr32_full_X_c;
2280  *yuv2packed2 = yuv2abgr32_full_2_c;
2281  *yuv2packed1 = yuv2abgr32_full_1_c;
2282  } else
2283 #endif /* CONFIG_SWSCALE_ALPHA */
2284  {
2285  *yuv2packedX = yuv2xbgr32_full_X_c;
2286  *yuv2packed2 = yuv2xbgr32_full_2_c;
2287  *yuv2packed1 = yuv2xbgr32_full_1_c;
2288  }
2289 #endif /* !CONFIG_SMALL */
2290  break;
2291  case AV_PIX_FMT_RGBA64LE:
2292 #if CONFIG_SWSCALE_ALPHA
2293  if (c->needAlpha) {
2294  *yuv2packedX = yuv2rgba64le_full_X_c;
2295  *yuv2packed2 = yuv2rgba64le_full_2_c;
2296  *yuv2packed1 = yuv2rgba64le_full_1_c;
2297  } else
2298 #endif /* CONFIG_SWSCALE_ALPHA */
2299  {
2300  *yuv2packedX = yuv2rgbx64le_full_X_c;
2301  *yuv2packed2 = yuv2rgbx64le_full_2_c;
2302  *yuv2packed1 = yuv2rgbx64le_full_1_c;
2303  }
2304  break;
2305  case AV_PIX_FMT_RGBA64BE:
2306 #if CONFIG_SWSCALE_ALPHA
2307  if (c->needAlpha) {
2308  *yuv2packedX = yuv2rgba64be_full_X_c;
2309  *yuv2packed2 = yuv2rgba64be_full_2_c;
2310  *yuv2packed1 = yuv2rgba64be_full_1_c;
2311  } else
2312 #endif /* CONFIG_SWSCALE_ALPHA */
2313  {
2314  *yuv2packedX = yuv2rgbx64be_full_X_c;
2315  *yuv2packed2 = yuv2rgbx64be_full_2_c;
2316  *yuv2packed1 = yuv2rgbx64be_full_1_c;
2317  }
2318  break;
2319  case AV_PIX_FMT_BGRA64LE:
2320 #if CONFIG_SWSCALE_ALPHA
2321  if (c->needAlpha) {
2322  *yuv2packedX = yuv2bgra64le_full_X_c;
2323  *yuv2packed2 = yuv2bgra64le_full_2_c;
2324  *yuv2packed1 = yuv2bgra64le_full_1_c;
2325  } else
2326 #endif /* CONFIG_SWSCALE_ALPHA */
2327  {
2328  *yuv2packedX = yuv2bgrx64le_full_X_c;
2329  *yuv2packed2 = yuv2bgrx64le_full_2_c;
2330  *yuv2packed1 = yuv2bgrx64le_full_1_c;
2331  }
2332  break;
2333  case AV_PIX_FMT_BGRA64BE:
2334 #if CONFIG_SWSCALE_ALPHA
2335  if (c->needAlpha) {
2336  *yuv2packedX = yuv2bgra64be_full_X_c;
2337  *yuv2packed2 = yuv2bgra64be_full_2_c;
2338  *yuv2packed1 = yuv2bgra64be_full_1_c;
2339  } else
2340 #endif /* CONFIG_SWSCALE_ALPHA */
2341  {
2342  *yuv2packedX = yuv2bgrx64be_full_X_c;
2343  *yuv2packed2 = yuv2bgrx64be_full_2_c;
2344  *yuv2packed1 = yuv2bgrx64be_full_1_c;
2345  }
2346  break;
2347 
2348  case AV_PIX_FMT_RGB24:
2349  *yuv2packedX = yuv2rgb24_full_X_c;
2350  *yuv2packed2 = yuv2rgb24_full_2_c;
2351  *yuv2packed1 = yuv2rgb24_full_1_c;
2352  break;
2353  case AV_PIX_FMT_BGR24:
2354  *yuv2packedX = yuv2bgr24_full_X_c;
2355  *yuv2packed2 = yuv2bgr24_full_2_c;
2356  *yuv2packed1 = yuv2bgr24_full_1_c;
2357  break;
2358  case AV_PIX_FMT_RGB48LE:
2359  *yuv2packedX = yuv2rgb48le_full_X_c;
2360  *yuv2packed2 = yuv2rgb48le_full_2_c;
2361  *yuv2packed1 = yuv2rgb48le_full_1_c;
2362  break;
2363  case AV_PIX_FMT_BGR48LE:
2364  *yuv2packedX = yuv2bgr48le_full_X_c;
2365  *yuv2packed2 = yuv2bgr48le_full_2_c;
2366  *yuv2packed1 = yuv2bgr48le_full_1_c;
2367  break;
2368  case AV_PIX_FMT_RGB48BE:
2369  *yuv2packedX = yuv2rgb48be_full_X_c;
2370  *yuv2packed2 = yuv2rgb48be_full_2_c;
2371  *yuv2packed1 = yuv2rgb48be_full_1_c;
2372  break;
2373  case AV_PIX_FMT_BGR48BE:
2374  *yuv2packedX = yuv2bgr48be_full_X_c;
2375  *yuv2packed2 = yuv2bgr48be_full_2_c;
2376  *yuv2packed1 = yuv2bgr48be_full_1_c;
2377  break;
2378  case AV_PIX_FMT_BGR4_BYTE:
2379  *yuv2packedX = yuv2bgr4_byte_full_X_c;
2380  *yuv2packed2 = yuv2bgr4_byte_full_2_c;
2381  *yuv2packed1 = yuv2bgr4_byte_full_1_c;
2382  break;
2383  case AV_PIX_FMT_RGB4_BYTE:
2384  *yuv2packedX = yuv2rgb4_byte_full_X_c;
2385  *yuv2packed2 = yuv2rgb4_byte_full_2_c;
2386  *yuv2packed1 = yuv2rgb4_byte_full_1_c;
2387  break;
2388  case AV_PIX_FMT_BGR8:
2389  *yuv2packedX = yuv2bgr8_full_X_c;
2390  *yuv2packed2 = yuv2bgr8_full_2_c;
2391  *yuv2packed1 = yuv2bgr8_full_1_c;
2392  break;
2393  case AV_PIX_FMT_RGB8:
2394  *yuv2packedX = yuv2rgb8_full_X_c;
2395  *yuv2packed2 = yuv2rgb8_full_2_c;
2396  *yuv2packed1 = yuv2rgb8_full_1_c;
2397  break;
2398  case AV_PIX_FMT_GBRP:
2399  case AV_PIX_FMT_GBRP9BE:
2400  case AV_PIX_FMT_GBRP9LE:
2401  case AV_PIX_FMT_GBRP10BE:
2402  case AV_PIX_FMT_GBRP10LE:
2403  case AV_PIX_FMT_GBRP12BE:
2404  case AV_PIX_FMT_GBRP12LE:
2405  case AV_PIX_FMT_GBRP14BE:
2406  case AV_PIX_FMT_GBRP14LE:
2407  case AV_PIX_FMT_GBRP16BE:
2408  case AV_PIX_FMT_GBRP16LE:
2409  case AV_PIX_FMT_GBRAP:
2410  *yuv2anyX = yuv2gbrp_full_X_c;
2411  break;
2412  }
2413  if (!*yuv2packedX && !*yuv2anyX)
2414  goto YUV_PACKED;
2415  } else {
2416  YUV_PACKED:
2417  switch (dstFormat) {
2418  case AV_PIX_FMT_RGBA64LE:
2419 #if CONFIG_SWSCALE_ALPHA
2420  if (c->needAlpha) {
2421  *yuv2packed1 = yuv2rgba64le_1_c;
2422  *yuv2packed2 = yuv2rgba64le_2_c;
2423  *yuv2packedX = yuv2rgba64le_X_c;
2424  } else
2425 #endif /* CONFIG_SWSCALE_ALPHA */
2426  {
2427  *yuv2packed1 = yuv2rgbx64le_1_c;
2428  *yuv2packed2 = yuv2rgbx64le_2_c;
2429  *yuv2packedX = yuv2rgbx64le_X_c;
2430  }
2431  break;
2432  case AV_PIX_FMT_RGBA64BE:
2433 #if CONFIG_SWSCALE_ALPHA
2434  if (c->needAlpha) {
2435  *yuv2packed1 = yuv2rgba64be_1_c;
2436  *yuv2packed2 = yuv2rgba64be_2_c;
2437  *yuv2packedX = yuv2rgba64be_X_c;
2438  } else
2439 #endif /* CONFIG_SWSCALE_ALPHA */
2440  {
2441  *yuv2packed1 = yuv2rgbx64be_1_c;
2442  *yuv2packed2 = yuv2rgbx64be_2_c;
2443  *yuv2packedX = yuv2rgbx64be_X_c;
2444  }
2445  break;
2446  case AV_PIX_FMT_BGRA64LE:
2447 #if CONFIG_SWSCALE_ALPHA
2448  if (c->needAlpha) {
2449  *yuv2packed1 = yuv2bgra64le_1_c;
2450  *yuv2packed2 = yuv2bgra64le_2_c;
2451  *yuv2packedX = yuv2bgra64le_X_c;
2452  } else
2453 #endif /* CONFIG_SWSCALE_ALPHA */
2454  {
2455  *yuv2packed1 = yuv2bgrx64le_1_c;
2456  *yuv2packed2 = yuv2bgrx64le_2_c;
2457  *yuv2packedX = yuv2bgrx64le_X_c;
2458  }
2459  break;
2460  case AV_PIX_FMT_BGRA64BE:
2461 #if CONFIG_SWSCALE_ALPHA
2462  if (c->needAlpha) {
2463  *yuv2packed1 = yuv2bgra64be_1_c;
2464  *yuv2packed2 = yuv2bgra64be_2_c;
2465  *yuv2packedX = yuv2bgra64be_X_c;
2466  } else
2467 #endif /* CONFIG_SWSCALE_ALPHA */
2468  {
2469  *yuv2packed1 = yuv2bgrx64be_1_c;
2470  *yuv2packed2 = yuv2bgrx64be_2_c;
2471  *yuv2packedX = yuv2bgrx64be_X_c;
2472  }
2473  break;
2474  case AV_PIX_FMT_RGB48LE:
2475  *yuv2packed1 = yuv2rgb48le_1_c;
2476  *yuv2packed2 = yuv2rgb48le_2_c;
2477  *yuv2packedX = yuv2rgb48le_X_c;
2478  break;
2479  case AV_PIX_FMT_RGB48BE:
2480  *yuv2packed1 = yuv2rgb48be_1_c;
2481  *yuv2packed2 = yuv2rgb48be_2_c;
2482  *yuv2packedX = yuv2rgb48be_X_c;
2483  break;
2484  case AV_PIX_FMT_BGR48LE:
2485  *yuv2packed1 = yuv2bgr48le_1_c;
2486  *yuv2packed2 = yuv2bgr48le_2_c;
2487  *yuv2packedX = yuv2bgr48le_X_c;
2488  break;
2489  case AV_PIX_FMT_BGR48BE:
2490  *yuv2packed1 = yuv2bgr48be_1_c;
2491  *yuv2packed2 = yuv2bgr48be_2_c;
2492  *yuv2packedX = yuv2bgr48be_X_c;
2493  break;
2494  case AV_PIX_FMT_RGB32:
2495  case AV_PIX_FMT_BGR32:
2496 #if CONFIG_SMALL
2497  *yuv2packed1 = yuv2rgb32_1_c;
2498  *yuv2packed2 = yuv2rgb32_2_c;
2499  *yuv2packedX = yuv2rgb32_X_c;
2500 #else
2501 #if CONFIG_SWSCALE_ALPHA
2502  if (c->needAlpha) {
2503  *yuv2packed1 = yuv2rgba32_1_c;
2504  *yuv2packed2 = yuv2rgba32_2_c;
2505  *yuv2packedX = yuv2rgba32_X_c;
2506  } else
2507 #endif /* CONFIG_SWSCALE_ALPHA */
2508  {
2509  *yuv2packed1 = yuv2rgbx32_1_c;
2510  *yuv2packed2 = yuv2rgbx32_2_c;
2511  *yuv2packedX = yuv2rgbx32_X_c;
2512  }
2513 #endif /* !CONFIG_SMALL */
2514  break;
2515  case AV_PIX_FMT_RGB32_1:
2516  case AV_PIX_FMT_BGR32_1:
2517 #if CONFIG_SMALL
2518  *yuv2packed1 = yuv2rgb32_1_1_c;
2519  *yuv2packed2 = yuv2rgb32_1_2_c;
2520  *yuv2packedX = yuv2rgb32_1_X_c;
2521 #else
2522 #if CONFIG_SWSCALE_ALPHA
2523  if (c->needAlpha) {
2524  *yuv2packed1 = yuv2rgba32_1_1_c;
2525  *yuv2packed2 = yuv2rgba32_1_2_c;
2526  *yuv2packedX = yuv2rgba32_1_X_c;
2527  } else
2528 #endif /* CONFIG_SWSCALE_ALPHA */
2529  {
2530  *yuv2packed1 = yuv2rgbx32_1_1_c;
2531  *yuv2packed2 = yuv2rgbx32_1_2_c;
2532  *yuv2packedX = yuv2rgbx32_1_X_c;
2533  }
2534 #endif /* !CONFIG_SMALL */
2535  break;
2536  case AV_PIX_FMT_RGB24:
2537  *yuv2packed1 = yuv2rgb24_1_c;
2538  *yuv2packed2 = yuv2rgb24_2_c;
2539  *yuv2packedX = yuv2rgb24_X_c;
2540  break;
2541  case AV_PIX_FMT_BGR24:
2542  *yuv2packed1 = yuv2bgr24_1_c;
2543  *yuv2packed2 = yuv2bgr24_2_c;
2544  *yuv2packedX = yuv2bgr24_X_c;
2545  break;
2546  case AV_PIX_FMT_RGB565LE:
2547  case AV_PIX_FMT_RGB565BE:
2548  case AV_PIX_FMT_BGR565LE:
2549  case AV_PIX_FMT_BGR565BE:
2550  *yuv2packed1 = yuv2rgb16_1_c;
2551  *yuv2packed2 = yuv2rgb16_2_c;
2552  *yuv2packedX = yuv2rgb16_X_c;
2553  break;
2554  case AV_PIX_FMT_RGB555LE:
2555  case AV_PIX_FMT_RGB555BE:
2556  case AV_PIX_FMT_BGR555LE:
2557  case AV_PIX_FMT_BGR555BE:
2558  *yuv2packed1 = yuv2rgb15_1_c;
2559  *yuv2packed2 = yuv2rgb15_2_c;
2560  *yuv2packedX = yuv2rgb15_X_c;
2561  break;
2562  case AV_PIX_FMT_RGB444LE:
2563  case AV_PIX_FMT_RGB444BE:
2564  case AV_PIX_FMT_BGR444LE:
2565  case AV_PIX_FMT_BGR444BE:
2566  *yuv2packed1 = yuv2rgb12_1_c;
2567  *yuv2packed2 = yuv2rgb12_2_c;
2568  *yuv2packedX = yuv2rgb12_X_c;
2569  break;
2570  case AV_PIX_FMT_RGB8:
2571  case AV_PIX_FMT_BGR8:
2572  *yuv2packed1 = yuv2rgb8_1_c;
2573  *yuv2packed2 = yuv2rgb8_2_c;
2574  *yuv2packedX = yuv2rgb8_X_c;
2575  break;
2576  case AV_PIX_FMT_RGB4:
2577  case AV_PIX_FMT_BGR4:
2578  *yuv2packed1 = yuv2rgb4_1_c;
2579  *yuv2packed2 = yuv2rgb4_2_c;
2580  *yuv2packedX = yuv2rgb4_X_c;
2581  break;
2582  case AV_PIX_FMT_RGB4_BYTE:
2583  case AV_PIX_FMT_BGR4_BYTE:
2584  *yuv2packed1 = yuv2rgb4b_1_c;
2585  *yuv2packed2 = yuv2rgb4b_2_c;
2586  *yuv2packedX = yuv2rgb4b_X_c;
2587  break;
2588  }
2589  }
2590  switch (dstFormat) {
2591  case AV_PIX_FMT_MONOWHITE:
2592  *yuv2packed1 = yuv2monowhite_1_c;
2593  *yuv2packed2 = yuv2monowhite_2_c;
2594  *yuv2packedX = yuv2monowhite_X_c;
2595  break;
2596  case AV_PIX_FMT_MONOBLACK:
2597  *yuv2packed1 = yuv2monoblack_1_c;
2598  *yuv2packed2 = yuv2monoblack_2_c;
2599  *yuv2packedX = yuv2monoblack_X_c;
2600  break;
2601  case AV_PIX_FMT_YUYV422:
2602  *yuv2packed1 = yuv2yuyv422_1_c;
2603  *yuv2packed2 = yuv2yuyv422_2_c;
2604  *yuv2packedX = yuv2yuyv422_X_c;
2605  break;
2606  case AV_PIX_FMT_YVYU422:
2607  *yuv2packed1 = yuv2yvyu422_1_c;
2608  *yuv2packed2 = yuv2yvyu422_2_c;
2609  *yuv2packedX = yuv2yvyu422_X_c;
2610  break;
2611  case AV_PIX_FMT_UYVY422:
2612  *yuv2packed1 = yuv2uyvy422_1_c;
2613  *yuv2packed2 = yuv2uyvy422_2_c;
2614  *yuv2packedX = yuv2uyvy422_X_c;
2615  break;
2616  case AV_PIX_FMT_YA8:
2617  *yuv2packed1 = yuv2ya8_1_c;
2618  *yuv2packed2 = yuv2ya8_2_c;
2619  *yuv2packedX = yuv2ya8_X_c;
2620  break;
2621  case AV_PIX_FMT_AYUV64LE:
2622  *yuv2packedX = yuv2ayuv64le_X_c;
2623  break;
2624  }
2625 }
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:82
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:771
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1517
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2120
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:2168
static int shift(int a, int b)
Definition: sonic.c:82
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2266
Definition: vf_geq.c:46
#define YUVRGB_TABLE_HEADROOM
#define X_DITHER(u, v)
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:396
8 bits gray, 8 bits alpha
Definition: pixfmt.h:154
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:220
#define A1
Definition: binkdsp.c:31
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:64
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:85
const char * g
Definition: vf_curves.c:112
const char * desc
Definition: nvenc.c:101
#define accumulate_bit(acc, val)
Definition: output.c:406
int acc
Definition: yuv2rgb.c:546
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:276
const uint8_t ff_dither_2x2_8[][8]
Definition: output.c:45
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1862
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:180
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:116
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:219
const char * b
Definition: vf_curves.c:113
static void yuv2p010l1_LE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:375
uint8_t * table_bU[256+2 *YUVRGB_TABLE_HEADROOM]
#define av_bswap16
Definition: bswap.h:31
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Convenience header that includes libavutil's core.
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:119
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:269
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:151
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:330
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:268
const uint8_t ff_dither_8x8_220[][8]
Definition: output.c:84
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:144
#define r_b
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:158
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:88
#define output_pixels(pos, Y1, U, Y2, V)
Definition: output.c:616
Macro definitions for various function/variable attributes.
#define b_r
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:114
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:86
const uint8_t ff_dither_8x8_32[][8]
Definition: output.c:59
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_dither_2x2_4[][8]
Definition: output.c:39
void(* yuv2interleavedX_fn)(struct SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:173
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
const uint8_t ff_dither_4x4_16[][8]
Definition: output.c:51
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:111
#define Y
Definition: vf_boxblur.c:76
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:217
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2053
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:150
#define SWS_FULL_CHR_H_INT
Definition: swscale.h:79
Definition: vf_geq.c:46
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:113
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:95
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:1943
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:674
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:101
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:186
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1817
external API header
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1767
enum AVPixelFormat dstFormat
Destination pixel format.
uint8_t * table_gU[256+2 *YUVRGB_TABLE_HEADROOM]
static void yuv2p010cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:352
static void yuv2p010l1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:322
#define A(x)
Definition: vp56_arith.h:28
int * dither_error[4]
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
#define U(x)
Definition: vp56_arith.h:37
planar GBR 4:4:4 27bpp, big-endian
Definition: pixfmt.h:182
#define A2
Definition: binkdsp.c:32
#define B_R
Definition: output.c:768
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:321
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1143
static void yuv2p010l1_BE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:382
const uint8_t * d64
Definition: yuv2rgb.c:494
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:96
const char * r
Definition: vf_curves.c:111
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:777
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:389
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:90
simple assert() macros that are a bit more flexible than ISO C assert().
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
Definition: pixfmt.h:299
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:206
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:226
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:93
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:160
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:94
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:470
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:707
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
as above, but U and V bytes are swapped
Definition: pixfmt.h:91
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2084
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:89
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:531
static av_always_inline int is9_OR_10BPS(enum AVPixelFormat pix_fmt)
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:268
int32_t
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
Definition: pixfmt.h:222
int table_gV[256+2 *YUVRGB_TABLE_HEADROOM]
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:65
uint8_t * table_rV[256+2 *YUVRGB_TABLE_HEADROOM]
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
Definition: pixfmt.h:298
#define src
Definition: vp9dsp.c:530
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:159
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
Definition: pixfmt.h:118
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:334
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:320
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes)
Definition: output.c:1223
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:84
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:417
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
Definition: output.c:2027
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:184
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:318
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1477
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:63
planar GBR 4:4:4 42bpp, little-endian
Definition: pixfmt.h:271
const uint8_t ff_dither_8x8_73[][8]
Definition: output.c:71
void * buf
Definition: avisynth_c.h:690
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:635
byte swapping routines
const uint8_t * d32
Definition: yuv2rgb.c:493
planar GBR 4:4:4 42bpp, big-endian
Definition: pixfmt.h:270
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1304
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:121
#define SH(val, pdst)
#define u(width,...)
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1027
static void yuv2p010lX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:335
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
Definition: output.c:1596
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:333
const uint8_t * chrDither8
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:115
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:153
SwsDither dither
#define A_DITHER(u, v)
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:72
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
if(ret< 0)
Definition: vf_mcdeint.c:282
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:71
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:229
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1423
planar GBR 4:4:4 27bpp, little-endian
Definition: pixfmt.h:183
static double c[64]
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:110
#define AV_PIX_FMT_BGR444
Definition: pixfmt.h:335
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
#define output_pixel(pos, val, bias, signedness)
Definition: output.c:769
const uint8_t * d128
Definition: yuv2rgb.c:545
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:87
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:329
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
Definition: output.c:1627
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1091
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:924
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:319
static av_always_inline void yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:193
static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
Definition: output.c:278
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:859
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:328
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:120
#define R_B
Definition: output.c:767
#define av_always_inline
Definition: attributes.h:39
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:187
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:152
Definition: vf_geq.c:46
int depth
Number of bits in the component.
Definition: pixdesc.h:58
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
Definition: output.c:581
int flags
Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:185
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:218
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:293
#define V
Definition: avdct.c:30