FFmpeg
output.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "rgb2rgb.h"
36 #include "swscale.h"
37 #include "swscale_internal.h"
38 
/* 2x2 ordered-dither matrix, values 0..3 (name suffix = max value + 1).
 * Each row is widened to 8 bytes for aligned 8-byte loads, and the first
 * row is repeated at the end — presumably so callers can address row y and
 * row y+1 without wrapping (TODO confirm against users of the table). */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
{  1,   3,   1,   3,   1,   3,   1,   3, },
{  2,   0,   2,   0,   2,   0,   2,   0, },
{  1,   3,   1,   3,   1,   3,   1,   3, },
};
44 
/* 2x2 ordered-dither matrix, values 0..6 in steps of 2 (amplitude 8).
 * Same layout conventions as ff_dither_2x2_4: 8-byte rows, first row
 * repeated at the end. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
{  6,   2,   6,   2,   6,   2,   6,   2, },
{  0,   4,   0,   4,   0,   4,   0,   4, },
{  6,   2,   6,   2,   6,   2,   6,   2, },
};
50 
/* 4x4 ordered-dither matrix, values 0..15; each 8-byte row holds the 4-wide
 * pattern twice.  First row repeated at the end, as with the other tables. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
{  8,   4,  11,   7,   8,   4,  11,   7, },
{  2,  14,   1,  13,   2,  14,   1,  13, },
{ 10,   6,   9,   5,  10,   6,   9,   5, },
{  0,  12,   3,  15,   0,  12,   3,  15, },
{  8,   4,  11,   7,   8,   4,  11,   7, },
};
58 
/* 8x8 ordered-dither matrix, values 0..31.  9 rows: the first row is
 * repeated at the end so two consecutive rows are always addressable. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
{ 17,   9,  23,  15,  16,   8,  22,  14, },
{  5,  29,   3,  27,   4,  28,   2,  26, },
{ 21,  13,  19,  11,  20,  12,  18,  10, },
{  0,  24,   6,  30,   1,  25,   7,  31, },
{ 16,   8,  22,  14,  17,   9,  23,  15, },
{  4,  28,   2,  26,   5,  29,   3,  27, },
{ 20,  12,  18,  10,  21,  13,  19,  11, },
{  1,  25,   7,  31,   0,  24,   6,  30, },
{ 17,   9,  23,  15,  16,   8,  22,  14, },
};
70 
/* 8x8 ordered-dither matrix, values 0..72.  Same 9-row layout as the other
 * 8x8 tables (first row repeated). */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
{  0,  55,  14,  68,   3,  58,  17,  72, },
{ 37,  18,  50,  32,  40,  22,  54,  35, },
{  9,  64,   5,  59,  13,  67,   8,  63, },
{ 46,  27,  41,  23,  49,  31,  44,  26, },
{  2,  57,  16,  71,   1,  56,  15,  70, },
{ 39,  21,  52,  34,  38,  19,  51,  33, },
{ 11,  66,   7,  62,  10,  65,   6,  60, },
{ 48,  30,  43,  25,  47,  29,  42,  24, },
{  0,  55,  14,  68,   3,  58,  17,  72, },
};
82 
#if 1
/* 8x8 dither offsets with amplitude 220, added to luma before the 1-bit
 * threshold in the mono output path (see accumulate_bit below).  The
 * disabled #elif/#else variants pre-compensate for display gammas of
 * 1.5 / 2.0 / 2.5; this first (linear) table is the one compiled in. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{117,  62, 158, 103, 113,  58, 155, 100, },
{ 34, 199,  21, 186,  31, 196,  17, 182, },
{144,  89, 131,  76, 141,  86, 127,  72, },
{  0, 165,  41, 206,  10, 175,  52, 217, },
{110,  55, 151,  96, 120,  65, 162, 107, },
{ 28, 193,  14, 179,  38, 203,  24, 189, },
{138,  83, 124,  69, 148,  93, 134,  79, },
{  7, 172,  48, 213,   3, 168,  45, 210, },
{117,  62, 158, 103, 113,  58, 155, 100, },
};
#elif 1
// tries to correct a gamma of 1.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{  0, 143,  18, 200,   2, 156,  25, 215, },
{ 78,  28, 125,  64,  89,  36, 138,  74, },
{ 10, 180,   3, 161,  16, 195,   8, 175, },
{109,  51,  93,  38, 121,  60, 105,  47, },
{  1, 152,  23, 210,   0, 147,  20, 205, },
{ 85,  33, 134,  71,  81,  30, 130,  67, },
{ 14, 190,   6, 171,  12, 185,   5, 166, },
{117,  57, 101,  44, 113,  54,  97,  41, },
{  0, 143,  18, 200,   2, 156,  25, 215, },
};
#elif 1
// tries to correct a gamma of 2.0
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{  0, 124,   8, 193,   0, 140,  12, 213, },
{ 55,  14, 104,  42,  66,  19, 119,  52, },
{  3, 168,   1, 145,   6, 187,   3, 162, },
{ 86,  31,  70,  21,  99,  39,  82,  28, },
{  0, 134,  11, 206,   0, 129,   9, 200, },
{ 62,  17, 114,  48,  58,  16, 109,  45, },
{  5, 181,   2, 157,   4, 175,   1, 151, },
{ 95,  36,  78,  26,  90,  34,  74,  24, },
{  0, 124,   8, 193,   0, 140,  12, 213, },
};
#else
// tries to correct a gamma of 2.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{  0, 107,   3, 187,   0, 125,   6, 212, },
{ 39,   7,  86,  28,  49,  11, 102,  36, },
{  1, 158,   0, 131,   3, 180,   1, 151, },
{ 68,  19,  52,  12,  81,  25,  64,  17, },
{  0, 119,   5, 203,   0, 113,   4, 195, },
{ 45,   9,  96,  33,  42,   8,  91,  30, },
{  2, 172,   1, 144,   2, 165,   0, 137, },
{ 77,  23,  60,  15,  72,  21,  56,  14, },
{  0, 107,   3, 187,   0, 125,   6, 212, },
};
#endif
135 
/* IS_BE(LE) -> 0, IS_BE(BE) -> 1: token-pasting helper turning an
 * endianness suffix into a boolean for the templates below. */
#define IS_BE_LE 0
#define IS_BE_BE 1
/* ENDIAN_IDENTIFIER needs to be "BE" or "LE". */
#define IS_BE(ENDIAN_IDENTIFIER) IS_BE_ ## ENDIAN_IDENTIFIER

/* Store one 16-bit sample at 'pos': clip (val >> shift) to signed/unsigned
 * 16 bits, add 'bias' and write big- or little-endian.  Captures
 * 'big_endian' and 'shift' from the enclosing scope, so those names must
 * not be changed in the functions that use it. */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
147 
148 static av_always_inline void
149 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
150  int big_endian, int output_bits)
151 {
152  int i;
153  int shift = 3;
154  av_assert0(output_bits == 16);
155 
156  for (i = 0; i < dstW; i++) {
157  int val = src[i] + (1 << (shift - 1));
158  output_pixel(&dest[i], val, 0, uint);
159  }
160 }
161 
/**
 * Multi-tap vertical scaling to 16-bit output, from 32-bit intermediates.
 * The accumulator is offset by -0x40000000 so it stays inside signed int
 * range (see the in-loop comment); after the >>15 in output_pixel that
 * offset becomes -0x8000, which the 0x8000 bias restores.
 */
static av_always_inline void
yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 15;
    av_assert0(output_bits == 16);

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding term */
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs, the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];

        output_pixel(&dest[i], val, 0x8000, int);
    }
}
187 
/**
 * Multi-tap vertical scaling of interleaved (semi-planar) chroma to 16-bit
 * output: U is written to even, V to odd 16-bit slots.  The int16_t source
 * pointers are reinterpreted as 32-bit intermediates, and the same
 * -0x40000000 / +0x8000 accumulator-offset trick as in
 * yuv2planeX_16_c_template is applied.
 */
static av_always_inline void
yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW, int output_bits)
{
    uint16_t *dest = (uint16_t*)dest8;
    const int32_t **uSrc = (const int32_t **)chrUSrc;
    const int32_t **vSrc = (const int32_t **)chrVSrc;
    int shift = 15; /* read by output_pixel */
    int i, j;
    av_assert0(output_bits == 16);

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding term */
        int v = 1 << (shift - 1);

        /* See yuv2planeX_16_c_template for details. */
        u -= 0x40000000;
        v -= 0x40000000;
        for (j = 0; j < chrFilterSize; j++) {
            u += uSrc[j][i] * (unsigned)chrFilter[j];
            v += vSrc[j][i] * (unsigned)chrFilter[j];
        }

        output_pixel(&dest[2*i]  , u, 0x8000, int);
        output_pixel(&dest[2*i+1], v, 0x8000, int);
    }
}
217 
218 static av_always_inline void
219 yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
220 {
221  static const int big_endian = HAVE_BIGENDIAN;
222  static const int shift = 3;
223  static const float float_mult = 1.0f / 65535.0f;
224  int i, val;
225  uint16_t val_uint;
226 
227  for (i = 0; i < dstW; ++i){
228  val = src[i] + (1 << (shift - 1));
229  output_pixel(&val_uint, val, 0, uint);
230  dest[i] = float_mult * (float)val_uint;
231  }
232 }
233 
234 static av_always_inline void
235 yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
236 {
237  static const int big_endian = HAVE_BIGENDIAN;
238  static const int shift = 3;
239  static const float float_mult = 1.0f / 65535.0f;
240  int i, val;
241  uint16_t val_uint;
242 
243  for (i = 0; i < dstW; ++i){
244  val = src[i] + (1 << (shift - 1));
245  output_pixel(&val_uint, val, 0, uint);
246  dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
247  }
248 }
249 
/**
 * Multi-tap vertical scaling to native-endian float in [0,1].  Uses the
 * same -0x40000000 / +0x8000 accumulator-offset trick as the 16-bit integer
 * path, then normalizes the 16-bit result by 1/65535.
 */
static av_always_inline void
yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                            float *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN; /* native order; read by output_pixel */
    static const int shift = 15;                  /* read by output_pixel */
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000; /* rounding term minus overflow offset */
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = float_mult * (float)val_uint;
    }
}
269 
/**
 * Multi-tap vertical scaling to byte-swapped (opposite-endian) float in
 * [0,1]: same computation as yuv2planeX_float_c_template, with the float's
 * bit pattern byte-swapped before storage.
 */
static av_always_inline void
yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                                  uint32_t *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN; /* native order; read by output_pixel */
    static const int shift = 15;                  /* read by output_pixel */
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000; /* rounding term minus overflow offset */
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
    }
}
289 
/* Generate wrappers that bind a float template to the common plane-output
 * signature (dither/offset are unused by the float paths); src is
 * reinterpreted as 32-bit intermediates. */
#define yuv2plane1_float(template, dest_type, BE_LE) \
static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template((const int32_t *)src, (dest_type *)dest, dstW); \
}

#define yuv2planeX_float(template, dest_type, BE_LE) \
static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                                            const int16_t **src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
}
304 
305 #if HAVE_BIGENDIAN
310 #else
315 #endif
316 
#undef output_pixel

/* Store one sample at 'pos', clipped to 'output_bits' unsigned bits after
 * shifting.  Captures 'big_endian', 'shift' and 'output_bits' from the
 * enclosing scope. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
325 
326 static av_always_inline void
327 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
328  int big_endian, int output_bits)
329 {
330  int i;
331  int shift = 15 - output_bits;
332 
333  for (i = 0; i < dstW; i++) {
334  int val = src[i] + (1 << (shift - 1));
335  output_pixel(&dest[i], val);
336  }
337 }
338 
/**
 * Multi-tap vertical scaling for 9..14-bit output from 16-bit
 * intermediates: accumulate src*filter, round (1 << (shift-1)), then shift
 * down and clip to output_bits in output_pixel.
 */
static av_always_inline void
yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 11 + 16 - output_bits; /* read by output_pixel */

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding term */
        int j;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
357 
358 #undef output_pixel
359 
/* Generate yuv2plane1_<bits><BE_LE>_c / yuv2planeX_<bits><BE_LE>_c wrappers
 * that bind bit depth, endianness and intermediate sample type (int16_t for
 * the 10-bit template, int32_t for the 16-bit one) to the templates above. */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}
376 
/* Instantiate the 9/10/12/14-bit (16-bit intermediates) and 16-bit
 * (32-bit intermediates) plane-output functions for both endiannesses. */
yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
387 
388 
/* Little-endian 16-bit interleaved-chroma wrapper; dstFormat is unused
 * because the template handles U/V order uniformly. */
static void yuv2nv12cX_16LE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}
396 
/* Big-endian 16-bit interleaved-chroma wrapper (see yuv2nv12cX_16LE_c). */
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}
404 
/**
 * Multi-tap vertical scaling to 8-bit output with ordered dither: the
 * dither byte is pre-shifted into the 19-bit accumulator domain, then the
 * filtered sum is shifted down and clipped to [0,255].
 */
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i, j;

    for (i = 0; i < dstW; i++) {
        int acc = dither[(i + offset) & 7] << 12; /* dither in 19-bit domain */

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(acc >> 19);
    }
}
419 
/**
 * Unscaled (1-tap) vertical output to 8 bit: add the ordered-dither byte,
 * drop the 7 fractional bits and clip to [0,255].
 */
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i;

    for (i = 0; i < dstW; i++)
        dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7);
}
429 
430 static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
431  const int16_t *chrFilter, int chrFilterSize,
432  const int16_t **chrUSrc, const int16_t **chrVSrc,
433  uint8_t *dest, int chrDstW)
434 {
435  int i;
436 
437  if (!isSwappedChroma(dstFormat))
438  for (i=0; i<chrDstW; i++) {
439  int u = chrDither[i & 7] << 12;
440  int v = chrDither[(i + 3) & 7] << 12;
441  int j;
442  for (j=0; j<chrFilterSize; j++) {
443  u += chrUSrc[j][i] * chrFilter[j];
444  v += chrVSrc[j][i] * chrFilter[j];
445  }
446 
447  dest[2*i]= av_clip_uint8(u>>19);
448  dest[2*i+1]= av_clip_uint8(v>>19);
449  }
450  else
451  for (i=0; i<chrDstW; i++) {
452  int u = chrDither[i & 7] << 12;
453  int v = chrDither[(i + 3) & 7] << 12;
454  int j;
455  for (j=0; j<chrFilterSize; j++) {
456  u += chrUSrc[j][i] * chrFilter[j];
457  v += chrVSrc[j][i] * chrFilter[j];
458  }
459 
460  dest[2*i]= av_clip_uint8(v>>19);
461  dest[2*i+1]= av_clip_uint8(u>>19);
462  }
463 }
464 
465 
/* Store one sample clipped to 'output_bits', then shifted into the most
 * significant bits of the 16-bit word (by 'output_shift') — the layout used
 * by the p01x (e.g. 10/12-bit MSB-aligned) formats.  Captures 'big_endian',
 * 'shift', 'output_bits' and 'output_shift' from the enclosing scope. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits) << output_shift); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits) << output_shift); \
    }
472 
/**
 * Unscaled (1-tap) luma output for p01x formats: round the 15-bit
 * intermediate to output_bits and store it MSB-aligned in each 16-bit word.
 */
static void yuv2p01xl1_c(const int16_t *src,
                         uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int shift = 15 - output_bits;        /* read by output_pixel */
    int output_shift = 16 - output_bits; /* read by output_pixel */
    const int rnd = 1 << (shift - 1);    /* round-to-nearest bias */
    int i;

    for (i = 0; i < dstW; i++) {
        int v = src[i] + rnd;
        output_pixel(&dest[i], v);
    }
}
486 
/**
 * Multi-tap vertical scaling of luma for p01x formats: result is clipped to
 * output_bits and stored MSB-aligned in each 16-bit word by output_pixel.
 */
static void yuv2p01xlX_c(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i, j;
    int shift = 11 + 16 - output_bits;   /* read by output_pixel */
    int output_shift = 16 - output_bits; /* read by output_pixel */

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding term */

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
504 
/**
 * Multi-tap vertical scaling of interleaved chroma for p01x formats:
 * U goes to even, V to odd 16-bit slots, both MSB-aligned (output_pixel).
 */
static void yuv2p01xcX_c(int big_endian, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW, int output_bits)
{
    uint16_t *dest = (uint16_t*)dest8;
    int i, j;
    int shift = 11 + 16 - output_bits;   /* read by output_pixel */
    int output_shift = 16 - output_bits; /* read by output_pixel */

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding term */
        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }

        output_pixel(&dest[2*i]  , u);
        output_pixel(&dest[2*i+1], v);
    }
}
528 
529 #undef output_pixel
530 
/* Generate LE/BE wrappers (yuv2p0<bits>{l1,lX,cX}_{LE,BE}_c) binding bit
 * depth and endianness to the generic p01x implementations above. */
#define yuv2p01x_wrapper(bits) \
    static void yuv2p0 ## bits ## l1_LE_c(const int16_t *src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xl1_c(src, (uint16_t*)dest, dstW, 0, bits); \
    } \
    \
    static void yuv2p0 ## bits ## l1_BE_c(const int16_t *src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xl1_c(src, (uint16_t*)dest, dstW, 1, bits); \
    } \
    \
    static void yuv2p0 ## bits ## lX_LE_c(const int16_t *filter, \
                                          int filterSize, const int16_t **src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xlX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0, bits); \
    } \
    \
    static void yuv2p0 ## bits ## lX_BE_c(const int16_t *filter, \
                                          int filterSize, const int16_t **src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xlX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1, bits); \
    } \
    \
    static void yuv2p0 ## bits ## cX_LE_c(enum AVPixelFormat dstFormat, \
                                          const uint8_t *chrDither, \
                                          const int16_t *chrFilter, \
                                          int chrFilterSize, \
                                          const int16_t **chrUSrc, \
                                          const int16_t **chrVSrc, \
                                          uint8_t *dest8, int chrDstW) \
    { \
        yuv2p01xcX_c(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, \
                     dest8, chrDstW, bits); \
    } \
    \
    static void yuv2p0 ## bits ## cX_BE_c(enum AVPixelFormat dstFormat, \
                                          const uint8_t *chrDither, \
                                          const int16_t *chrFilter, \
                                          int chrFilterSize, \
                                          const int16_t **chrUSrc, \
                                          const int16_t **chrVSrc, \
                                          uint8_t *dest8, int chrDstW) \
    { \
        yuv2p01xcX_c(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, \
                     dest8, chrDstW, bits); \
    }
585 
588 
/* Shift one thresholded (>= 234) bit into 'acc', MSB-first.
 * NOTE(review): two bare statements with no do-while guard — must only be
 * expanded as a full statement, as in the mono templates below. */
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= 234
/* Emit one byte of packed 1-bpp output; MONOBLACK stores acc directly,
 * the other (MONOWHITE) target stores the bit-inverse. */
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
598 
/**
 * Multi-tap vertical scaling to 1 bit per pixel (mono).  Pixels are
 * processed in pairs; bits accumulate MSB-first in 'acc' and a full byte is
 * flushed every 8 pixels (the loop steps by 2, hence (i & 7) == 6).  With
 * SWS_DITHER_ED an error-diffusion dither with 7/1/5/3 sixteenth weights is
 * used (c->dither_error[0] carries the previous row's errors), otherwise
 * ordered dither from ff_dither_8x8_220.
 */
static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y&7];
    int i;
    unsigned acc = 0;
    int err = 0;

    for (i = 0; i < dstW; i += 2) {
        int j;
        int Y1 = 1 << 18; /* rounding term for the >> 19 below */
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i]   * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        if ((Y1 | Y2) & 0x100) { /* clip only if either value left [0,255] */
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        if (c->dither == SWS_DITHER_ED) {
            /* err holds the previous pixel's residual error */
            Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y1 >= 128);
            Y1 -= 220*(acc&1); /* subtract emitted level -> new residual */

            err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
            c->dither_error[0][i+1] = Y1;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);
        } else {
            accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
            accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
        }
        if ((i & 7) == 6) {
            output_pixel(*dest++, acc); /* flush a complete byte */
        }
    }
    c->dither_error[0][i] = err;

    if (i & 6) { /* flush trailing partial byte, if any */
        output_pixel(*dest, acc);
    }
}
651 
/**
 * Bilinear blend of two source lines (weights yalpha1/yalpha out of 4096)
 * to 1-bpp mono output.  The error-diffusion branch mirrors
 * yuv2mono_X_c_template; the ordered-dither branch unrolls 8 pixels so it
 * emits exactly one byte per iteration.
 */
static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum AVPixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int yalpha1 = 4096 - yalpha;
    int i;
    av_assert2(yalpha <= 4096U);

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1); /* residual after emitting the bit */

            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6)
                output_pixel(*dest++, acc); /* flush a complete byte */
        }
        c->dither_error[0][i] = err;
    } else {
        for (i = 0; i < dstW; i += 8) {
            int Y, acc = 0;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[0]);
            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[1]);
            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[2]);
            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[3]);
            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[4]);
            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[5]);
            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[6]);
            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
712 
/**
 * Single source line (no vertical blend) to 1-bpp mono output; the
 * (x + 64) >> 7 rounds the intermediate down to 8 bits.  Branch structure
 * matches yuv2mono_2_c_template.
 */
static av_always_inline void
yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int i;

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            Y = ((buf0[i + 0] + 64) >> 7);
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1); /* residual after emitting the bit */

            err = ((buf0[i + 1] + 64) >> 7);
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6)
                output_pixel(*dest++, acc); /* flush a complete byte */
        }
        c->dither_error[0][i] = err;
    } else {
        for (i = 0; i < dstW; i += 8) {
            int acc = 0;
            accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
            accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
            accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
            accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
            accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
            accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
            accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
            accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
760 
761 #undef output_pixel
762 #undef accumulate_bit
763 
/* Generate the three packed-output entry points (<name><ext>_{X,2,1}_c) by
 * binding a concrete pixel format to the _X/_2/_1 templates:
 * _X = full multi-tap filter, _2 = blend of two lines, _1 = single line. */
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
795 
/* 1-bpp packed output entry points for MONOWHITE and MONOBLACK. */
YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
798 
/* Write one packed 4:2:2 macropixel (two luma + one U/V pair) starting at
 * byte 'pos', in the byte order of the compile-time 'target' format. */
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
816 
/**
 * Multi-tap vertical scaling to packed 4:2:2 (YUYV/YVYU/UYVY): each
 * iteration produces two luma samples and one chroma pair (4 output bytes).
 */
static av_always_inline void
yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum AVPixelFormat target)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j;
        int Y1 = 1 << 18; /* rounding terms for the >> 19 below */
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if ((Y1 | Y2 | U | V) & 0x100) { /* clip only if any value left [0,255] */
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }
        output_pixels(4*i, Y1, U, Y2, V);
    }
}
855 
/**
 * Bilinear blend of two source lines (yalpha for luma, uvalpha for chroma,
 * both out of 4096) to packed 4:2:2 output.
 */
static av_always_inline void
yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1  = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i;
    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;

        if ((Y1 | Y2 | U | V) & 0x100) { /* clip only if any value left [0,255] */
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }

        output_pixels(i * 4, Y1, U, Y2, V);
    }
}
888 
/**
 * Single-line (no luma blend) packed 4:2:2 output.  For chroma,
 * uvalpha < 2048 takes the first chroma line as-is, otherwise the two
 * chroma lines are averaged ((a + b + 128) >> 8).
 */
static av_always_inline void
yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ]+64) >> 7; /* round intermediates to 8 bit */
            int Y2 = (buf0[i * 2 + 1]+64) >> 7;
            int U  = (ubuf0[i]       +64) >> 7;
            int V  = (vbuf0[i]       +64) >> 7;

            if ((Y1 | Y2 | U | V) & 0x100) { /* clip only if out of [0,255] */
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U  = av_clip_uint8(U);
                V  = av_clip_uint8(V);
            }

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i] + ubuf1[i]+128) >> 8; /* average two chroma lines */
            int V  = (vbuf0[i] + vbuf1[i]+128) >> 8;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U  = av_clip_uint8(U);
                V  = av_clip_uint8(V);
            }

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    }
}
933 
934 #undef output_pixels
935 
/* Packed 4:2:2 entry points for the three supported byte orders. */
YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
939 
/* Component-order selectors for 16-bit-per-component RGB templates:
 * R_B resolves to R for RGB48/RGBA64 targets and to B otherwise (B_R is the
 * mirror), so a single template serves both RGB and BGR layouts. */
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
/* Store one raw 16-bit component with the endianness given by 'is_be'
 * (captured from the enclosing scope). */
#define output_pixel(pos, val) \
    if (is_be) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
948 
/**
 * Multi-tap vertical scaling to 16-bit gray+alpha from 32-bit
 * intermediates.  The accumulators start at -0x40000000 (the same
 * signed-overflow offset as the other 16-bit paths); after the >> 15 this
 * becomes -0x8000, undone by the +0x8000, with (1<<3) as a small rounding
 * term on Y.  Alpha defaults to opaque (0xffff) when alpSrc is NULL.
 * The chroma parameters are unused (gray output).
 */
static av_always_inline void
yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int32_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int32_t **unused_chrUSrc,
                      const int32_t **unused_chrVSrc, int unused_chrFilterSize,
                      const int32_t **alpSrc, uint16_t *dest, int dstW,
                      int y, enum AVPixelFormat target,
                      int unused_hasAlpha, int unused_eightbytes, int is_be)
{
    int hasAlpha = !!alpSrc;
    int i;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = -0x40000000; /* overflow offset, see yuv2planeX_16_c_template */
        int A = 0xffff;      /* opaque default */

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        Y >>= 15;
        Y += (1<<3) + 0x8000; /* rounding + undo offset */
        Y = av_clip_uint16(Y);

        if (hasAlpha) {
            A = -0x40000000 + (1<<14); /* offset plus rounding term */
            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 15;
            A += 0x8000; /* undo offset */
            A = av_clip_uint16(A);
        }

        output_pixel(&dest[2 * i    ], Y);
        output_pixel(&dest[2 * i + 1], A);
    }
}
987 
988 static av_always_inline void
990  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
991  const int32_t *abuf[2], uint16_t *dest, int dstW,
992  int yalpha, int unused_uvalpha, int y,
993  enum AVPixelFormat target, int unused_hasAlpha,
994  int unused_eightbytes, int is_be)
995 {
996  int hasAlpha = abuf && abuf[0] && abuf[1];
997  const int32_t *buf0 = buf[0], *buf1 = buf[1],
998  *abuf0 = hasAlpha ? abuf[0] : NULL,
999  *abuf1 = hasAlpha ? abuf[1] : NULL;
1000  int yalpha1 = 4096 - yalpha;
1001  int i;
1002 
1003  av_assert2(yalpha <= 4096U);
1004 
1005  for (i = 0; i < dstW; i++) {
1006  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;
1007  int A;
1008 
1009  Y = av_clip_uint16(Y);
1010 
1011  if (hasAlpha) {
1012  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
1013  A = av_clip_uint16(A);
1014  }
1015 
1016  output_pixel(&dest[2 * i ], Y);
1017  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
1018  }
1019 }
1020 
1021 static av_always_inline void
1023  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
1024  const int32_t *abuf0, uint16_t *dest, int dstW,
1025  int unused_uvalpha, int y, enum AVPixelFormat target,
1026  int unused_hasAlpha, int unused_eightbytes, int is_be)
1027 {
1028  int hasAlpha = !!abuf0;
1029  int i;
1030 
1031  for (i = 0; i < dstW; i++) {
1032  int Y = buf0[i] >> 3;/* 19 - 16 */
1033  int A;
1034 
1035  Y = av_clip_uint16(Y);
1036 
1037  if (hasAlpha) {
1038  A = abuf0[i] >> 3;
1039  if (A & 0x100)
1040  A = av_clip_uint16(A);
1041  }
1042 
1043  output_pixel(&dest[2 * i ], Y);
1044  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
1045  }
1046 }
1047 
/* Full-filter ("X") vertical scaling of >8-bit YUV (4:2:2-style: one chroma
 * sample per two luma samples) to packed 16-bit-per-component RGB(A)/BGR(A).
 * 'eightbytes' selects 4-component (with alpha word) vs 3-component output;
 * 'is_be' selects the stored endianness. The existing bit-depth comments
 * track the fixed-point precision through each stage. */
static av_always_inline void
yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
                        const int32_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int32_t **chrUSrc,
                        const int32_t **chrVSrc, int chrFilterSize,
                        const int32_t **alpSrc, uint16_t *dest, int dstW,
                        int y, enum AVPixelFormat target, int hasAlpha, int eightbytes,
                        int is_be)
{
    int i;
    int A1 = 0xffff<<14, A2 = 0xffff<<14;   // default alpha: opaque, pre-shifted

    for (i = 0; i < ((dstW + 1) >> 1); i++) {   // two output pixels per iteration
        int j;
        /* Negative bias folded back in later (+0x10000 after >>14). */
        unsigned Y1 = -0x40000000;
        unsigned Y2 = -0x40000000;
        int U  = -(128 << 23); // 19
        int V  = -(128 << 23);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * (unsigned)lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {;
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        if (hasAlpha) {
            A1 = -0x40000000;
            A2 = -0x40000000;
            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2]     * (unsigned)lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
            }
            A1 >>= 1;
            A1 += 0x20002000;   // rounding + bias removal for the alpha sum
            A2 >>= 1;
            A2 += 0x20002000;
        }

        // 8 bits: 12+15=27; 16 bits: 12+19=31
        Y1 = (int)Y1 >> 14; // 10
        Y1 += 0x10000;
        Y2 = (int)Y2 >> 14;
        Y2 += 0x10000;
        U >>= 14;
        V >>= 14;

        // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += (1 << 13) - (1 << 29); // 21
        Y2 += (1 << 13) - (1 << 29);
        // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
        output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16));
        output_pixel(&dest[1], av_clip_uintp2(((int)( G  + Y1) >> 14) + (1<<15), 16));
        output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16));
        if (eightbytes) {
            output_pixel(&dest[3], av_clip_uintp2(A1      , 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
            output_pixel(&dest[5], av_clip_uintp2(((int)( G  + Y2) >> 14) + (1<<15), 16));
            output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
            output_pixel(&dest[7], av_clip_uintp2(A2      , 30) >> 14);
            dest += 8;
        } else {
            output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
            output_pixel(&dest[4], av_clip_uintp2(((int)( G  + Y2) >> 14) + (1<<15), 16));
            output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
            dest += 6;
        }
    }
}
1130 
1131 static av_always_inline void
1133  const int32_t *ubuf[2], const int32_t *vbuf[2],
1134  const int32_t *abuf[2], uint16_t *dest, int dstW,
1135  int yalpha, int uvalpha, int y,
1136  enum AVPixelFormat target, int hasAlpha, int eightbytes,
1137  int is_be)
1138 {
1139  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1140  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1141  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1142  *abuf0 = hasAlpha ? abuf[0] : NULL,
1143  *abuf1 = hasAlpha ? abuf[1] : NULL;
1144  int yalpha1 = 4096 - yalpha;
1145  int uvalpha1 = 4096 - uvalpha;
1146  int i;
1147  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1148 
1149  av_assert2(yalpha <= 4096U);
1150  av_assert2(uvalpha <= 4096U);
1151 
1152  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1153  unsigned Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1154  unsigned Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1155  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1156  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1157  int R, G, B;
1158 
1159  Y1 -= c->yuv2rgb_y_offset;
1160  Y2 -= c->yuv2rgb_y_offset;
1161  Y1 *= c->yuv2rgb_y_coeff;
1162  Y2 *= c->yuv2rgb_y_coeff;
1163  Y1 += (1 << 13) - (1 << 29);
1164  Y2 += (1 << 13) - (1 << 29);
1165 
1166  R = V * c->yuv2rgb_v2r_coeff;
1167  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1168  B = U * c->yuv2rgb_u2b_coeff;
1169 
1170  if (hasAlpha) {
1171  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1172  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1173 
1174  A1 += 1 << 13;
1175  A2 += 1 << 13;
1176  }
1177 
1178  output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16));
1179  output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16));
1180  output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16));
1181  if (eightbytes) {
1182  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1183  output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1184  output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1185  output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1186  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1187  dest += 8;
1188  } else {
1189  output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1190  output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1191  output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1192  dest += 6;
1193  }
1194  }
1195 }
1196 
1197 static av_always_inline void
1199  const int32_t *ubuf[2], const int32_t *vbuf[2],
1200  const int32_t *abuf0, uint16_t *dest, int dstW,
1201  int uvalpha, int y, enum AVPixelFormat target,
1202  int hasAlpha, int eightbytes, int is_be)
1203 {
1204  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1205  int i;
1206  int A1 = 0xffff<<14, A2= 0xffff<<14;
1207 
1208  if (uvalpha < 2048) {
1209  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1210  SUINT Y1 = (buf0[i * 2] ) >> 2;
1211  SUINT Y2 = (buf0[i * 2 + 1]) >> 2;
1212  int U = (ubuf0[i] - (128 << 11)) >> 2;
1213  int V = (vbuf0[i] - (128 << 11)) >> 2;
1214  int R, G, B;
1215 
1216  Y1 -= c->yuv2rgb_y_offset;
1217  Y2 -= c->yuv2rgb_y_offset;
1218  Y1 *= c->yuv2rgb_y_coeff;
1219  Y2 *= c->yuv2rgb_y_coeff;
1220  Y1 += (1 << 13) - (1 << 29);
1221  Y2 += (1 << 13) - (1 << 29);
1222 
1223  if (hasAlpha) {
1224  A1 = abuf0[i * 2 ] * (1 << 11);
1225  A2 = abuf0[i * 2 + 1] * (1 << 11);
1226 
1227  A1 += 1 << 13;
1228  A2 += 1 << 13;
1229  }
1230 
1231  R = V * c->yuv2rgb_v2r_coeff;
1232  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1233  B = U * c->yuv2rgb_u2b_coeff;
1234 
1235  output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16));
1236  output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16));
1237  output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16));
1238  if (eightbytes) {
1239  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1240  output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1241  output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1242  output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1243  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1244  dest += 8;
1245  } else {
1246  output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1247  output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1248  output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1249  dest += 6;
1250  }
1251  }
1252  } else {
1253  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1254  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1255  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1256  SUINT Y1 = (buf0[i * 2] ) >> 2;
1257  SUINT Y2 = (buf0[i * 2 + 1]) >> 2;
1258  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1259  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1260  int R, G, B;
1261 
1262  Y1 -= c->yuv2rgb_y_offset;
1263  Y2 -= c->yuv2rgb_y_offset;
1264  Y1 *= c->yuv2rgb_y_coeff;
1265  Y2 *= c->yuv2rgb_y_coeff;
1266  Y1 += (1 << 13) - (1 << 29);
1267  Y2 += (1 << 13) - (1 << 29);
1268 
1269  if (hasAlpha) {
1270  A1 = abuf0[i * 2 ] * (1 << 11);
1271  A2 = abuf0[i * 2 + 1] * (1 << 11);
1272 
1273  A1 += 1 << 13;
1274  A2 += 1 << 13;
1275  }
1276 
1277  R = V * c->yuv2rgb_v2r_coeff;
1278  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1279  B = U * c->yuv2rgb_u2b_coeff;
1280 
1281  output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16));
1282  output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16));
1283  output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16));
1284  if (eightbytes) {
1285  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1286  output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1287  output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1288  output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1289  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1290  dest += 8;
1291  } else {
1292  output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16));
1293  output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16));
1294  output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16));
1295  dest += 6;
1296  }
1297  }
1298  }
1299 }
1300 
/* Full-filter ("X") vertical scaling of >8-bit YUV with one chroma sample
 * per luma sample ("full" chroma) to packed 16-bit-per-component RGB(A).
 * One output pixel per iteration; otherwise the same fixed-point pipeline
 * as yuv2rgba64_X_c_template (see the bit-depth comments inline). */
static av_always_inline void
yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                             const int32_t **lumSrc, int lumFilterSize,
                             const int16_t *chrFilter, const int32_t **chrUSrc,
                             const int32_t **chrVSrc, int chrFilterSize,
                             const int32_t **alpSrc, uint16_t *dest, int dstW,
                             int y, enum AVPixelFormat target, int hasAlpha,
                             int eightbytes, int is_be)
{
    int i;
    int A = 0xffff<<14;   // default alpha: opaque, pre-shifted

    for (i = 0; i < dstW; i++) {
        int j;
        /* Negative bias folded back in later (+0x10000 after >>14). */
        int Y = -0x40000000;
        int U = -(128 << 23); // 19
        int V = -(128 << 23);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {;
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        if (hasAlpha) {
            A = -0x40000000;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * (unsigned)lumFilter[j];
            }
            A >>= 1;
            A += 0x20002000;   // rounding + bias removal for the alpha sum
        }

        // 8bit: 12+15=27; 16-bit: 12+19=31
        Y >>= 14; // 10
        Y += 0x10000;
        U >>= 14;
        V >>= 14;

        // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += (1 << 13) - (1<<29); // 21
        // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
        output_pixel(&dest[0], av_clip_uintp2(((R_B + Y)>>14) + (1<<15), 16));
        output_pixel(&dest[1], av_clip_uintp2((( G  + Y)>>14) + (1<<15), 16));
        output_pixel(&dest[2], av_clip_uintp2(((B_R + Y)>>14) + (1<<15), 16));
        if (eightbytes) {
            output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
            dest += 4;
        } else {
            dest += 3;
        }
    }
}
1365 
1366 static av_always_inline void
1368  const int32_t *ubuf[2], const int32_t *vbuf[2],
1369  const int32_t *abuf[2], uint16_t *dest, int dstW,
1370  int yalpha, int uvalpha, int y,
1371  enum AVPixelFormat target, int hasAlpha, int eightbytes,
1372  int is_be)
1373 {
1374  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1375  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1376  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1377  *abuf0 = hasAlpha ? abuf[0] : NULL,
1378  *abuf1 = hasAlpha ? abuf[1] : NULL;
1379  int yalpha1 = 4096 - yalpha;
1380  int uvalpha1 = 4096 - uvalpha;
1381  int i;
1382  int A = 0xffff<<14;
1383 
1384  av_assert2(yalpha <= 4096U);
1385  av_assert2(uvalpha <= 4096U);
1386 
1387  for (i = 0; i < dstW; i++) {
1388  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1389  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1390  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1391  int R, G, B;
1392 
1393  Y -= c->yuv2rgb_y_offset;
1394  Y *= c->yuv2rgb_y_coeff;
1395  Y += (1 << 13) - (1 << 29);
1396 
1397  R = V * c->yuv2rgb_v2r_coeff;
1398  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1399  B = U * c->yuv2rgb_u2b_coeff;
1400 
1401  if (hasAlpha) {
1402  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1403 
1404  A += 1 << 13;
1405  }
1406 
1407  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16));
1408  output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16));
1409  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16));
1410  if (eightbytes) {
1411  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1412  dest += 4;
1413  } else {
1414  dest += 3;
1415  }
1416  }
1417 }
1418 
1419 static av_always_inline void
1421  const int32_t *ubuf[2], const int32_t *vbuf[2],
1422  const int32_t *abuf0, uint16_t *dest, int dstW,
1423  int uvalpha, int y, enum AVPixelFormat target,
1424  int hasAlpha, int eightbytes, int is_be)
1425 {
1426  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1427  int i;
1428  int A = 0xffff<<14;
1429 
1430  if (uvalpha < 2048) {
1431  for (i = 0; i < dstW; i++) {
1432  SUINT Y = (buf0[i]) >> 2;
1433  int U = (ubuf0[i] - (128 << 11)) >> 2;
1434  int V = (vbuf0[i] - (128 << 11)) >> 2;
1435  int R, G, B;
1436 
1437  Y -= c->yuv2rgb_y_offset;
1438  Y *= c->yuv2rgb_y_coeff;
1439  Y += (1 << 13) - (1 << 29);
1440 
1441  if (hasAlpha) {
1442  A = abuf0[i] * (1 << 11);
1443 
1444  A += 1 << 13;
1445  }
1446 
1447  R = V * c->yuv2rgb_v2r_coeff;
1448  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1449  B = U * c->yuv2rgb_u2b_coeff;
1450 
1451  output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y) >> 14) + (1<<15), 16));
1452  output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y) >> 14) + (1<<15), 16));
1453  output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y) >> 14) + (1<<15), 16));
1454  if (eightbytes) {
1455  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1456  dest += 4;
1457  } else {
1458  dest += 3;
1459  }
1460  }
1461  } else {
1462  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1463  int A = 0xffff<<14;
1464  for (i = 0; i < dstW; i++) {
1465  SUINT Y = (buf0[i] ) >> 2;
1466  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1467  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1468  int R, G, B;
1469 
1470  Y -= c->yuv2rgb_y_offset;
1471  Y *= c->yuv2rgb_y_coeff;
1472  Y += (1 << 13) - (1 << 29);
1473 
1474  if (hasAlpha) {
1475  A = abuf0[i] * (1 << 11);
1476 
1477  A += 1 << 13;
1478  }
1479 
1480  R = V * c->yuv2rgb_v2r_coeff;
1481  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1482  B = U * c->yuv2rgb_u2b_coeff;
1483 
1484  output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y) >> 14) + (1<<15), 16));
1485  output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y) >> 14) + (1<<15), 16));
1486  output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y) >> 14) + (1<<15), 16));
1487  if (eightbytes) {
1488  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1489  dest += 4;
1490  } else {
1491  dest += 3;
1492  }
1493  }
1494  }
1495 }
1496 
1497 #undef output_pixel
1498 #undef r_b
1499 #undef b_r
1500 
/* Stamp out the three public output functions (_X_c full filter, _2_c
 * bilinear, _1_c single line) for one 16-bit packed pixel format. Each
 * wrapper only reinterprets the generic int16_t** plane pointers as the
 * int32_t** the >8-bit templates expect, then forwards all parameters plus
 * the compile-time format/alpha/size/endianness selectors. */
#define YUV2PACKED16WRAPPER_EXT(name, base, ext, fmt, is_be, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes, is_be); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes, is_be); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes, is_be); \
}
/* Convenience front end: build the pixel-format enum from base + endianness
 * and derive the is_be flag via IS_BE(). */
#define YUV2PACKED16WRAPPER(name, base, ext, base_fmt, endianness, hasAlpha, eightbytes) \
    YUV2PACKED16WRAPPER_EXT(name, base, ext, base_fmt ## endianness, IS_BE(endianness), hasAlpha, eightbytes)
1548 
/* 4:2:2-chroma (two luma per chroma) 16-bit packed writers. */
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be,  AV_PIX_FMT_RGB48,  BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le,  AV_PIX_FMT_RGB48,  LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be,  AV_PIX_FMT_BGR48,  BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le,  AV_PIX_FMT_BGR48,  LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16be, AV_PIX_FMT_YA16, BE, 1, 0)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16le, AV_PIX_FMT_YA16, LE, 1, 0)

/* Full-chroma (one chroma per luma) 16-bit packed writers. */
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full,  AV_PIX_FMT_RGB48,  BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full,  AV_PIX_FMT_RGB48,  LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full,  AV_PIX_FMT_BGR48,  BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full,  AV_PIX_FMT_BGR48,  LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64, LE, 0, 1)
1576 
1577 /*
1578  * Write out 2 RGB pixels in the target pixel format. This function takes a
1579  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1580  * things like endianness conversion and shifting. The caller takes care of
1581  * setting the correct offset in these tables from the chroma (U/V) values.
1582  * This function then uses the luminance (Y1/Y2) values to write out the
1583  * correct RGB values into the destination buffer.
1584  */
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    /* 32-bit RGBA/ARGB/BGRA/ABGR: the LUT entries already hold the component
     * shifted to its final position, so a pixel is just r+g+b (+A). */
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        /* Alpha lives in byte 0 for the *_1 layouts, byte 3 otherwise. */
        int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* The alpha byte in the LUT sums must be empty before we OR in A. */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* Without alpha the LUT must already contain an opaque byte. */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
#endif
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        /* 24-bit packed: write the three bytes per pixel individually. */
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        /* 16-bit packed: apply per-component ordered dither by offsetting the
         * LUT index, with the dither pattern chosen to match the bit depth. */
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_4[ y & 1     ][0];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_4[ y & 1     ][1];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_8[ y & 1     ][1];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_8[ y & 1     ][0];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else if (target == AV_PIX_FMT_X2RGB10 || target == AV_PIX_FMT_X2BGR10) {
        /* 10-bit-per-component in 32 bits: no dithering applied here. */
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;
        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
    } else /* 8/4 bits */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            /* 4-bit: pack two pixels into one byte, second in the high nibble. */
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                    ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
1710 
1711 static av_always_inline void
1712 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1713  const int16_t **lumSrc, int lumFilterSize,
1714  const int16_t *chrFilter, const int16_t **chrUSrc,
1715  const int16_t **chrVSrc, int chrFilterSize,
1716  const int16_t **alpSrc, uint8_t *dest, int dstW,
1717  int y, enum AVPixelFormat target, int hasAlpha)
1718 {
1719  int i;
1720 
1721  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1722  int j, A1, A2;
1723  int Y1 = 1 << 18;
1724  int Y2 = 1 << 18;
1725  int U = 1 << 18;
1726  int V = 1 << 18;
1727  const void *r, *g, *b;
1728 
1729  for (j = 0; j < lumFilterSize; j++) {
1730  Y1 += lumSrc[j][i * 2] * lumFilter[j];
1731  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1732  }
1733  for (j = 0; j < chrFilterSize; j++) {
1734  U += chrUSrc[j][i] * chrFilter[j];
1735  V += chrVSrc[j][i] * chrFilter[j];
1736  }
1737  Y1 >>= 19;
1738  Y2 >>= 19;
1739  U >>= 19;
1740  V >>= 19;
1741  if (hasAlpha) {
1742  A1 = 1 << 18;
1743  A2 = 1 << 18;
1744  for (j = 0; j < lumFilterSize; j++) {
1745  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1746  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1747  }
1748  A1 >>= 19;
1749  A2 >>= 19;
1750  if ((A1 | A2) & 0x100) {
1751  A1 = av_clip_uint8(A1);
1752  A2 = av_clip_uint8(A2);
1753  }
1754  }
1755 
1756  r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1757  g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
1758  b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1759 
1760  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1761  r, g, b, y, target, hasAlpha);
1762  }
1763 }
1764 
/* Two-line bilinear ("2") blend of 8-bit YUV to an RGB target; 'yalpha' and
 * 'uvalpha' are 12-bit blend weights between the two source lines. */
static av_always_inline void
yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1  = 4096 - yalpha;    // complementary 12-bit weights
    int uvalpha1 = 4096 - uvalpha;
    int i;
    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    /* One chroma sample covers two luma samples, so step in pixel pairs. */
    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1 + buf1[i * 2]     * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
        int A1, A2;
        const void *r =  c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                   *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                   *b =  c->table_bU[U + YUVRGB_TABLE_HEADROOM];

        if (hasAlpha) {
            A1 = (abuf0[i * 2    ] * yalpha1 + abuf1[i * 2    ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);
        }

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}
1804 
/* Single-line ("1") conversion of 8-bit YUV to an RGB target; averages the
 * two chroma lines when 'uvalpha' >= 2048, otherwise uses the first only. */
static av_always_inline void
yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target,
                     int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;   // +64 rounds the >>7
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i]        + 64) >> 7;
            int V  = (vbuf0[i]        + 64) >> 7;
            int A1, A2;
            const void *r =  c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                       *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                       *b =  c->table_bU[U + YUVRGB_TABLE_HEADROOM];

            if (hasAlpha) {
                A1 = abuf0[i * 2    ] * 255 + 16384 >> 15;
                A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    } else {
        /* Average the two chroma lines. */
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ]     +  64) >> 7;
            int Y2 = (buf0[i * 2 + 1]     +  64) >> 7;
            int U  = (ubuf0[i] + ubuf1[i] + 128) >> 8;
            int V  = (vbuf0[i] + vbuf1[i] + 128) >> 8;
            int A1, A2;
            const void *r =  c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                       *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                       *b =  c->table_bU[U + YUVRGB_TABLE_HEADROOM];

            if (hasAlpha) {
                A1 = (abuf0[i * 2    ] + 64) >> 7;
                A2 = (abuf0[i * 2 + 1] + 64) >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    }
}
1860 
/*
 * Wrapper-generator macros.  Each output format gets up to three entry
 * points:
 *   _X_c: full vertical filtering (arbitrary filter size),
 *   _2_c: bilinear blend of two input lines,
 *   _1_c: a single input line, no vertical filtering.
 * They forward to the av_always_inline templates above so the compiler can
 * specialize each instantiation on the constant 'fmt' and 'hasAlpha'.
 */
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

/* Adds the two-line bilinear variant on top of the _X_c one. */
#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}

/* Adds the unscaled single-line variant; instantiates all three. */
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
1895 
1896 #if CONFIG_SMALL
1897 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1898 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1899 #else
1900 #if CONFIG_SWSCALE_ALPHA
1903 #endif
1906 #endif
1907 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1908 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1915 YUV2RGBWRAPPER(yuv2, rgb, x2rgb10, AV_PIX_FMT_X2RGB10, 0)
1916 YUV2RGBWRAPPER(yuv2, rgb, x2bgr10, AV_PIX_FMT_X2BGR10, 0)
1917 
                   uint8_t *dest, int i, int Y, int A, int U, int V,
                   int y, enum AVPixelFormat target, int hasAlpha, int err[4])
{
    int R, G, B;
    int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;

    /* fixed-point YUV -> RGB; R/G/B carry ~30 significant bits and are
     * reduced to 8 bits with >> 22 when stored */
    Y -= c->yuv2rgb_y_offset;
    Y *= c->yuv2rgb_y_coeff;
    Y += 1 << 21;                       /* rounding bias for the final >> 22 */
    /* unsigned arithmetic so extreme inputs wrap instead of hitting
     * signed-overflow UB */
    R = (unsigned)Y + V*(unsigned)c->yuv2rgb_v2r_coeff;
    G = (unsigned)Y + V*(unsigned)c->yuv2rgb_v2g_coeff + U*(unsigned)c->yuv2rgb_u2g_coeff;
    B = (unsigned)Y + U*(unsigned)c->yuv2rgb_u2b_coeff;
    if ((R | G | B) & 0xC0000000) {     /* any component outside 30 bits? */
        R = av_clip_uintp2(R, 30);
        G = av_clip_uintp2(G, 30);
        B = av_clip_uintp2(B, 30);
    }

    switch(target) {
    case AV_PIX_FMT_ARGB:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = R >> 22;
        dest[2] = G >> 22;
        dest[3] = B >> 22;
        break;
    case AV_PIX_FMT_RGB24:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        break;
    case AV_PIX_FMT_RGBA:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_ABGR:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = B >> 22;
        dest[2] = G >> 22;
        dest[3] = R >> 22;
        break;
    case AV_PIX_FMT_BGR24:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        break;
    case AV_PIX_FMT_BGRA:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB8:
    {
        int r,g,b;

        switch (c->dither) {
        case SWS_DITHER_NONE:
            if (isrgb8) {
                /* plain 3-3-2 bit truncation, no dithering */
                r = av_clip_uintp2(R >> 27, 3);
                g = av_clip_uintp2(G >> 27, 3);
                b = av_clip_uintp2(B >> 28, 2);
            } else {
                /* 1-2-1 bit truncation for the 4-bit-per-pixel formats */
                r = av_clip_uintp2(R >> 29, 1);
                g = av_clip_uintp2(G >> 28, 2);
                b = av_clip_uintp2(B >> 29, 1);
            }
            break;
        default:
        case SWS_DITHER_AUTO:
        case SWS_DITHER_ED:
            /* error diffusion: 7/16 of the previous pixel's error (err[])
             * plus 1/16, 5/16 and 3/16 from the previous line's buffer */
            R >>= 22;
            G >>= 22;
            B >>= 22;
            R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
            G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
            B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
            /* stash the previous pixel's error for the next line before
             * err[] is overwritten below */
            c->dither_error[0][i] = err[0];
            c->dither_error[1][i] = err[1];
            c->dither_error[2][i] = err[2];
            r = R >> (isrgb8 ? 5 : 7);
            g = G >> (isrgb8 ? 5 : 6);
            b = B >> (isrgb8 ? 6 : 7);
            r = av_clip(r, 0, isrgb8 ? 7 : 1);
            g = av_clip(g, 0, isrgb8 ? 7 : 3);
            b = av_clip(b, 0, isrgb8 ? 3 : 1);
            /* new quantization error = input minus reconstructed value */
            err[0] = R - r*(isrgb8 ? 36 : 255);
            err[1] = G - g*(isrgb8 ? 36 : 85);
            err[2] = B - b*(isrgb8 ? 85 : 255);
            break;
        case SWS_DITHER_A_DITHER:
            if (isrgb8) {
  /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define A_DITHER(u,v)   (((((u)+((v)*236))*119)&0xff))
                r = (((R >> 19) + A_DITHER(i,y)       -96)>>8);
                g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
                b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
                g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
                b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }
            break;
        case SWS_DITHER_X_DITHER:
            if (isrgb8) {
  /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define X_DITHER(u,v)   (((((u)^((v)*237))*181)&0x1ff)/2)
                r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
                g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
                b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
                g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
                b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }

            break;
        }

        /* pack the quantized components into a single byte */
        if(target == AV_PIX_FMT_BGR4_BYTE) {
            dest[0] = r + 2*g + 8*b;
        } else if(target == AV_PIX_FMT_RGB4_BYTE) {
            dest[0] = b + 2*g + 8*r;
        } else if(target == AV_PIX_FMT_BGR8) {
            dest[0] = r + 8*g + 64*b;
        } else if(target == AV_PIX_FMT_RGB8) {
            dest[0] = b + 4*g + 32*r;
        } else
            av_assert2(0);
        break;}
    }
}
2067 
2068 static av_always_inline void
2069 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
2070  const int16_t **lumSrc, int lumFilterSize,
2071  const int16_t *chrFilter, const int16_t **chrUSrc,
2072  const int16_t **chrVSrc, int chrFilterSize,
2073  const int16_t **alpSrc, uint8_t *dest,
2074  int dstW, int y, enum AVPixelFormat target, int hasAlpha)
2075 {
2076  int i;
2077  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2078  int err[4] = {0};
2079  int A = 0; //init to silence warning
2080 
2081  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2082  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2083  step = 1;
2084 
2085  for (i = 0; i < dstW; i++) {
2086  int j;
2087  int Y = 1<<9;
2088  int U = (1<<9)-(128 << 19);
2089  int V = (1<<9)-(128 << 19);
2090 
2091  for (j = 0; j < lumFilterSize; j++) {
2092  Y += lumSrc[j][i] * lumFilter[j];
2093  }
2094  for (j = 0; j < chrFilterSize; j++) {
2095  U += chrUSrc[j][i] * chrFilter[j];
2096  V += chrVSrc[j][i] * chrFilter[j];
2097  }
2098  Y >>= 10;
2099  U >>= 10;
2100  V >>= 10;
2101  if (hasAlpha) {
2102  A = 1 << 18;
2103  for (j = 0; j < lumFilterSize; j++) {
2104  A += alpSrc[j][i] * lumFilter[j];
2105  }
2106  A >>= 19;
2107  if (A & 0x100)
2108  A = av_clip_uint8(A);
2109  }
2110  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2111  dest += step;
2112  }
2113  c->dither_error[0][i] = err[0];
2114  c->dither_error[1][i] = err[1];
2115  c->dither_error[2][i] = err[2];
2116 }
2117 
/*
 * Bilinear (two-line) full-chroma output: blend two luma/chroma/alpha
 * input lines with 12-bit weights (yalpha/uvalpha) and hand each pixel to
 * yuv2rgb_write_full() for conversion and packing.
 */
static av_always_inline void
yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf[2], uint8_t *dest, int dstW,
                          int yalpha, int uvalpha, int y,
                          enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1  = 4096 - yalpha;   /* weight of the first input line */
    int uvalpha1 = 4096 - uvalpha;
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};               /* error-diffusion state for dithered targets */
    int A = 0; // init to silence warning

    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    /* the dithered 4/8-bit targets pack a whole pixel into one byte */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int Y = ( buf0[i] * yalpha1  +  buf1[i] * yalpha             ) >> 10; //FIXME rounding
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;

        if (hasAlpha) {
            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
            if (A & 0x100)          /* left 8-bit range -> clip */
                A = av_clip_uint8(A);
        }

        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* carry the last pixel's diffusion error over to the next line */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2162 
/*
 * Unscaled single-line full-chroma output: luma is taken from buf0 with no
 * vertical filtering; chroma from line 0 only (uvalpha < 2048) or the
 * average of the two chroma lines.  Pixels go through yuv2rgb_write_full().
 */
static av_always_inline void
                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf0, uint8_t *dest, int dstW,
                          int uvalpha, int y, enum AVPixelFormat target,
                          int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};   /* error-diffusion state for the dithered targets */

    /* the dithered 4/8-bit targets pack a whole pixel into one byte */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
        step = 1;

    if (uvalpha < 2048) {
        int A = 0; //init to silence warning
        for (i = 0; i < dstW; i++) {
            /* scale the 15-bit intermediates up to the 17-bit range
             * yuv2rgb_write_full() expects */
            int Y = buf0[i] * 4;
            int U = (ubuf0[i] - (128<<7)) * 4;
            int V = (vbuf0[i] - (128<<7)) * 4;

            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)      /* left 8-bit range -> clip */
                    A = av_clip_uint8(A);
            }

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
            dest += step;
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        int A = 0; //init to silence warning
        for (i = 0; i < dstW; i++) {
            int Y = buf0[i] * 4;
            /* average of the two chroma input lines */
            int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
            int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;

            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)
                    A = av_clip_uint8(A);
            }

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
            dest += step;
        }
    }

    /* carry the last pixel's diffusion error over to the next line */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2218 
/*
 * Instantiate the packed "full chroma" RGB writers.  With CONFIG_SMALL a
 * single alpha-capable function per 32-bit format is built and the
 * run-time c->needAlpha flag decides; otherwise dedicated alpha and
 * no-alpha versions are generated.
 */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)

YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
2243 
2244 static void
2245 yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
2246  const int16_t **lumSrc, int lumFilterSize,
2247  const int16_t *chrFilter, const int16_t **chrUSrc,
2248  const int16_t **chrVSrc, int chrFilterSize,
2249  const int16_t **alpSrc, uint8_t **dest,
2250  int dstW, int y)
2251 {
2252  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2253  int i;
2254  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
2255  uint16_t **dest16 = (uint16_t**)dest;
2256  int SH = 22 + 8 - desc->comp[0].depth;
2257  int A = 0; // init to silence warning
2258 
2259  for (i = 0; i < dstW; i++) {
2260  int j;
2261  int Y = 1 << 9;
2262  int U = (1 << 9) - (128 << 19);
2263  int V = (1 << 9) - (128 << 19);
2264  int R, G, B;
2265 
2266  for (j = 0; j < lumFilterSize; j++)
2267  Y += lumSrc[j][i] * lumFilter[j];
2268 
2269  for (j = 0; j < chrFilterSize; j++) {
2270  U += chrUSrc[j][i] * chrFilter[j];
2271  V += chrVSrc[j][i] * chrFilter[j];
2272  }
2273 
2274  Y >>= 10;
2275  U >>= 10;
2276  V >>= 10;
2277 
2278  if (hasAlpha) {
2279  A = 1 << 18;
2280 
2281  for (j = 0; j < lumFilterSize; j++)
2282  A += alpSrc[j][i] * lumFilter[j];
2283 
2284  if (A & 0xF8000000)
2285  A = av_clip_uintp2(A, 27);
2286  }
2287 
2288  Y -= c->yuv2rgb_y_offset;
2289  Y *= c->yuv2rgb_y_coeff;
2290  Y += 1 << (SH-1);
2291  R = Y + V * c->yuv2rgb_v2r_coeff;
2292  G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2293  B = Y + U * c->yuv2rgb_u2b_coeff;
2294 
2295  if ((R | G | B) & 0xC0000000) {
2296  R = av_clip_uintp2(R, 30);
2297  G = av_clip_uintp2(G, 30);
2298  B = av_clip_uintp2(B, 30);
2299  }
2300 
2301  if (SH != 22) {
2302  dest16[0][i] = G >> SH;
2303  dest16[1][i] = B >> SH;
2304  dest16[2][i] = R >> SH;
2305  if (hasAlpha)
2306  dest16[3][i] = A >> (SH - 3);
2307  } else {
2308  dest[0][i] = G >> 22;
2309  dest[1][i] = B >> 22;
2310  dest[2][i] = R >> 22;
2311  if (hasAlpha)
2312  dest[3][i] = A >> 19;
2313  }
2314  }
2315  if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2316  for (i = 0; i < dstW; i++) {
2317  dest16[0][i] = av_bswap16(dest16[0][i]);
2318  dest16[1][i] = av_bswap16(dest16[1][i]);
2319  dest16[2][i] = av_bswap16(dest16[2][i]);
2320  if (hasAlpha)
2321  dest16[3][i] = av_bswap16(dest16[3][i]);
2322  }
2323  }
2324 }
2325 
/*
 * Vertically filter one line and write 16-bit planar GBR(A).  The common
 * prototype passes the high-bit-depth intermediates as int16_t**, so they
 * are recast to their actual int32_t** type first.  Planes are stored in
 * G, B, R (, A) order and byte-swapped afterwards if the target endianness
 * differs from the native one.
 */
static void
yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter,
                    const int16_t **lumSrcx, int lumFilterSize,
                    const int16_t *chrFilter, const int16_t **chrUSrcx,
                    const int16_t **chrVSrcx, int chrFilterSize,
                    const int16_t **alpSrcx, uint8_t **dest,
                    int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
    uint16_t **dest16 = (uint16_t**)dest;
    const int32_t **lumSrc  = (const int32_t**)lumSrcx;
    const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
    const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
    const int32_t **alpSrc  = (const int32_t**)alpSrcx;

    for (i = 0; i < dstW; i++) {
        int j;
        /* accumulators are pre-biased (-2^30, resp. -(128<<23) for chroma)
         * to keep the filter sums inside the signed 32-bit range; the luma
         * bias is cancelled by the += 0x10000 after the >> 14 below */
        int Y = -0x40000000;
        int U = -(128 << 23);
        int V = -(128 << 23);
        int R, G, B, A;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        Y >>= 14;
        Y += 0x10000;               /* undo the -2^30 bias */
        U >>= 14;
        V >>= 14;

        if (hasAlpha) {
            A = -0x40000000;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

            /* undo the bias; the net result keeps a 0x2000 rounding term */
            A >>= 1;
            A += 0x20002000;
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += (1 << 13) - (1 << 29); /* rounding bias + range re-centering */
        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        /* planes are stored G, B, R */
        dest16[2][i] = av_clip_uintp2(((Y + R) >> 14) + (1<<15), 16);
        dest16[0][i] = av_clip_uintp2(((Y + G) >> 14) + (1<<15), 16);
        dest16[1][i] = av_clip_uintp2(((Y + B) >> 14) + (1<<15), 16);

        if (hasAlpha)
            dest16[3][i] = av_clip_uintp2(A, 30) >> 14;
    }
    /* swap to the requested endianness if it differs from the native one */
    if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}
2397 
2398 static void
2399 yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter,
2400  const int16_t **lumSrcx, int lumFilterSize,
2401  const int16_t *chrFilter, const int16_t **chrUSrcx,
2402  const int16_t **chrVSrcx, int chrFilterSize,
2403  const int16_t **alpSrcx, uint8_t **dest,
2404  int dstW, int y)
2405 {
2406  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2407  int i;
2408  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2409  uint32_t **dest32 = (uint32_t**)dest;
2410  const int32_t **lumSrc = (const int32_t**)lumSrcx;
2411  const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2412  const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2413  const int32_t **alpSrc = (const int32_t**)alpSrcx;
2414  static const float float_mult = 1.0f / 65535.0f;
2415 
2416  for (i = 0; i < dstW; i++) {
2417  int j;
2418  int Y = -0x40000000;
2419  int U = -(128 << 23);
2420  int V = -(128 << 23);
2421  int R, G, B, A;
2422 
2423  for (j = 0; j < lumFilterSize; j++)
2424  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2425 
2426  for (j = 0; j < chrFilterSize; j++) {
2427  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2428  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2429  }
2430 
2431  Y >>= 14;
2432  Y += 0x10000;
2433  U >>= 14;
2434  V >>= 14;
2435 
2436  if (hasAlpha) {
2437  A = -0x40000000;
2438 
2439  for (j = 0; j < lumFilterSize; j++)
2440  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2441 
2442  A >>= 1;
2443  A += 0x20002000;
2444  }
2445 
2446  Y -= c->yuv2rgb_y_offset;
2447  Y *= c->yuv2rgb_y_coeff;
2448  Y += (1 << 13) - (1 << 29);
2449  R = V * c->yuv2rgb_v2r_coeff;
2450  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2451  B = U * c->yuv2rgb_u2b_coeff;
2452 
2453  R = av_clip_uintp2(((Y + R) >> 14) + (1<<15), 16);
2454  G = av_clip_uintp2(((Y + G) >> 14) + (1<<15), 16);
2455  B = av_clip_uintp2(((Y + B) >> 14) + (1<<15), 16);
2456 
2457  dest32[0][i] = av_float2int(float_mult * (float)G);
2458  dest32[1][i] = av_float2int(float_mult * (float)B);
2459  dest32[2][i] = av_float2int(float_mult * (float)R);
2460  if (hasAlpha)
2461  dest32[3][i] = av_float2int(float_mult * (float)(av_clip_uintp2(A, 30) >> 14));
2462  }
2463  if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2464  for (i = 0; i < dstW; i++) {
2465  dest32[0][i] = av_bswap32(dest32[0][i]);
2466  dest32[1][i] = av_bswap32(dest32[1][i]);
2467  dest32[2][i] = av_bswap32(dest32[2][i]);
2468  if (hasAlpha)
2469  dest32[3][i] = av_bswap32(dest32[3][i]);
2470  }
2471  }
2472 }
2473 
2474 static void
2475 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2476  const int16_t *ubuf[2], const int16_t *vbuf[2],
2477  const int16_t *abuf0, uint8_t *dest, int dstW,
2478  int uvalpha, int y)
2479 {
2480  int hasAlpha = !!abuf0;
2481  int i;
2482 
2483  for (i = 0; i < dstW; i++) {
2484  int Y = (buf0[i] + 64) >> 7;
2485  int A;
2486 
2487  Y = av_clip_uint8(Y);
2488 
2489  if (hasAlpha) {
2490  A = (abuf0[i] + 64) >> 7;
2491  if (A & 0x100)
2492  A = av_clip_uint8(A);
2493  }
2494 
2495  dest[i * 2 ] = Y;
2496  dest[i * 2 + 1] = hasAlpha ? A : 255;
2497  }
2498 }
2499 
2500 static void
2501 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2502  const int16_t *ubuf[2], const int16_t *vbuf[2],
2503  const int16_t *abuf[2], uint8_t *dest, int dstW,
2504  int yalpha, int uvalpha, int y)
2505 {
2506  int hasAlpha = abuf && abuf[0] && abuf[1];
2507  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2508  *abuf0 = hasAlpha ? abuf[0] : NULL,
2509  *abuf1 = hasAlpha ? abuf[1] : NULL;
2510  int yalpha1 = 4096 - yalpha;
2511  int i;
2512 
2513  av_assert2(yalpha <= 4096U);
2514 
2515  for (i = 0; i < dstW; i++) {
2516  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2517  int A;
2518 
2519  Y = av_clip_uint8(Y);
2520 
2521  if (hasAlpha) {
2522  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2523  A = av_clip_uint8(A);
2524  }
2525 
2526  dest[i * 2 ] = Y;
2527  dest[i * 2 + 1] = hasAlpha ? A : 255;
2528  }
2529 }
2530 
2531 static void
2532 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2533  const int16_t **lumSrc, int lumFilterSize,
2534  const int16_t *chrFilter, const int16_t **chrUSrc,
2535  const int16_t **chrVSrc, int chrFilterSize,
2536  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2537 {
2538  int hasAlpha = !!alpSrc;
2539  int i;
2540 
2541  for (i = 0; i < dstW; i++) {
2542  int j;
2543  int Y = 1 << 18, A = 1 << 18;
2544 
2545  for (j = 0; j < lumFilterSize; j++)
2546  Y += lumSrc[j][i] * lumFilter[j];
2547 
2548  Y >>= 19;
2549  if (Y & 0x100)
2550  Y = av_clip_uint8(Y);
2551 
2552  if (hasAlpha) {
2553  for (j = 0; j < lumFilterSize; j++)
2554  A += alpSrc[j][i] * lumFilter[j];
2555 
2556  A >>= 19;
2557 
2558  if (A & 0x100)
2559  A = av_clip_uint8(A);
2560  }
2561 
2562  dest[2 * i ] = Y;
2563  dest[2 * i + 1] = hasAlpha ? A : 255;
2564  }
2565 }
2566 
2567 static void
2568 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2569  const int16_t **_lumSrc, int lumFilterSize,
2570  const int16_t *chrFilter, const int16_t **_chrUSrc,
2571  const int16_t **_chrVSrc, int chrFilterSize,
2572  const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2573 {
2574  const int32_t **lumSrc = (const int32_t **) _lumSrc,
2575  **chrUSrc = (const int32_t **) _chrUSrc,
2576  **chrVSrc = (const int32_t **) _chrVSrc,
2577  **alpSrc = (const int32_t **) _alpSrc;
2578  int hasAlpha = !!alpSrc;
2579  int i;
2580 
2581  for (i = 0; i < dstW; i++) {
2582  int Y = 1 << 14, U = 1 << 14;
2583  int V = 1 << 14, A = 1 << 14;
2584  int j;
2585 
2586  Y -= 0x40000000;
2587  U -= 0x40000000;
2588  V -= 0x40000000;
2589  A -= 0x40000000;
2590 
2591  for (j = 0; j < lumFilterSize; j++)
2592  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2593 
2594  for (j = 0; j < chrFilterSize; j++)
2595  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2596 
2597  for (j = 0; j < chrFilterSize; j++)
2598  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2599 
2600  if (hasAlpha)
2601  for (j = 0; j < lumFilterSize; j++)
2602  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2603 
2604  Y = 0x8000 + av_clip_int16(Y >> 15);
2605  U = 0x8000 + av_clip_int16(U >> 15);
2606  V = 0x8000 + av_clip_int16(V >> 15);
2607  A = 0x8000 + av_clip_int16(A >> 15);
2608 
2609  AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2610  AV_WL16(dest + 8 * i + 2, Y);
2611  AV_WL16(dest + 8 * i + 4, U);
2612  AV_WL16(dest + 8 * i + 6, V);
2613  }
2614 }
2615 
2616 
2617 static av_always_inline void
2618 yuv2v30_X_c_template(SwsContext *c, const int16_t *lumFilter,
2619  const int16_t **lumSrc, int lumFilterSize,
2620  const int16_t *chrFilter, const int16_t **chrUSrc,
2621  const int16_t **chrVSrc, int chrFilterSize,
2622  const int16_t **alpSrc, uint8_t *dest, int dstW, int y,
2623  int shift)
2624 {
2625  int i;
2626  for (i = 0; i < dstW; i++) {
2627  int Y = 1 << 16, U = 1 << 16, V = 1 << 16;
2628  int j;
2629 
2630  for (j = 0; j < lumFilterSize; j++)
2631  Y += lumSrc[j][i] * lumFilter[j];
2632 
2633  for (j = 0; j < chrFilterSize; j++) {
2634  U += chrUSrc[j][i] * chrFilter[j];
2635  V += chrVSrc[j][i] * chrFilter[j];
2636  }
2637 
2638  Y = av_clip_uintp2(Y >> 17, 10);
2639  U = av_clip_uintp2(U >> 17, 10);
2640  V = av_clip_uintp2(V >> 17, 10);
2641 
2642  AV_WL32(dest + 4 * i, U << (shift + 0) |
2643  Y << (shift + 10) |
2644  (unsigned)V << (shift + 20));
2645  }
2646 }
2647 
/*
 * Instantiate a named _X_c entry point for one packed-10-bit layout by
 * forwarding to yuv2v30_X_c_template() with a compile-time field shift.
 */
#define V30LE_WRAPPER(name, shift) \
static void yuv2 ## name ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                 const int16_t **lumSrc, int lumFilterSize, \
                                 const int16_t *chrFilter, const int16_t **chrUSrc, \
                                 const int16_t **chrVSrc, int chrFilterSize, \
                                 const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                 int y) \
{ \
    yuv2v30_X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                         chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                         alpSrc, dest, dstW, y, shift); \
}

/* fields start at bit 0 (xv30le) or bit 2 (v30xle) */
V30LE_WRAPPER(xv30le, 0)
V30LE_WRAPPER(v30xle, 2)
2663 
2664 static void
2665 yuv2xv36le_X_c(SwsContext *c, const int16_t *lumFilter,
2666  const int16_t **lumSrc, int lumFilterSize,
2667  const int16_t *chrFilter, const int16_t **chrUSrc,
2668  const int16_t **chrVSrc, int chrFilterSize,
2669  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2670 {
2671  int i;
2672  for (i = 0; i < dstW; i++) {
2673  int Y = 1 << 14, U = 1 << 14, V = 1 << 14, A = 255;
2674  int j;
2675 
2676  for (j = 0; j < lumFilterSize; j++)
2677  Y += lumSrc[j][i] * lumFilter[j];
2678 
2679  for (j = 0; j < chrFilterSize; j++) {
2680  U += chrUSrc[j][i] * chrFilter[j];
2681  V += chrVSrc[j][i] * chrFilter[j];
2682  }
2683 
2684  AV_WL16(dest + 8 * i + 2, av_clip_uintp2(Y >> 15, 12) << 4);
2685  AV_WL16(dest + 8 * i + 0, av_clip_uintp2(U >> 15, 12) << 4);
2686  AV_WL16(dest + 8 * i + 4, av_clip_uintp2(V >> 15, 12) << 4);
2687  AV_WL16(dest + 8 * i + 6, A);
2688  }
2689 }
2690 
/*
 * Store one packed 8-bit AYUV-family pixel at byte offset 'pos'.
 * Component order is selected by the compile-time 'target':
 * AYUV: A,Y,U,V — UYVA: U,Y,V,A — otherwise (VUYA/VUYX): V,U,Y,A.
 */
#define output_pixels(pos, A, Y, U, V) \
    if (target == AV_PIX_FMT_AYUV) { \
        dest[pos + 0] = A; \
        dest[pos + 1] = Y; \
        dest[pos + 2] = U; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_UYVA) { \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y; \
        dest[pos + 2] = V; \
        dest[pos + 3] = A; \
    } else { /* AV_PIX_FMT_VUYA || AV_PIX_FMT_VUYX */ \
        dest[pos + 0] = V; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y; \
        dest[pos + 3] = A; \
    }
2708 
2709 static av_always_inline void
2710 yuv2ayuv_1_c_template(SwsContext *c, const int16_t *buf0,
2711  const int16_t *ubuf[2], const int16_t *vbuf[2],
2712  const int16_t *abuf0, uint8_t *dest, int dstW,
2713  int uvalpha, int y, enum AVPixelFormat target)
2714 {
2715  int hasAlpha = !!abuf0;
2716  int i;
2717 
2718  if (uvalpha < 2048) {
2719  for (i = 0; i < dstW; i++) {
2720  int Y = (buf0[i] + 64) >> 7;
2721  int U = (ubuf[0][i] + 64) >> 7;
2722  int V = (vbuf[0][i] + 64) >> 7;
2723  int A = 255;
2724 
2725  if (Y & 0x100)
2726  Y = av_clip_uint8(Y);
2727  if (U & 0x100)
2728  U = av_clip_uint8(U);
2729  if (V & 0x100)
2730  V = av_clip_uint8(V);
2731 
2732  if (hasAlpha) {
2733  A = (abuf0[i] + 64) >> 7;
2734  if (A & 0x100)
2735  A = av_clip_uint8(A);
2736  }
2737 
2738  output_pixels(i * 4, A, Y, U, V)
2739  }
2740  } else {
2741  for (i = 0; i < dstW; i++) {
2742  int Y = (buf0[i] + 64) >> 7;
2743  int U = (ubuf[0][i] + ubuf[1][i] + 128) >> 8;
2744  int V = (vbuf[0][i] + vbuf[1][i] + 128) >> 8;
2745  int A = 255;
2746 
2747  if (Y & 0x100)
2748  Y = av_clip_uint8(Y);
2749  if (U & 0x100)
2750  U = av_clip_uint8(U);
2751  if (V & 0x100)
2752  V = av_clip_uint8(V);
2753 
2754  if (hasAlpha) {
2755  A = (abuf0[i] + 64) >> 7;
2756  if (A & 0x100)
2757  A = av_clip_uint8(A);
2758  }
2759 
2760  output_pixels(i * 4, A, Y, U, V)
2761  }
2762  }
2763 }
2764 
2765 static av_always_inline void
2766 yuv2ayuv_2_c_template(SwsContext *c, const int16_t *buf[2],
2767  const int16_t *ubuf[2], const int16_t *vbuf[2],
2768  const int16_t *abuf[2], uint8_t *dest, int dstW,
2769  int yalpha, int uvalpha, int y,
2770  enum AVPixelFormat target)
2771 {
2772  int hasAlpha = abuf && abuf[0] && abuf[1];
2773  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2774  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
2775  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
2776  *abuf0 = hasAlpha ? abuf[0] : NULL,
2777  *abuf1 = hasAlpha ? abuf[1] : NULL;
2778  int yalpha1 = 4096 - yalpha;
2779  int uvalpha1 = 4096 - uvalpha;
2780  int i;
2781 
2782  av_assert2(yalpha <= 4096U);
2783  av_assert2(uvalpha <= 4096U);
2784 
2785  for (i = 0; i < dstW; i++) {
2786  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2787  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
2788  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
2789  int A = 255;
2790 
2791  if (Y & 0x100)
2792  Y = av_clip_uint8(Y);
2793  if (U & 0x100)
2794  U = av_clip_uint8(U);
2795  if (V & 0x100)
2796  V = av_clip_uint8(V);
2797 
2798  if (hasAlpha) {
2799  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2800  A = av_clip_uint8(A);
2801  }
2802 
2803  output_pixels(i * 4, A, Y, U, V)
2804  }
2805 }
2806 
2807 static av_always_inline void
2808 yuv2ayuv_X_c_template(SwsContext *c, const int16_t *lumFilter,
2809  const int16_t **lumSrc, int lumFilterSize,
2810  const int16_t *chrFilter, const int16_t **chrUSrc,
2811  const int16_t **chrVSrc, int chrFilterSize,
2812  const int16_t **alpSrc, uint8_t *dest, int dstW,
2813  int y, enum AVPixelFormat target)
2814 {
2815  int i;
2816 
2817  for (i = 0; i < dstW; i++) {
2818  int j;
2819  int Y = 1 << 18, U = 1 << 18;
2820  int V = 1 << 18, A = 255;
2821 
2822  for (j = 0; j < lumFilterSize; j++)
2823  Y += lumSrc[j][i] * lumFilter[j];
2824 
2825  for (j = 0; j < chrFilterSize; j++)
2826  U += chrUSrc[j][i] * chrFilter[j];
2827 
2828  for (j = 0; j < chrFilterSize; j++)
2829  V += chrVSrc[j][i] * chrFilter[j];
2830 
2831  Y >>= 19;
2832  U >>= 19;
2833  V >>= 19;
2834 
2835  if (Y & 0x100)
2836  Y = av_clip_uint8(Y);
2837  if (U & 0x100)
2838  U = av_clip_uint8(U);
2839  if (V & 0x100)
2840  V = av_clip_uint8(V);
2841 
2842  if (alpSrc) {
2843  A = 1 << 18;
2844 
2845  for (j = 0; j < lumFilterSize; j++)
2846  A += alpSrc[j][i] * lumFilter[j];
2847 
2848  A >>= 19;
2849 
2850  if (A & 0x100)
2851  A = av_clip_uint8(A);
2852  }
2853 
2854  output_pixels(i * 4, A, Y, U, V)
2855  }
2856 }
2857 
2858 #undef output_pixels
2859 
/*
 * Instantiate the three entry points (_X_c vertical filter, _2_c bilinear,
 * _1_c unscaled) for one packed AYUV-family format by forwarding to the
 * yuv2ayuv_*_c_template functions with a compile-time 'fmt'.
 */
#define AYUVPACKEDWRAPPER(name, fmt) \
static void yuv2 ## name ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                 const int16_t **lumSrc, int lumFilterSize, \
                                 const int16_t *chrFilter, const int16_t **chrUSrc, \
                                 const int16_t **chrVSrc, int chrFilterSize, \
                                 const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                 int y) \
{ \
    yuv2ayuv_X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt); \
} \
 \
static void yuv2 ## name ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                 const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                 const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                 int yalpha, int uvalpha, int y) \
{ \
    yuv2ayuv_2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void yuv2 ## name ## _1_c(SwsContext *c, const int16_t *buf0, \
                                 const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                 const int16_t *abuf0, uint8_t *dest, int dstW, \
                                 int uvalpha, int y) \
{ \
    yuv2ayuv_1_c_template(c, buf0, ubuf, vbuf, \
                          abuf0, dest, dstW, uvalpha, \
                          y, fmt); \
}
2891 
2895 
/* Store one 16-bit little-endian component for the Y2xx writers below:
 * reduce the fixed-point accumulator to 'bits' significant bits with a
 * clip, then left-align the value in the 16-bit word via output_shift.
 * Relies on 'shift' and 'output_shift' locals of the enclosing wrapper. */
#define output_pixel(pos, val, bits) \
    AV_WL16(pos, av_clip_uintp2(val >> shift, bits) << output_shift);
2898 
/**
 * Generate a multi-tap (X) vertical scaler writing packed 4:2:2 output
 * with 16-bit little-endian words laid out Y1 U Y2 V (8 bytes per pixel
 * pair), with 'bits' significant bits stored in the high bits of each
 * word.  Two luma samples share one U/V pair; alpha is ignored.
 */
#define yuv2y2xx_wrapper(bits) \
    static void \
    yuv2y2 ## bits ## le_X_c(SwsContext *c, const int16_t *lumFilter, \
                             const int16_t **lumSrc, int lumFilterSize, \
                             const int16_t *chrFilter, \
                             const int16_t **chrUSrc, \
                             const int16_t **chrVSrc, int chrFilterSize, \
                             const int16_t **alpSrc, \
                             uint8_t *dest, int dstW, int y) \
    { \
        int i, j; \
        int shift = 11 + 16 - bits; \
        int output_shift = 16 - bits; \
        for (i = 0; i < ((dstW + 1) >> 1); i++) { \
            int Y1 = 1 << (shift - 1), Y2 = 1 << (shift - 1); \
            int U  = 1 << (shift - 1), V  = 1 << (shift - 1); \
            \
            for (j = 0; j < lumFilterSize; j++) { \
                Y1 += lumSrc[j][i * 2]     * lumFilter[j]; \
                Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; \
            } \
            \
            for (j = 0; j < chrFilterSize; j++) { \
                U += chrUSrc[j][i] * chrFilter[j]; \
                V += chrVSrc[j][i] * chrFilter[j]; \
            } \
            \
            output_pixel(dest + 8 * i + 0, Y1, bits); \
            output_pixel(dest + 8 * i + 2, U,  bits); \
            output_pixel(dest + 8 * i + 4, Y2, bits); \
            output_pixel(dest + 8 * i + 6, V,  bits); \
        } \
    }
2932 
2934 yuv2y2xx_wrapper(12)
2935 
2936 static void
2937 yuv2vyu444_1_c(SwsContext *c, const int16_t *buf0,
2938  const int16_t *ubuf[2], const int16_t *vbuf[2],
2939  const int16_t *abuf0, uint8_t *dest, int dstW,
2940  int uvalpha, int y)
2941 {
2942  int i;
2943 
2944  if (uvalpha < 2048) {
2945  for (i = 0; i < dstW; i++) {
2946  int Y = (buf0[i] + 64) >> 7;
2947  int U = (ubuf[0][i] + 64) >> 7;
2948  int V = (vbuf[0][i] + 64) >> 7;
2949 
2950  if (Y & 0x100)
2951  Y = av_clip_uint8(Y);
2952  if (U & 0x100)
2953  U = av_clip_uint8(U);
2954  if (V & 0x100)
2955  V = av_clip_uint8(V);
2956 
2957  dest[3 * i ] = V;
2958  dest[3 * i + 1] = Y;
2959  dest[3 * i + 2] = U;
2960  }
2961  } else {
2962  for (i = 0; i < dstW; i++) {
2963  int Y = (buf0[i] + 64) >> 7;
2964  int U = (ubuf[0][i] + ubuf[1][i] + 128) >> 8;
2965  int V = (vbuf[0][i] + vbuf[1][i] + 128) >> 8;
2966 
2967  if (Y & 0x100)
2968  Y = av_clip_uint8(Y);
2969  if (U & 0x100)
2970  U = av_clip_uint8(U);
2971  if (V & 0x100)
2972  V = av_clip_uint8(V);
2973 
2974  dest[3 * i ] = V;
2975  dest[3 * i + 1] = Y;
2976  dest[3 * i + 2] = U;
2977  }
2978  }
2979 }
2980 
2981 static void
2982 yuv2vyu444_2_c(SwsContext *c, const int16_t *buf[2],
2983  const int16_t *ubuf[2], const int16_t *vbuf[2],
2984  const int16_t *abuf[2], uint8_t *dest, int dstW,
2985  int yalpha, int uvalpha, int y)
2986 {
2987  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2988  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
2989  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
2990  int yalpha1 = 4096 - yalpha;
2991  int uvalpha1 = 4096 - uvalpha;
2992  int i;
2993 
2994  av_assert2(yalpha <= 4096U);
2995  av_assert2(uvalpha <= 4096U);
2996 
2997  for (i = 0; i < dstW; i++) {
2998  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2999  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
3000  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
3001 
3002  if (Y & 0x100)
3003  Y = av_clip_uint8(Y);
3004  if (U & 0x100)
3005  U = av_clip_uint8(U);
3006  if (V & 0x100)
3007  V = av_clip_uint8(V);
3008 
3009  dest[3 * i ] = V;
3010  dest[3 * i + 1] = Y;
3011  dest[3 * i + 2] = U;
3012  }
3013 }
3014 
3015 static void
3016 yuv2vyu444_X_c(SwsContext *c, const int16_t *lumFilter,
3017  const int16_t **lumSrc, int lumFilterSize,
3018  const int16_t *chrFilter, const int16_t **chrUSrc,
3019  const int16_t **chrVSrc, int chrFilterSize,
3020  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
3021 {
3022  int i;
3023 
3024  for (i = 0; i < dstW; i++) {
3025  int j;
3026  int Y = 1 << 18, U = 1 << 18;
3027  int V = 1 << 18;
3028 
3029  for (j = 0; j < lumFilterSize; j++)
3030  Y += lumSrc[j][i] * lumFilter[j];
3031 
3032  for (j = 0; j < chrFilterSize; j++)
3033  U += chrUSrc[j][i] * chrFilter[j];
3034 
3035  for (j = 0; j < chrFilterSize; j++)
3036  V += chrVSrc[j][i] * chrFilter[j];
3037 
3038  Y >>= 19;
3039  U >>= 19;
3040  V >>= 19;
3041 
3042  if (Y & 0x100)
3043  Y = av_clip_uint8(Y);
3044  if (U & 0x100)
3045  U = av_clip_uint8(U);
3046  if (V & 0x100)
3047  V = av_clip_uint8(V);
3048 
3049  dest[3 * i ] = V;
3050  dest[3 * i + 1] = Y;
3051  dest[3 * i + 2] = U;
3052  }
3053 }
3054 
3055 #undef output_pixel
3056 
3058  yuv2planar1_fn *yuv2plane1,
3060  yuv2interleavedX_fn *yuv2nv12cX,
3061  yuv2packed1_fn *yuv2packed1,
3062  yuv2packed2_fn *yuv2packed2,
3063  yuv2packedX_fn *yuv2packedX,
3064  yuv2anyX_fn *yuv2anyX)
3065 {
3066  enum AVPixelFormat dstFormat = c->dstFormat;
3067  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
3068 
3069  if (isSemiPlanarYUV(dstFormat) && isDataInHighBits(dstFormat)) {
3070  if (desc->comp[0].depth == 10) {
3071  *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
3072  *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
3073  *yuv2nv12cX = isBE(dstFormat) ? yuv2p010cX_BE_c : yuv2p010cX_LE_c;
3074  } else if (desc->comp[0].depth == 12) {
3075  *yuv2plane1 = isBE(dstFormat) ? yuv2p012l1_BE_c : yuv2p012l1_LE_c;
3076  *yuv2planeX = isBE(dstFormat) ? yuv2p012lX_BE_c : yuv2p012lX_LE_c;
3077  *yuv2nv12cX = isBE(dstFormat) ? yuv2p012cX_BE_c : yuv2p012cX_LE_c;
3078  } else
3079  av_assert0(0);
3080  } else if (is16BPS(dstFormat)) {
3081  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
3082  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
3083  if (isSemiPlanarYUV(dstFormat)) {
3084  *yuv2nv12cX = isBE(dstFormat) ? yuv2nv12cX_16BE_c : yuv2nv12cX_16LE_c;
3085  }
3086  } else if (isNBPS(dstFormat)) {
3087  if (desc->comp[0].depth == 9) {
3088  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
3089  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
3090  } else if (desc->comp[0].depth == 10) {
3091  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
3092  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
3093  } else if (desc->comp[0].depth == 12) {
3094  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
3095  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
3096  } else if (desc->comp[0].depth == 14) {
3097  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
3098  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
3099  } else
3100  av_assert0(0);
3101  } else if (dstFormat == AV_PIX_FMT_GRAYF32BE) {
3102  *yuv2planeX = yuv2planeX_floatBE_c;
3103  *yuv2plane1 = yuv2plane1_floatBE_c;
3104  } else if (dstFormat == AV_PIX_FMT_GRAYF32LE) {
3105  *yuv2planeX = yuv2planeX_floatLE_c;
3106  *yuv2plane1 = yuv2plane1_floatLE_c;
3107  } else {
3108  *yuv2plane1 = yuv2plane1_8_c;
3110  if (isSemiPlanarYUV(dstFormat))
3111  *yuv2nv12cX = yuv2nv12cX_c;
3112  }
3113 
3114  if(c->flags & SWS_FULL_CHR_H_INT) {
3115  switch (dstFormat) {
3116  case AV_PIX_FMT_RGBA:
3117 #if CONFIG_SMALL
3118  *yuv2packedX = yuv2rgba32_full_X_c;
3119  *yuv2packed2 = yuv2rgba32_full_2_c;
3120  *yuv2packed1 = yuv2rgba32_full_1_c;
3121 #else
3122 #if CONFIG_SWSCALE_ALPHA
3123  if (c->needAlpha) {
3124  *yuv2packedX = yuv2rgba32_full_X_c;
3125  *yuv2packed2 = yuv2rgba32_full_2_c;
3126  *yuv2packed1 = yuv2rgba32_full_1_c;
3127  } else
3128 #endif /* CONFIG_SWSCALE_ALPHA */
3129  {
3130  *yuv2packedX = yuv2rgbx32_full_X_c;
3131  *yuv2packed2 = yuv2rgbx32_full_2_c;
3132  *yuv2packed1 = yuv2rgbx32_full_1_c;
3133  }
3134 #endif /* !CONFIG_SMALL */
3135  break;
3136  case AV_PIX_FMT_ARGB:
3137 #if CONFIG_SMALL
3138  *yuv2packedX = yuv2argb32_full_X_c;
3139  *yuv2packed2 = yuv2argb32_full_2_c;
3140  *yuv2packed1 = yuv2argb32_full_1_c;
3141 #else
3142 #if CONFIG_SWSCALE_ALPHA
3143  if (c->needAlpha) {
3144  *yuv2packedX = yuv2argb32_full_X_c;
3145  *yuv2packed2 = yuv2argb32_full_2_c;
3146  *yuv2packed1 = yuv2argb32_full_1_c;
3147  } else
3148 #endif /* CONFIG_SWSCALE_ALPHA */
3149  {
3150  *yuv2packedX = yuv2xrgb32_full_X_c;
3151  *yuv2packed2 = yuv2xrgb32_full_2_c;
3152  *yuv2packed1 = yuv2xrgb32_full_1_c;
3153  }
3154 #endif /* !CONFIG_SMALL */
3155  break;
3156  case AV_PIX_FMT_BGRA:
3157 #if CONFIG_SMALL
3158  *yuv2packedX = yuv2bgra32_full_X_c;
3159  *yuv2packed2 = yuv2bgra32_full_2_c;
3160  *yuv2packed1 = yuv2bgra32_full_1_c;
3161 #else
3162 #if CONFIG_SWSCALE_ALPHA
3163  if (c->needAlpha) {
3164  *yuv2packedX = yuv2bgra32_full_X_c;
3165  *yuv2packed2 = yuv2bgra32_full_2_c;
3166  *yuv2packed1 = yuv2bgra32_full_1_c;
3167  } else
3168 #endif /* CONFIG_SWSCALE_ALPHA */
3169  {
3170  *yuv2packedX = yuv2bgrx32_full_X_c;
3171  *yuv2packed2 = yuv2bgrx32_full_2_c;
3172  *yuv2packed1 = yuv2bgrx32_full_1_c;
3173  }
3174 #endif /* !CONFIG_SMALL */
3175  break;
3176  case AV_PIX_FMT_ABGR:
3177 #if CONFIG_SMALL
3178  *yuv2packedX = yuv2abgr32_full_X_c;
3179  *yuv2packed2 = yuv2abgr32_full_2_c;
3180  *yuv2packed1 = yuv2abgr32_full_1_c;
3181 #else
3182 #if CONFIG_SWSCALE_ALPHA
3183  if (c->needAlpha) {
3184  *yuv2packedX = yuv2abgr32_full_X_c;
3185  *yuv2packed2 = yuv2abgr32_full_2_c;
3186  *yuv2packed1 = yuv2abgr32_full_1_c;
3187  } else
3188 #endif /* CONFIG_SWSCALE_ALPHA */
3189  {
3190  *yuv2packedX = yuv2xbgr32_full_X_c;
3191  *yuv2packed2 = yuv2xbgr32_full_2_c;
3192  *yuv2packed1 = yuv2xbgr32_full_1_c;
3193  }
3194 #endif /* !CONFIG_SMALL */
3195  break;
3196  case AV_PIX_FMT_RGBA64LE:
3197 #if CONFIG_SWSCALE_ALPHA
3198  if (c->needAlpha) {
3199  *yuv2packedX = yuv2rgba64le_full_X_c;
3200  *yuv2packed2 = yuv2rgba64le_full_2_c;
3201  *yuv2packed1 = yuv2rgba64le_full_1_c;
3202  } else
3203 #endif /* CONFIG_SWSCALE_ALPHA */
3204  {
3205  *yuv2packedX = yuv2rgbx64le_full_X_c;
3206  *yuv2packed2 = yuv2rgbx64le_full_2_c;
3207  *yuv2packed1 = yuv2rgbx64le_full_1_c;
3208  }
3209  break;
3210  case AV_PIX_FMT_RGBA64BE:
3211 #if CONFIG_SWSCALE_ALPHA
3212  if (c->needAlpha) {
3213  *yuv2packedX = yuv2rgba64be_full_X_c;
3214  *yuv2packed2 = yuv2rgba64be_full_2_c;
3215  *yuv2packed1 = yuv2rgba64be_full_1_c;
3216  } else
3217 #endif /* CONFIG_SWSCALE_ALPHA */
3218  {
3219  *yuv2packedX = yuv2rgbx64be_full_X_c;
3220  *yuv2packed2 = yuv2rgbx64be_full_2_c;
3221  *yuv2packed1 = yuv2rgbx64be_full_1_c;
3222  }
3223  break;
3224  case AV_PIX_FMT_BGRA64LE:
3225 #if CONFIG_SWSCALE_ALPHA
3226  if (c->needAlpha) {
3227  *yuv2packedX = yuv2bgra64le_full_X_c;
3228  *yuv2packed2 = yuv2bgra64le_full_2_c;
3229  *yuv2packed1 = yuv2bgra64le_full_1_c;
3230  } else
3231 #endif /* CONFIG_SWSCALE_ALPHA */
3232  {
3233  *yuv2packedX = yuv2bgrx64le_full_X_c;
3234  *yuv2packed2 = yuv2bgrx64le_full_2_c;
3235  *yuv2packed1 = yuv2bgrx64le_full_1_c;
3236  }
3237  break;
3238  case AV_PIX_FMT_BGRA64BE:
3239 #if CONFIG_SWSCALE_ALPHA
3240  if (c->needAlpha) {
3241  *yuv2packedX = yuv2bgra64be_full_X_c;
3242  *yuv2packed2 = yuv2bgra64be_full_2_c;
3243  *yuv2packed1 = yuv2bgra64be_full_1_c;
3244  } else
3245 #endif /* CONFIG_SWSCALE_ALPHA */
3246  {
3247  *yuv2packedX = yuv2bgrx64be_full_X_c;
3248  *yuv2packed2 = yuv2bgrx64be_full_2_c;
3249  *yuv2packed1 = yuv2bgrx64be_full_1_c;
3250  }
3251  break;
3252 
3253  case AV_PIX_FMT_RGB24:
3254  *yuv2packedX = yuv2rgb24_full_X_c;
3255  *yuv2packed2 = yuv2rgb24_full_2_c;
3256  *yuv2packed1 = yuv2rgb24_full_1_c;
3257  break;
3258  case AV_PIX_FMT_BGR24:
3259  *yuv2packedX = yuv2bgr24_full_X_c;
3260  *yuv2packed2 = yuv2bgr24_full_2_c;
3261  *yuv2packed1 = yuv2bgr24_full_1_c;
3262  break;
3263  case AV_PIX_FMT_RGB48LE:
3264  *yuv2packedX = yuv2rgb48le_full_X_c;
3265  *yuv2packed2 = yuv2rgb48le_full_2_c;
3266  *yuv2packed1 = yuv2rgb48le_full_1_c;
3267  break;
3268  case AV_PIX_FMT_BGR48LE:
3269  *yuv2packedX = yuv2bgr48le_full_X_c;
3270  *yuv2packed2 = yuv2bgr48le_full_2_c;
3271  *yuv2packed1 = yuv2bgr48le_full_1_c;
3272  break;
3273  case AV_PIX_FMT_RGB48BE:
3274  *yuv2packedX = yuv2rgb48be_full_X_c;
3275  *yuv2packed2 = yuv2rgb48be_full_2_c;
3276  *yuv2packed1 = yuv2rgb48be_full_1_c;
3277  break;
3278  case AV_PIX_FMT_BGR48BE:
3279  *yuv2packedX = yuv2bgr48be_full_X_c;
3280  *yuv2packed2 = yuv2bgr48be_full_2_c;
3281  *yuv2packed1 = yuv2bgr48be_full_1_c;
3282  break;
3283  case AV_PIX_FMT_BGR4_BYTE:
3284  *yuv2packedX = yuv2bgr4_byte_full_X_c;
3285  *yuv2packed2 = yuv2bgr4_byte_full_2_c;
3286  *yuv2packed1 = yuv2bgr4_byte_full_1_c;
3287  break;
3288  case AV_PIX_FMT_RGB4_BYTE:
3289  *yuv2packedX = yuv2rgb4_byte_full_X_c;
3290  *yuv2packed2 = yuv2rgb4_byte_full_2_c;
3291  *yuv2packed1 = yuv2rgb4_byte_full_1_c;
3292  break;
3293  case AV_PIX_FMT_BGR8:
3294  *yuv2packedX = yuv2bgr8_full_X_c;
3295  *yuv2packed2 = yuv2bgr8_full_2_c;
3296  *yuv2packed1 = yuv2bgr8_full_1_c;
3297  break;
3298  case AV_PIX_FMT_RGB8:
3299  *yuv2packedX = yuv2rgb8_full_X_c;
3300  *yuv2packed2 = yuv2rgb8_full_2_c;
3301  *yuv2packed1 = yuv2rgb8_full_1_c;
3302  break;
3303  case AV_PIX_FMT_GBRP:
3304  case AV_PIX_FMT_GBRP9BE:
3305  case AV_PIX_FMT_GBRP9LE:
3306  case AV_PIX_FMT_GBRP10BE:
3307  case AV_PIX_FMT_GBRP10LE:
3308  case AV_PIX_FMT_GBRP12BE:
3309  case AV_PIX_FMT_GBRP12LE:
3310  case AV_PIX_FMT_GBRP14BE:
3311  case AV_PIX_FMT_GBRP14LE:
3312  case AV_PIX_FMT_GBRAP:
3313  case AV_PIX_FMT_GBRAP10BE:
3314  case AV_PIX_FMT_GBRAP10LE:
3315  case AV_PIX_FMT_GBRAP12BE:
3316  case AV_PIX_FMT_GBRAP12LE:
3317  case AV_PIX_FMT_GBRAP14BE:
3318  case AV_PIX_FMT_GBRAP14LE:
3319  *yuv2anyX = yuv2gbrp_full_X_c;
3320  break;
3321  case AV_PIX_FMT_GBRP16BE:
3322  case AV_PIX_FMT_GBRP16LE:
3323  case AV_PIX_FMT_GBRAP16BE:
3324  case AV_PIX_FMT_GBRAP16LE:
3325  *yuv2anyX = yuv2gbrp16_full_X_c;
3326  break;
3327  case AV_PIX_FMT_GBRPF32BE:
3328  case AV_PIX_FMT_GBRPF32LE:
3329  case AV_PIX_FMT_GBRAPF32BE:
3330  case AV_PIX_FMT_GBRAPF32LE:
3331  *yuv2anyX = yuv2gbrpf32_full_X_c;
3332  break;
3333  }
3334  if (!*yuv2packedX && !*yuv2anyX)
3335  goto YUV_PACKED;
3336  } else {
3337  YUV_PACKED:
3338  switch (dstFormat) {
3339  case AV_PIX_FMT_RGBA64LE:
3340 #if CONFIG_SWSCALE_ALPHA
3341  if (c->needAlpha) {
3342  *yuv2packed1 = yuv2rgba64le_1_c;
3343  *yuv2packed2 = yuv2rgba64le_2_c;
3344  *yuv2packedX = yuv2rgba64le_X_c;
3345  } else
3346 #endif /* CONFIG_SWSCALE_ALPHA */
3347  {
3348  *yuv2packed1 = yuv2rgbx64le_1_c;
3349  *yuv2packed2 = yuv2rgbx64le_2_c;
3350  *yuv2packedX = yuv2rgbx64le_X_c;
3351  }
3352  break;
3353  case AV_PIX_FMT_RGBA64BE:
3354 #if CONFIG_SWSCALE_ALPHA
3355  if (c->needAlpha) {
3356  *yuv2packed1 = yuv2rgba64be_1_c;
3357  *yuv2packed2 = yuv2rgba64be_2_c;
3358  *yuv2packedX = yuv2rgba64be_X_c;
3359  } else
3360 #endif /* CONFIG_SWSCALE_ALPHA */
3361  {
3362  *yuv2packed1 = yuv2rgbx64be_1_c;
3363  *yuv2packed2 = yuv2rgbx64be_2_c;
3364  *yuv2packedX = yuv2rgbx64be_X_c;
3365  }
3366  break;
3367  case AV_PIX_FMT_BGRA64LE:
3368 #if CONFIG_SWSCALE_ALPHA
3369  if (c->needAlpha) {
3370  *yuv2packed1 = yuv2bgra64le_1_c;
3371  *yuv2packed2 = yuv2bgra64le_2_c;
3372  *yuv2packedX = yuv2bgra64le_X_c;
3373  } else
3374 #endif /* CONFIG_SWSCALE_ALPHA */
3375  {
3376  *yuv2packed1 = yuv2bgrx64le_1_c;
3377  *yuv2packed2 = yuv2bgrx64le_2_c;
3378  *yuv2packedX = yuv2bgrx64le_X_c;
3379  }
3380  break;
3381  case AV_PIX_FMT_BGRA64BE:
3382 #if CONFIG_SWSCALE_ALPHA
3383  if (c->needAlpha) {
3384  *yuv2packed1 = yuv2bgra64be_1_c;
3385  *yuv2packed2 = yuv2bgra64be_2_c;
3386  *yuv2packedX = yuv2bgra64be_X_c;
3387  } else
3388 #endif /* CONFIG_SWSCALE_ALPHA */
3389  {
3390  *yuv2packed1 = yuv2bgrx64be_1_c;
3391  *yuv2packed2 = yuv2bgrx64be_2_c;
3392  *yuv2packedX = yuv2bgrx64be_X_c;
3393  }
3394  break;
3395  case AV_PIX_FMT_RGB48LE:
3396  *yuv2packed1 = yuv2rgb48le_1_c;
3397  *yuv2packed2 = yuv2rgb48le_2_c;
3398  *yuv2packedX = yuv2rgb48le_X_c;
3399  break;
3400  case AV_PIX_FMT_RGB48BE:
3401  *yuv2packed1 = yuv2rgb48be_1_c;
3402  *yuv2packed2 = yuv2rgb48be_2_c;
3403  *yuv2packedX = yuv2rgb48be_X_c;
3404  break;
3405  case AV_PIX_FMT_BGR48LE:
3406  *yuv2packed1 = yuv2bgr48le_1_c;
3407  *yuv2packed2 = yuv2bgr48le_2_c;
3408  *yuv2packedX = yuv2bgr48le_X_c;
3409  break;
3410  case AV_PIX_FMT_BGR48BE:
3411  *yuv2packed1 = yuv2bgr48be_1_c;
3412  *yuv2packed2 = yuv2bgr48be_2_c;
3413  *yuv2packedX = yuv2bgr48be_X_c;
3414  break;
3415  case AV_PIX_FMT_RGB32:
3416  case AV_PIX_FMT_BGR32:
3417 #if CONFIG_SMALL
3418  *yuv2packed1 = yuv2rgb32_1_c;
3419  *yuv2packed2 = yuv2rgb32_2_c;
3420  *yuv2packedX = yuv2rgb32_X_c;
3421 #else
3422 #if CONFIG_SWSCALE_ALPHA
3423  if (c->needAlpha) {
3424  *yuv2packed1 = yuv2rgba32_1_c;
3425  *yuv2packed2 = yuv2rgba32_2_c;
3426  *yuv2packedX = yuv2rgba32_X_c;
3427  } else
3428 #endif /* CONFIG_SWSCALE_ALPHA */
3429  {
3430  *yuv2packed1 = yuv2rgbx32_1_c;
3431  *yuv2packed2 = yuv2rgbx32_2_c;
3432  *yuv2packedX = yuv2rgbx32_X_c;
3433  }
3434 #endif /* !CONFIG_SMALL */
3435  break;
3436  case AV_PIX_FMT_RGB32_1:
3437  case AV_PIX_FMT_BGR32_1:
3438 #if CONFIG_SMALL
3439  *yuv2packed1 = yuv2rgb32_1_1_c;
3440  *yuv2packed2 = yuv2rgb32_1_2_c;
3441  *yuv2packedX = yuv2rgb32_1_X_c;
3442 #else
3443 #if CONFIG_SWSCALE_ALPHA
3444  if (c->needAlpha) {
3445  *yuv2packed1 = yuv2rgba32_1_1_c;
3446  *yuv2packed2 = yuv2rgba32_1_2_c;
3447  *yuv2packedX = yuv2rgba32_1_X_c;
3448  } else
3449 #endif /* CONFIG_SWSCALE_ALPHA */
3450  {
3451  *yuv2packed1 = yuv2rgbx32_1_1_c;
3452  *yuv2packed2 = yuv2rgbx32_1_2_c;
3453  *yuv2packedX = yuv2rgbx32_1_X_c;
3454  }
3455 #endif /* !CONFIG_SMALL */
3456  break;
3457  case AV_PIX_FMT_RGB24:
3458  *yuv2packed1 = yuv2rgb24_1_c;
3459  *yuv2packed2 = yuv2rgb24_2_c;
3460  *yuv2packedX = yuv2rgb24_X_c;
3461  break;
3462  case AV_PIX_FMT_BGR24:
3463  *yuv2packed1 = yuv2bgr24_1_c;
3464  *yuv2packed2 = yuv2bgr24_2_c;
3465  *yuv2packedX = yuv2bgr24_X_c;
3466  break;
3467  case AV_PIX_FMT_RGB565LE:
3468  case AV_PIX_FMT_RGB565BE:
3469  case AV_PIX_FMT_BGR565LE:
3470  case AV_PIX_FMT_BGR565BE:
3471  *yuv2packed1 = yuv2rgb16_1_c;
3472  *yuv2packed2 = yuv2rgb16_2_c;
3473  *yuv2packedX = yuv2rgb16_X_c;
3474  break;
3475  case AV_PIX_FMT_RGB555LE:
3476  case AV_PIX_FMT_RGB555BE:
3477  case AV_PIX_FMT_BGR555LE:
3478  case AV_PIX_FMT_BGR555BE:
3479  *yuv2packed1 = yuv2rgb15_1_c;
3480  *yuv2packed2 = yuv2rgb15_2_c;
3481  *yuv2packedX = yuv2rgb15_X_c;
3482  break;
3483  case AV_PIX_FMT_RGB444LE:
3484  case AV_PIX_FMT_RGB444BE:
3485  case AV_PIX_FMT_BGR444LE:
3486  case AV_PIX_FMT_BGR444BE:
3487  *yuv2packed1 = yuv2rgb12_1_c;
3488  *yuv2packed2 = yuv2rgb12_2_c;
3489  *yuv2packedX = yuv2rgb12_X_c;
3490  break;
3491  case AV_PIX_FMT_RGB8:
3492  case AV_PIX_FMT_BGR8:
3493  *yuv2packed1 = yuv2rgb8_1_c;
3494  *yuv2packed2 = yuv2rgb8_2_c;
3495  *yuv2packedX = yuv2rgb8_X_c;
3496  break;
3497  case AV_PIX_FMT_RGB4:
3498  case AV_PIX_FMT_BGR4:
3499  *yuv2packed1 = yuv2rgb4_1_c;
3500  *yuv2packed2 = yuv2rgb4_2_c;
3501  *yuv2packedX = yuv2rgb4_X_c;
3502  break;
3503  case AV_PIX_FMT_RGB4_BYTE:
3504  case AV_PIX_FMT_BGR4_BYTE:
3505  *yuv2packed1 = yuv2rgb4b_1_c;
3506  *yuv2packed2 = yuv2rgb4b_2_c;
3507  *yuv2packedX = yuv2rgb4b_X_c;
3508  break;
3509  case AV_PIX_FMT_X2RGB10LE:
3510  case AV_PIX_FMT_X2RGB10BE:
3511  *yuv2packed1 = yuv2x2rgb10_1_c;
3512  *yuv2packed2 = yuv2x2rgb10_2_c;
3513  *yuv2packedX = yuv2x2rgb10_X_c;
3514  break;
3515  case AV_PIX_FMT_X2BGR10LE:
3516  case AV_PIX_FMT_X2BGR10BE:
3517  *yuv2packed1 = yuv2x2bgr10_1_c;
3518  *yuv2packed2 = yuv2x2bgr10_2_c;
3519  *yuv2packedX = yuv2x2bgr10_X_c;
3520  break;
3521  }
3522  }
3523  switch (dstFormat) {
3524  case AV_PIX_FMT_MONOWHITE:
3525  *yuv2packed1 = yuv2monowhite_1_c;
3526  *yuv2packed2 = yuv2monowhite_2_c;
3527  *yuv2packedX = yuv2monowhite_X_c;
3528  break;
3529  case AV_PIX_FMT_MONOBLACK:
3530  *yuv2packed1 = yuv2monoblack_1_c;
3531  *yuv2packed2 = yuv2monoblack_2_c;
3532  *yuv2packedX = yuv2monoblack_X_c;
3533  break;
3534  case AV_PIX_FMT_YUYV422:
3535  *yuv2packed1 = yuv2yuyv422_1_c;
3536  *yuv2packed2 = yuv2yuyv422_2_c;
3537  *yuv2packedX = yuv2yuyv422_X_c;
3538  break;
3539  case AV_PIX_FMT_YVYU422:
3540  *yuv2packed1 = yuv2yvyu422_1_c;
3541  *yuv2packed2 = yuv2yvyu422_2_c;
3542  *yuv2packedX = yuv2yvyu422_X_c;
3543  break;
3544  case AV_PIX_FMT_UYVY422:
3545  *yuv2packed1 = yuv2uyvy422_1_c;
3546  *yuv2packed2 = yuv2uyvy422_2_c;
3547  *yuv2packedX = yuv2uyvy422_X_c;
3548  break;
3549  case AV_PIX_FMT_VYU444:
3550  *yuv2packed1 = yuv2vyu444_1_c;
3551  *yuv2packed2 = yuv2vyu444_2_c;
3552  *yuv2packedX = yuv2vyu444_X_c;
3553  break;
3554  case AV_PIX_FMT_YA8:
3555  *yuv2packed1 = yuv2ya8_1_c;
3556  *yuv2packed2 = yuv2ya8_2_c;
3557  *yuv2packedX = yuv2ya8_X_c;
3558  break;
3559  case AV_PIX_FMT_YA16LE:
3560  *yuv2packed1 = yuv2ya16le_1_c;
3561  *yuv2packed2 = yuv2ya16le_2_c;
3562  *yuv2packedX = yuv2ya16le_X_c;
3563  break;
3564  case AV_PIX_FMT_YA16BE:
3565  *yuv2packed1 = yuv2ya16be_1_c;
3566  *yuv2packed2 = yuv2ya16be_2_c;
3567  *yuv2packedX = yuv2ya16be_X_c;
3568  break;
3569  case AV_PIX_FMT_V30XLE:
3570  *yuv2packedX = yuv2v30xle_X_c;
3571  break;
3572  case AV_PIX_FMT_AYUV64LE:
3573  *yuv2packedX = yuv2ayuv64le_X_c;
3574  break;
3575  case AV_PIX_FMT_AYUV:
3576  *yuv2packed1 = yuv2ayuv_1_c;
3577  *yuv2packed2 = yuv2ayuv_2_c;
3578  *yuv2packedX = yuv2ayuv_X_c;
3579  break;
3580  case AV_PIX_FMT_VUYA:
3581  case AV_PIX_FMT_VUYX:
3582  *yuv2packed1 = yuv2vuyX_1_c;
3583  *yuv2packed2 = yuv2vuyX_2_c;
3584  *yuv2packedX = yuv2vuyX_X_c;
3585  break;
3586  case AV_PIX_FMT_UYVA:
3587  *yuv2packed1 = yuv2uyva_1_c;
3588  *yuv2packed2 = yuv2uyva_2_c;
3589  *yuv2packedX = yuv2uyva_X_c;
3590  break;
3591  case AV_PIX_FMT_XV30LE:
3592  *yuv2packedX = yuv2xv30le_X_c;
3593  break;
3594  case AV_PIX_FMT_XV36LE:
3595  *yuv2packedX = yuv2xv36le_X_c;
3596  break;
3597  case AV_PIX_FMT_Y210LE:
3598  *yuv2packedX = yuv2y210le_X_c;
3599  break;
3600  case AV_PIX_FMT_Y212LE:
3601  *yuv2packedX = yuv2y212le_X_c;
3602  break;
3603  }
3604 }
yuv2vyu444_2_c
static void yuv2vyu444_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2982
yuv2packed2_fn
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:221
A
#define A(x)
Definition: vpx_arith.h:28
AV_PIX_FMT_XV30LE
@ AV_PIX_FMT_XV30LE
packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), little-endian, variant of Y410 where alpha channe...
Definition: pixfmt.h:415
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:115
yuv2packed1_fn
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:188
YUV2PACKEDWRAPPER
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
Definition: output.c:764
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AV_PIX_FMT_BGR48LE
@ AV_PIX_FMT_BGR48LE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:146
av_clip
#define av_clip
Definition: common.h:100
ff_dither_4x4_16
const uint8_t ff_dither_4x4_16[][8]
Definition: output.c:51
X_DITHER
#define X_DITHER(u, v)
r
const char * r
Definition: vf_curves.c:127
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
AV_PIX_FMT_BGRA64BE
@ AV_PIX_FMT_BGRA64BE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:204
yuv2ya16_2_c_template
static av_always_inline void yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:989
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
yuv2rgb_X_c_template
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1712
mem_internal.h
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:462
AV_PIX_FMT_RGB444LE
@ AV_PIX_FMT_RGB444LE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:136
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
AV_PIX_FMT_GBRP10BE
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:169
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3025
ff_dither_8x8_32
const uint8_t ff_dither_8x8_32[][8]
Definition: output.c:59
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
yuv2rgb_full_1_c_template
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2164
SWS_DITHER_A_DITHER
@ SWS_DITHER_A_DITHER
Definition: swscale_internal.h:74
accumulate_bit
#define accumulate_bit(acc, val)
yuv2ya16_1_c_template
static av_always_inline void yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:1022
pixdesc.h
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
AV_PIX_FMT_X2BGR10BE
@ AV_PIX_FMT_X2BGR10BE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:387
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
yuv2nv12cX_16_c_template
static av_always_inline void yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW, int output_bits)
Definition: output.c:189
b
#define b
Definition: input.c:41
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:82
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:461
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
b_r
#define b_r
yuv2p01xl1_c
static void yuv2p01xl1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:473
AV_PIX_FMT_GBRP14BE
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
Definition: pixfmt.h:281
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
mathematics.h
yuv2rgb_full_X_c_template
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2069
yuv2422_2_c_template
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:857
av_float2int
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
A2
@ A2
Definition: mvs.c:525
yuv2plane1_8_c
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:420
AV_PIX_FMT_GRAYF32LE
@ AV_PIX_FMT_GRAYF32LE
IEEE-754 single precision Y, 32bpp, little-endian.
Definition: pixfmt.h:364
yuv2planeX_10_c_template
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:340
AV_PIX_FMT_GBRAP14BE
@ AV_PIX_FMT_GBRAP14BE
planar GBR 4:4:4:4 56bpp, big-endian
Definition: pixfmt.h:432
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:114
AV_PIX_FMT_AYUV64LE
@ AV_PIX_FMT_AYUV64LE
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:302
SH
#define SH(val, pdst)
Definition: generic_macros_msa.h:154
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:311
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:721
rgb
Definition: rpzaenc.c:60
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
yuv2anyX_fn
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:287
yuv2422_X_c_template
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:818
yuv2mono_1_c_template
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:714
yuv2plane1_16_c_template
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:149
yuv2422_1_c_template
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:890
yuv2gbrp_full_X_c
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:2245
val
static double val(void *priv, double ch)
Definition: aeval.c:77
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:735
R_B
#define R_B
Definition: output.c:940
yuv2plane1_float
#define yuv2plane1_float(template, dest_type, BE_LE)
Definition: output.c:290
AV_PIX_FMT_VUYA
@ AV_PIX_FMT_VUYA
packed VUYA 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), VUYAVUYA...
Definition: pixfmt.h:401
yuv2planeX_16_c_template
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:163
r_b
#define r_b
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:90
avassert.h
YUV2PACKED16WRAPPER
#define YUV2PACKED16WRAPPER(name, base, ext, base_fmt, endianness, hasAlpha, eightbytes)
Definition: output.c:1546
av_cold
#define av_cold
Definition: attributes.h:90
yuv2mono_2_c_template
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:653
yuv2planeX_float
#define yuv2planeX_float(template, dest_type, BE_LE)
Definition: output.c:297
YUVRGB_TABLE_HEADROOM
#define YUVRGB_TABLE_HEADROOM
Definition: swscale_internal.h:44
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale_internal.h:73
float
float
Definition: af_crystalizer.c:122
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
yuv2rgb_full_2_c_template
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2119
intreadwrite.h
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
output_pixels
#define output_pixels(pos, Y1, U, Y2, V)
Definition: output.c:2691
g
const char * g
Definition: vf_curves.c:128
yuv2ayuv_X_c_template
static av_always_inline void yuv2ayuv_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:2808
AV_PIX_FMT_GBRP12LE
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:280
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
yuv2plane1_float_bswap_c_template
static av_always_inline void yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
Definition: output.c:235
ff_dither_2x2_4
const uint8_t ff_dither_2x2_4[][8]
Definition: output.c:39
ff_dither_8x8_220
const uint8_t ff_dither_8x8_220[][8]
Definition: output.c:84
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
yuv2ya8_X_c
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2532
AV_PIX_FMT_RGB4
@ AV_PIX_FMT_RGB4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:94
AV_PIX_FMT_GBRP10LE
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:170
yuv2p01xlX_c
static void yuv2p01xlX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:487
yuv2rgba64_2_c_template
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1132
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:463
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
yuv2rgba64_full_1_c_template
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1420
if
if(ret)
Definition: filter_design.txt:179
isSemiPlanarYUV
static av_always_inline int isSemiPlanarYUV(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:767
yuv2nv12cX_16BE_c
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:397
yuv2NBPS
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:360
SWS_DITHER_NONE
@ SWS_DITHER_NONE
Definition: swscale_internal.h:70
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:477
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:113
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
AV_PIX_FMT_GBRAP12BE
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
Definition: pixfmt.h:310
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:478
NULL
#define NULL
Definition: coverity.c:32
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
yuv2ayuv_1_c_template
static av_always_inline void yuv2ayuv_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:2710
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
yuv2gbrp16_full_X_c
static void yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2327
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
V
#define V
Definition: avdct.c:31
AV_PIX_FMT_BGR565LE
@ AV_PIX_FMT_BGR565LE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:118
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PIX_FMT_Y210LE
@ AV_PIX_FMT_Y210LE
packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian
Definition: pixfmt.h:382
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
yuv2ya8_2_c
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2501
AV_PIX_FMT_BGR4
@ AV_PIX_FMT_BGR4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:91
AV_PIX_FMT_BGR555BE
@ AV_PIX_FMT_BGR555BE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:119
yuv2plane1_float_c_template
static av_always_inline void yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
Definition: output.c:219
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
yuv2ya16_X_c_template
static av_always_inline void yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **unused_chrUSrc, const int32_t **unused_chrVSrc, int unused_chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:950
A_DITHER
#define A_DITHER(u, v)
AV_PIX_FMT_X2RGB10LE
@ AV_PIX_FMT_X2RGB10LE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:384
isDataInHighBits
static av_always_inline int isDataInHighBits(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:930
yuv2y2xx_wrapper
#define yuv2y2xx_wrapper(bits)
Definition: output.c:2899
AV_PIX_FMT_X2BGR10
#define AV_PIX_FMT_X2BGR10
Definition: pixfmt.h:547
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:742
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
A1
@ A1
Definition: mvs.c:524
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
shift
static int shift(int a, int b)
Definition: bonk.c:261
av_bswap32
#define av_bswap32
Definition: bswap.h:47
yuv2rgba64_full_X_c_template
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1302
AV_PIX_FMT_RGB444BE
@ AV_PIX_FMT_RGB444BE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:137
SWS_FULL_CHR_H_INT
#define SWS_FULL_CHR_H_INT
Perform full chroma upsampling when upscaling to RGB.
Definition: swscale.h:97
yuv2planeX_float_bswap_c_template
static av_always_inline void yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint32_t *dest, int dstW)
Definition: output.c:271
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:473
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:480
AV_PIX_FMT_GBRP9BE
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
Definition: pixfmt.h:167
yuv2rgba64_full_2_c_template
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1367
AV_PIX_FMT_BGR444BE
@ AV_PIX_FMT_BGR444BE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:139
output_pixel
#define output_pixel(pos, val, bias, signedness)
Definition: output.c:2896
AV_PIX_FMT_GBRP9LE
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
Definition: pixfmt.h:168
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:460
AV_PIX_FMT_GBRAP10LE
@ AV_PIX_FMT_GBRAP10LE
planar GBR 4:4:4:4 40bpp, little-endian
Definition: pixfmt.h:314
isSwappedChroma
static av_always_inline int isSwappedChroma(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:949
AV_PIX_FMT_BGR565BE
@ AV_PIX_FMT_BGR565BE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
Definition: pixfmt.h:117
yuv2nv12cX_c
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
Definition: output.c:430
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
yuv2rgb_write_full
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
Definition: output.c:1918
yuv2vyu444_X_c
static void yuv2vyu444_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:3016
ff_dither_8x8_73
const uint8_t ff_dither_8x8_73[][8]
Definition: output.c:71
Y
#define Y
Definition: boxblur.h:37
yuv2rgb_write
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1586
AV_PIX_FMT_AYUV
@ AV_PIX_FMT_AYUV
packed AYUV 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), AYUVAYUV...
Definition: pixfmt.h:442
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
AV_PIX_FMT_BGRA64LE
@ AV_PIX_FMT_BGRA64LE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
yuv2planeX_8_c
static void yuv2planeX_8_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:405
AV_PIX_FMT_UYVA
@ AV_PIX_FMT_UYVA
packed UYVA 4:4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples), UYVAUYVA...
Definition: pixfmt.h:444
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:482
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:115
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
AV_PIX_FMT_YA16
#define AV_PIX_FMT_YA16
Definition: pixfmt.h:472
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AV_PIX_FMT_BGR444
#define AV_PIX_FMT_BGR444
Definition: pixfmt.h:481
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:475
yuv2planeX_float_c_template
static av_always_inline void yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src, float *dest, int dstW)
Definition: output.c:251
av_always_inline
#define av_always_inline
Definition: attributes.h:49
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:151
yuv2gbrpf32_full_X_c
static void yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2399
SUINT
#define SUINT
Definition: dct32_template.c:30
AV_PIX_FMT_X2RGB10
#define AV_PIX_FMT_X2RGB10
Definition: pixfmt.h:546
AV_PIX_FMT_X2RGB10BE
@ AV_PIX_FMT_X2RGB10BE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:385
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:479
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:474
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_YVYU422
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
Definition: pixfmt.h:207
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3057
bswap.h
AV_PIX_FMT_Y212LE
@ AV_PIX_FMT_Y212LE
packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits,...
Definition: pixfmt.h:412
yuv2v30_X_c_template
static av_always_inline void yuv2v30_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, int shift)
Definition: output.c:2618
V30LE_WRAPPER
#define V30LE_WRAPPER(name, shift)
Definition: output.c:2648
AV_PIX_FMT_GRAYF32BE
@ AV_PIX_FMT_GRAYF32BE
IEEE-754 single precision Y, 32bpp, big-endian.
Definition: pixfmt.h:363
yuv2ya8_1_c
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
Definition: output.c:2475
YUV2RGBWRAPPER
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
Definition: output.c:1885
AV_PIX_FMT_GBRP12BE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:279
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:131
yuv2p01x_wrapper
#define yuv2p01x_wrapper(bits)
Definition: output.c:531
yuv2packedX_fn
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:253
yuv2p01xcX_c
static void yuv2p01xcX_c(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW, int output_bits)
Definition: output.c:505
yuv2rgba64_1_c_template
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1198
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_RGB565BE
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:112
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
yuv2ayuv_2_c_template
static av_always_inline void yuv2ayuv_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:2766
desc
const char * desc
Definition: libsvtav1.c:79
avutil.h
AV_PIX_FMT_X2BGR10LE
@ AV_PIX_FMT_X2BGR10LE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:386
AV_PIX_FMT_V30XLE
@ AV_PIX_FMT_V30XLE
packed VYUX 4:4:4 like XV30, 32bpp, (msb)10V 10Y 10U 2X(lsb), little-endian
Definition: pixfmt.h:449
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AV_PIX_FMT_BGR555LE
@ AV_PIX_FMT_BGR555LE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:120
yuv2rgb_1_c_template
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1806
AYUVPACKEDWRAPPER
#define AYUVPACKEDWRAPPER(name, fmt)
Definition: output.c:2860
SWS_DITHER_AUTO
@ SWS_DITHER_AUTO
Definition: swscale_internal.h:71
AV_PIX_FMT_XV36LE
@ AV_PIX_FMT_XV36LE
packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, little-endian,...
Definition: pixfmt.h:418
B_R
#define B_R
Definition: output.c:941
AV_PIX_FMT_GBRP14LE
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
Definition: pixfmt.h:282
int32_t
int32_t
Definition: audioconvert.c:56
yuv2rgb_2_c_template
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1766
AV_PIX_FMT_GBRAP10BE
@ AV_PIX_FMT_GBRAP10BE
planar GBR 4:4:4:4 40bpp, big-endian
Definition: pixfmt.h:313
d128
const uint8_t * d128
Definition: yuv2rgb.c:458
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:406
AV_PIX_FMT_VYU444
@ AV_PIX_FMT_VYU444
packed VYU 4:4:4, 24bpp (1 Cr & Cb sample per 1x1 Y), VYUVYU...
Definition: pixfmt.h:446
av_bswap16
#define av_bswap16
Definition: bswap.h:28
SWS_DITHER_X_DITHER
@ SWS_DITHER_X_DITHER
Definition: swscale_internal.h:75
SwsContext
Definition: swscale_internal.h:324
AV_PIX_FMT_BGR444LE
@ AV_PIX_FMT_BGR444LE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:138
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:263
yuv2xv36le_X_c
static void yuv2xv36le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2665
rgb2rgb.h
src
#define src
Definition: vp8dsp.c:248
AV_PIX_FMT_GBRAP14LE
@ AV_PIX_FMT_GBRAP14LE
planar GBR 4:4:4:4 56bpp, little-endian
Definition: pixfmt.h:433
swscale.h
yuv2ayuv64le_X_c
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2568
yuv2rgba64_X_c_template
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1049
ff_dither_2x2_8
const uint8_t ff_dither_2x2_8[][8]
Definition: output.c:45
AV_PIX_FMT_BGR48BE
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:145
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:476
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:62