FFmpeg — libswscale/output.c
Vertical-scaler output stage of libswscale (listing extracted from the generated documentation).
1 /*
2  * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "rgb2rgb.h"
36 #include "swscale.h"
37 #include "swscale_internal.h"
38 
/* 2x2 ordered-dither thresholds for 4 output levels; the first row is
 * repeated at the end so two consecutive rows can be read without
 * wrapping.  8-byte alignment permits whole-row loads. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
{ 1, 3, 1, 3, 1, 3, 1, 3, },
};
44 
/* 2x2 ordered-dither thresholds for 8 output levels (first row repeated,
 * see ff_dither_2x2_4). */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
{ 6, 2, 6, 2, 6, 2, 6, 2, },
{ 0, 4, 0, 4, 0, 4, 0, 4, },
{ 6, 2, 6, 2, 6, 2, 6, 2, },
};
50 
/* 4x4 ordered-dither thresholds for 16 levels; each 8-byte row stores the
 * 4-wide pattern twice, and the first row is repeated at the end. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
{ 8, 4, 11, 7, 8, 4, 11, 7, },
{ 2, 14, 1, 13, 2, 14, 1, 13, },
{ 10, 6, 9, 5, 10, 6, 9, 5, },
{ 0, 12, 3, 15, 0, 12, 3, 15, },
{ 8, 4, 11, 7, 8, 4, 11, 7, },
};
58 
/* 8x8 ordered-dither thresholds for 32 levels (first row repeated). */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
{ 17, 9, 23, 15, 16, 8, 22, 14, },
{ 5, 29, 3, 27, 4, 28, 2, 26, },
{ 21, 13, 19, 11, 20, 12, 18, 10, },
{ 0, 24, 6, 30, 1, 25, 7, 31, },
{ 16, 8, 22, 14, 17, 9, 23, 15, },
{ 4, 28, 2, 26, 5, 29, 3, 27, },
{ 20, 12, 18, 10, 21, 13, 19, 11, },
{ 1, 25, 7, 31, 0, 24, 6, 30, },
{ 17, 9, 23, 15, 16, 8, 22, 14, },
};
70 
/* 8x8 ordered-dither thresholds for 73 levels (first row repeated). */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
{ 0, 55, 14, 68, 3, 58, 17, 72, },
{ 37, 18, 50, 32, 40, 22, 54, 35, },
{ 9, 64, 5, 59, 13, 67, 8, 63, },
{ 46, 27, 41, 23, 49, 31, 44, 26, },
{ 2, 57, 16, 71, 1, 56, 15, 70, },
{ 39, 21, 52, 34, 38, 19, 51, 33, },
{ 11, 66, 7, 62, 10, 65, 6, 60, },
{ 48, 30, 43, 25, 47, 29, 42, 24, },
{ 0, 55, 14, 68, 3, 58, 17, 72, },
};
82 
#if 1
/* Default 220-level 8x8 dither table (no gamma correction); the first row
 * is repeated at the end.  The disabled variants below are kept for
 * experimentation with gamma-corrected dithering. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{117, 62, 158, 103, 113, 58, 155, 100, },
{ 34, 199, 21, 186, 31, 196, 17, 182, },
{144, 89, 131, 76, 141, 86, 127, 72, },
{ 0, 165, 41, 206, 10, 175, 52, 217, },
{110, 55, 151, 96, 120, 65, 162, 107, },
{ 28, 193, 14, 179, 38, 203, 24, 189, },
{138, 83, 124, 69, 148, 93, 134, 79, },
{ 7, 172, 48, 213, 3, 168, 45, 210, },
{117, 62, 158, 103, 113, 58, 155, 100, },
};
#elif 1
// tries to correct a gamma of 1.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 143, 18, 200, 2, 156, 25, 215, },
{ 78, 28, 125, 64, 89, 36, 138, 74, },
{ 10, 180, 3, 161, 16, 195, 8, 175, },
{109, 51, 93, 38, 121, 60, 105, 47, },
{ 1, 152, 23, 210, 0, 147, 20, 205, },
{ 85, 33, 134, 71, 81, 30, 130, 67, },
{ 14, 190, 6, 171, 12, 185, 5, 166, },
{117, 57, 101, 44, 113, 54, 97, 41, },
{ 0, 143, 18, 200, 2, 156, 25, 215, },
};
#elif 1
// tries to correct a gamma of 2.0
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 124, 8, 193, 0, 140, 12, 213, },
{ 55, 14, 104, 42, 66, 19, 119, 52, },
{ 3, 168, 1, 145, 6, 187, 3, 162, },
{ 86, 31, 70, 21, 99, 39, 82, 28, },
{ 0, 134, 11, 206, 0, 129, 9, 200, },
{ 62, 17, 114, 48, 58, 16, 109, 45, },
{ 5, 181, 2, 157, 4, 175, 1, 151, },
{ 95, 36, 78, 26, 90, 34, 74, 24, },
{ 0, 124, 8, 193, 0, 140, 12, 213, },
};
#else
// tries to correct a gamma of 2.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 107, 3, 187, 0, 125, 6, 212, },
{ 39, 7, 86, 28, 49, 11, 102, 36, },
{ 1, 158, 0, 131, 3, 180, 1, 151, },
{ 68, 19, 52, 12, 81, 25, 64, 17, },
{ 0, 119, 5, 203, 0, 113, 4, 195, },
{ 45, 9, 96, 33, 42, 8, 91, 30, },
{ 2, 172, 1, 144, 2, 165, 0, 137, },
{ 77, 23, 60, 15, 72, 21, 56, 14, },
{ 0, 107, 3, 187, 0, 125, 6, 212, },
};
#endif
135 
/* Write one 16-bit sample at *pos: clip (val >> shift) to the signed or
 * unsigned 16-bit range per `signedness`, add `bias`, and store with the
 * byte order selected by `big_endian` (both taken from the caller's
 * scope). */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
142 
143 static av_always_inline void
144 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
145  int big_endian, int output_bits)
146 {
147  int i;
148  int shift = 3;
149  av_assert0(output_bits == 16);
150 
151  for (i = 0; i < dstW; i++) {
152  int val = src[i] + (1 << (shift - 1));
153  output_pixel(&dest[i], val, 0, uint);
154  }
155 }
156 
157 static av_always_inline void
158 yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
159  const int32_t **src, uint16_t *dest, int dstW,
160  int big_endian, int output_bits)
161 {
162  int i;
163  int shift = 15;
164  av_assert0(output_bits == 16);
165 
166  for (i = 0; i < dstW; i++) {
167  int val = 1 << (shift - 1);
168  int j;
169 
170  /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
171  * filters (or anything with negative coeffs, the range can be slightly
172  * wider in both directions. To account for this overflow, we subtract
173  * a constant so it always fits in the signed range (assuming a
174  * reasonable filterSize), and re-add that at the end. */
175  val -= 0x40000000;
176  for (j = 0; j < filterSize; j++)
177  val += src[j][i] * (unsigned)filter[j];
178 
179  output_pixel(&dest[i], val, 0x8000, int);
180  }
181 }
182 
183 static av_always_inline void
184 yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither,
185  const int16_t *chrFilter, int chrFilterSize,
186  const int16_t **chrUSrc, const int16_t **chrVSrc,
187  uint8_t *dest8, int chrDstW, int output_bits)
188 {
189  uint16_t *dest = (uint16_t*)dest8;
190  const int32_t **uSrc = (const int32_t **)chrUSrc;
191  const int32_t **vSrc = (const int32_t **)chrVSrc;
192  int shift = 15;
193  int i, j;
194  av_assert0(output_bits == 16);
195 
196  for (i = 0; i < chrDstW; i++) {
197  int u = 1 << (shift - 1);
198  int v = 1 << (shift - 1);
199 
200  /* See yuv2planeX_16_c_template for details. */
201  u -= 0x40000000;
202  v -= 0x40000000;
203  for (j = 0; j < chrFilterSize; j++) {
204  u += uSrc[j][i] * (unsigned)chrFilter[j];
205  v += vSrc[j][i] * (unsigned)chrFilter[j];
206  }
207 
208  output_pixel(&dest[2*i] , u, 0x8000, int);
209  output_pixel(&dest[2*i+1], v, 0x8000, int);
210  }
211 }
212 
213 static av_always_inline void
214 yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
215 {
216  static const int big_endian = HAVE_BIGENDIAN;
217  static const int shift = 3;
218  static const float float_mult = 1.0f / 65535.0f;
219  int i, val;
220  uint16_t val_uint;
221 
222  for (i = 0; i < dstW; ++i){
223  val = src[i] + (1 << (shift - 1));
224  output_pixel(&val_uint, val, 0, uint);
225  dest[i] = float_mult * (float)val_uint;
226  }
227 }
228 
229 static av_always_inline void
230 yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
231 {
232  static const int big_endian = HAVE_BIGENDIAN;
233  static const int shift = 3;
234  static const float float_mult = 1.0f / 65535.0f;
235  int i, val;
236  uint16_t val_uint;
237 
238  for (i = 0; i < dstW; ++i){
239  val = src[i] + (1 << (shift - 1));
240  output_pixel(&val_uint, val, 0, uint);
241  dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
242  }
243 }
244 
245 static av_always_inline void
246 yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src,
247  float *dest, int dstW)
248 {
249  static const int big_endian = HAVE_BIGENDIAN;
250  static const int shift = 15;
251  static const float float_mult = 1.0f / 65535.0f;
252  int i, j, val;
253  uint16_t val_uint;
254 
255  for (i = 0; i < dstW; ++i){
256  val = (1 << (shift - 1)) - 0x40000000;
257  for (j = 0; j < filterSize; ++j){
258  val += src[j][i] * (unsigned)filter[j];
259  }
260  output_pixel(&val_uint, val, 0x8000, int);
261  dest[i] = float_mult * (float)val_uint;
262  }
263 }
264 
265 static av_always_inline void
266 yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src,
267  uint32_t *dest, int dstW)
268 {
269  static const int big_endian = HAVE_BIGENDIAN;
270  static const int shift = 15;
271  static const float float_mult = 1.0f / 65535.0f;
272  int i, j, val;
273  uint16_t val_uint;
274 
275  for (i = 0; i < dstW; ++i){
276  val = (1 << (shift - 1)) - 0x40000000;
277  for (j = 0; j < filterSize; ++j){
278  val += src[j][i] * (unsigned)filter[j];
279  }
280  output_pixel(&val_uint, val, 0x8000, int);
281  dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
282  }
283 }
284 
/* Instantiate an unfiltered float-output wrapper with the generic planar
 * output signature; `dither` and `offset` are unused by the float path. */
#define yuv2plane1_float(template, dest_type, BE_LE) \
static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template((const int32_t *)src, (dest_type *)dest, dstW); \
}
291 
/* Instantiate a filtered float-output wrapper with the generic planar
 * output signature; `dither` and `offset` are unused by the float path. */
#define yuv2planeX_float(template, dest_type, BE_LE) \
static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                                            const int16_t **src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
}
299 
300 #if HAVE_BIGENDIAN
305 #else
310 #endif
311 
312 #undef output_pixel
313 
/* Write one sample clipped to `output_bits` unsigned bits after the
 * rounding shift; byte order selected by `big_endian` (both from the
 * caller's scope). */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
320 
321 static av_always_inline void
322 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
323  int big_endian, int output_bits)
324 {
325  int i;
326  int shift = 15 - output_bits;
327 
328  for (i = 0; i < dstW; i++) {
329  int val = src[i] + (1 << (shift - 1));
330  output_pixel(&dest[i], val);
331  }
332 }
333 
334 static av_always_inline void
335 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
336  const int16_t **src, uint16_t *dest, int dstW,
337  int big_endian, int output_bits)
338 {
339  int i;
340  int shift = 11 + 16 - output_bits;
341 
342  for (i = 0; i < dstW; i++) {
343  int val = 1 << (shift - 1);
344  int j;
345 
346  for (j = 0; j < filterSize; j++)
347  val += src[j][i] * filter[j];
348 
349  output_pixel(&dest[i], val);
350  }
351 }
352 
353 #undef output_pixel
354 
/* Instantiate yuv2plane1_<bits><BE_LE>_c and yuv2planeX_<bits><BE_LE>_c:
 * `template_size` selects the 10- or 16-bit worker, `typeX_t` the matching
 * intermediate sample type, `is_be` the byte order.  `dither` and `offset`
 * are unused at these bit depths. */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}
371 
/* Instantiate for every supported depth and endianness: 9-14-bit depths
 * share the 10-bit template (int16_t intermediates), 16-bit uses the
 * 16-bit template (int32_t intermediates). */
yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
382 
383 
/* 16-bit interleaved-chroma output, little-endian wrapper. */
static void yuv2nv12cX_16LE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}

/* 16-bit interleaved-chroma output, big-endian wrapper. */
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}
399 
/* Multi-tap vertical filtering to 8-bit output with ordered dithering. */
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    for (int i = 0; i < dstW; i++) {
        /* Dither value pre-shifted to the accumulator's 19-bit scale. */
        int acc = dither[(i + offset) & 7] << 12;

        for (int j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(acc >> 19);
    }
}
414 
/* Unfiltered 15-bit -> 8-bit packing with ordered dithering. */
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    for (int i = 0; i < dstW; i++)
        dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7);
}
424 
425 static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
426  const int16_t *chrFilter, int chrFilterSize,
427  const int16_t **chrUSrc, const int16_t **chrVSrc,
428  uint8_t *dest, int chrDstW)
429 {
430  int i;
431 
432  if (!isSwappedChroma(dstFormat))
433  for (i=0; i<chrDstW; i++) {
434  int u = chrDither[i & 7] << 12;
435  int v = chrDither[(i + 3) & 7] << 12;
436  int j;
437  for (j=0; j<chrFilterSize; j++) {
438  u += chrUSrc[j][i] * chrFilter[j];
439  v += chrVSrc[j][i] * chrFilter[j];
440  }
441 
442  dest[2*i]= av_clip_uint8(u>>19);
443  dest[2*i+1]= av_clip_uint8(v>>19);
444  }
445  else
446  for (i=0; i<chrDstW; i++) {
447  int u = chrDither[i & 7] << 12;
448  int v = chrDither[(i + 3) & 7] << 12;
449  int j;
450  for (j=0; j<chrFilterSize; j++) {
451  u += chrUSrc[j][i] * chrFilter[j];
452  v += chrVSrc[j][i] * chrFilter[j];
453  }
454 
455  dest[2*i]= av_clip_uint8(v>>19);
456  dest[2*i+1]= av_clip_uint8(u>>19);
457  }
458 }
459 
460 
/* P010 sample store: clip to 10 bits after the rounding shift, then place
 * the value in the top 10 bits of the 16-bit word (<< 6), honouring the
 * `big_endian` flag from the caller's scope. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    }
467 
/* Unfiltered luma packing for P010 (10 significant bits, MSB-aligned). */
static void yuv2p010l1_c(const int16_t *src,
                         uint16_t *dest, int dstW,
                         int big_endian)
{
    int shift = 5;
    const int rnd = 1 << (shift - 1); /* rounding offset for the >> shift */

    for (int i = 0; i < dstW; i++) {
        int val = src[i] + rnd;
        output_pixel(&dest[i], val);
    }
}
480 
/* Multi-tap vertically filtered luma packing for P010. */
static void yuv2p010lX_c(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian)
{
    int shift = 17;

    for (int i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding term */

        for (int j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
497 
/* Interleaved chroma packing for P010: U in even, V in odd 16-bit slots. */
static void yuv2p010cX_c(int big_endian, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW)
{
    uint16_t *dest = (uint16_t *)dest8;
    int shift = 17;

    for (int i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding terms */
        int v = 1 << (shift - 1);

        for (int j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }

        output_pixel(&dest[2 * i],     u);
        output_pixel(&dest[2 * i + 1], v);
    }
}
520 
/* P010 unfiltered luma, little-endian (dither/offset unused). */
static void yuv2p010l1_LE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 0);
}

/* P010 unfiltered luma, big-endian. */
static void yuv2p010l1_BE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 1);
}

/* P010 filtered luma, little-endian. */
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0);
}

/* P010 filtered luma, big-endian. */
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1);
}

/* P010 interleaved chroma, little-endian. */
static void yuv2p010cX_LE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                            const int16_t *chrFilter, int chrFilterSize,
                            const int16_t **chrUSrc, const int16_t **chrVSrc,
                            uint8_t *dest8, int chrDstW)
{
    yuv2p010cX_c(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW);
}

/* P010 interleaved chroma, big-endian. */
static void yuv2p010cX_BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                            const int16_t *chrFilter, int chrFilterSize,
                            const int16_t **chrUSrc, const int16_t **chrVSrc,
                            uint8_t *dest8, int chrDstW)
{
    yuv2p010cX_c(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW);
}
564 
565 #undef output_pixel
566 
567 
/* Shift the next 1-bit decision into `acc`: a sample (plus dither) at or
 * above 234 yields a 1 bit. */
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= 234
/* Emit one packed byte; the MONOBLACK variant stores the bits as-is, the
 * MONOWHITE variant stores them inverted. */
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
577 
/*
 * Vertical scaling pass producing 1-bpp monochrome output: 8 pixels are
 * packed per destination byte, most significant bit first.  Two luma
 * samples are computed per loop iteration; a byte is flushed after every
 * fourth iteration ((i & 7) == 6).  With c->dither == SWS_DITHER_ED the
 * quantization error is diffused through c->dither_error[0]; otherwise
 * the ordered-dither row d128 (from ff_dither_8x8_220) is used.
 */
static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y&7];
    int i;
    unsigned acc = 0;
    int err = 0;

    for (i = 0; i < dstW; i += 2) {
        int j;
        int Y1 = 1 << 18; /* rounding term for the >> 19 below */
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i] * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        if ((Y1 | Y2) & 0x100) { /* clip only when a value left 8-bit range */
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        if (c->dither == SWS_DITHER_ED) {
            /* Error diffusion: fold the previous line's stored errors
             * (weights 1/5/3) plus the running error (weight 7) into the
             * sample before thresholding at 128. */
            Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y1 >= 128);
            Y1 -= 220*(acc&1); /* subtract the emitted level -> new error */

            err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
            c->dither_error[0][i+1] = Y1;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);
        } else {
            accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
            accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
        }
        if ((i & 7) == 6) { /* 8 bits collected -> emit one byte */
            output_pixel(*dest++, acc);
        }
    }
    c->dither_error[0][i] = err;

    /* Flush a partial final byte when dstW is not a multiple of 8.
     * NOTE(review): the partial bits are written unshifted -- confirm
     * this matches the consumers' expectations. */
    if (i & 6) {
        output_pixel(*dest, acc);
    }
}
630 
/*
 * Two-line blending variant of the monochrome output: each luma sample is
 * a yalpha-weighted mix of buf[0] and buf[1], then dithered down to 1 bpp
 * (error diffusion or the ordered row d128, as in yuv2mono_X_c_template).
 */
static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum AVPixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int yalpha1 = 4096 - yalpha; /* complementary blend weight */
    int i;
    av_assert2(yalpha <= 4096U);

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            /* Fold previous-line and running errors in, then threshold. */
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1);

            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6) /* 8 bits collected -> emit one byte */
                output_pixel(*dest++, acc);
        }
        c->dither_error[0][i] = err;
    } else {
        /* Ordered dither: 8 pixels per iteration, one output byte each. */
        for (i = 0; i < dstW; i += 8) {
            int Y, acc = 0;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[0]);
            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[1]);
            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[2]);
            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[3]);
            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[4]);
            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[5]);
            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[6]);
            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
691 
/*
 * Single-line variant of the monochrome output: samples come straight
 * from buf0 ((x + 64) >> 7 rounds 15-bit intermediates to 8 bits), then
 * are dithered to 1 bpp exactly as in the other yuv2mono templates.
 */
static av_always_inline void
yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int i;

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            Y = ((buf0[i + 0] + 64) >> 7);
            /* Fold previous-line and running errors in, then threshold. */
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1);

            err = ((buf0[i + 1] + 64) >> 7);
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6) /* 8 bits collected -> emit one byte */
                output_pixel(*dest++, acc);
        }
        c->dither_error[0][i] = err;
    } else {
        /* Ordered dither: 8 pixels per iteration, one output byte each. */
        for (i = 0; i < dstW; i += 8) {
            int acc = 0;
            accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
            accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
            accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
            accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
            accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
            accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
            accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
            accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
739 
740 #undef output_pixel
741 #undef accumulate_bit
742 
/*
 * Expand the three packed-output entry points for one pixel format:
 * _X_c (full multi-tap vertical filtering), _2_c (two-line blending) and
 * _1_c (single line), each delegating to the matching *_c_template with
 * `fmt` baked in as the compile-time target.
 */
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
774 
/* 1-bpp monochrome packed output in both bit polarities. */
YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
777 
/* Store one packed 4:2:2 pixel pair (two luma, one U, one V byte) in the
 * byte order demanded by `target` (YUYV, YVYU or UYVY). */
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
795 
796 static av_always_inline void
797 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
798  const int16_t **lumSrc, int lumFilterSize,
799  const int16_t *chrFilter, const int16_t **chrUSrc,
800  const int16_t **chrVSrc, int chrFilterSize,
801  const int16_t **alpSrc, uint8_t *dest, int dstW,
802  int y, enum AVPixelFormat target)
803 {
804  int i;
805 
806  for (i = 0; i < ((dstW + 1) >> 1); i++) {
807  int j;
808  int Y1 = 1 << 18;
809  int Y2 = 1 << 18;
810  int U = 1 << 18;
811  int V = 1 << 18;
812 
813  for (j = 0; j < lumFilterSize; j++) {
814  Y1 += lumSrc[j][i * 2] * lumFilter[j];
815  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
816  }
817  for (j = 0; j < chrFilterSize; j++) {
818  U += chrUSrc[j][i] * chrFilter[j];
819  V += chrVSrc[j][i] * chrFilter[j];
820  }
821  Y1 >>= 19;
822  Y2 >>= 19;
823  U >>= 19;
824  V >>= 19;
825  if ((Y1 | Y2 | U | V) & 0x100) {
826  Y1 = av_clip_uint8(Y1);
827  Y2 = av_clip_uint8(Y2);
828  U = av_clip_uint8(U);
829  V = av_clip_uint8(V);
830  }
831  output_pixels(4*i, Y1, U, Y2, V);
832  }
833 }
834 
835 static av_always_inline void
836 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
837  const int16_t *ubuf[2], const int16_t *vbuf[2],
838  const int16_t *abuf[2], uint8_t *dest, int dstW,
839  int yalpha, int uvalpha, int y,
840  enum AVPixelFormat target)
841 {
842  const int16_t *buf0 = buf[0], *buf1 = buf[1],
843  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
844  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
845  int yalpha1 = 4096 - yalpha;
846  int uvalpha1 = 4096 - uvalpha;
847  int i;
848  av_assert2(yalpha <= 4096U);
849  av_assert2(uvalpha <= 4096U);
850 
851  for (i = 0; i < ((dstW + 1) >> 1); i++) {
852  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
853  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
854  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
855  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
856 
857  if ((Y1 | Y2 | U | V) & 0x100) {
858  Y1 = av_clip_uint8(Y1);
859  Y2 = av_clip_uint8(Y2);
860  U = av_clip_uint8(U);
861  V = av_clip_uint8(V);
862  }
863 
864  output_pixels(i * 4, Y1, U, Y2, V);
865  }
866 }
867 
868 static av_always_inline void
869 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
870  const int16_t *ubuf[2], const int16_t *vbuf[2],
871  const int16_t *abuf0, uint8_t *dest, int dstW,
872  int uvalpha, int y, enum AVPixelFormat target)
873 {
874  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
875  int i;
876 
877  if (uvalpha < 2048) {
878  for (i = 0; i < ((dstW + 1) >> 1); i++) {
879  int Y1 = (buf0[i * 2 ]+64) >> 7;
880  int Y2 = (buf0[i * 2 + 1]+64) >> 7;
881  int U = (ubuf0[i] +64) >> 7;
882  int V = (vbuf0[i] +64) >> 7;
883 
884  if ((Y1 | Y2 | U | V) & 0x100) {
885  Y1 = av_clip_uint8(Y1);
886  Y2 = av_clip_uint8(Y2);
887  U = av_clip_uint8(U);
888  V = av_clip_uint8(V);
889  }
890 
891  output_pixels(i * 4, Y1, U, Y2, V);
892  }
893  } else {
894  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
895  for (i = 0; i < ((dstW + 1) >> 1); i++) {
896  int Y1 = (buf0[i * 2 ] + 64) >> 7;
897  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
898  int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
899  int V = (vbuf0[i] + vbuf1[i]+128) >> 8;
900 
901  if ((Y1 | Y2 | U | V) & 0x100) {
902  Y1 = av_clip_uint8(Y1);
903  Y2 = av_clip_uint8(Y2);
904  U = av_clip_uint8(U);
905  V = av_clip_uint8(V);
906  }
907 
908  output_pixels(i * 4, Y1, U, Y2, V);
909  }
910  }
911 }
912 
913 #undef output_pixels
914 
/* Packed 8-bit 4:2:2 output in the three supported byte orders. */
YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
918 
/* For RGB-ordered 16-bit-per-component targets R_B selects the red and
 * B_R the blue channel; for BGR-ordered targets the two swap, so one
 * template body serves both component orders. */
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
/* Store one 16-bit component honouring the target's endianness. */
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
927 
928 static av_always_inline void
929 yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter,
930  const int32_t **lumSrc, int lumFilterSize,
931  const int16_t *chrFilter, const int32_t **unused_chrUSrc,
932  const int32_t **unused_chrVSrc, int unused_chrFilterSize,
933  const int32_t **alpSrc, uint16_t *dest, int dstW,
934  int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
935 {
936  int hasAlpha = !!alpSrc;
937  int i;
938 
939  for (i = 0; i < dstW; i++) {
940  int j;
941  int Y = -0x40000000;
942  int A = 0xffff;
943 
944  for (j = 0; j < lumFilterSize; j++)
945  Y += lumSrc[j][i] * lumFilter[j];
946 
947  Y >>= 15;
948  Y += (1<<3) + 0x8000;
949  Y = av_clip_uint16(Y);
950 
951  if (hasAlpha) {
952  A = -0x40000000 + (1<<14);
953  for (j = 0; j < lumFilterSize; j++)
954  A += alpSrc[j][i] * lumFilter[j];
955 
956  A >>= 15;
957  A += 0x8000;
958  A = av_clip_uint16(A);
959  }
960 
961  output_pixel(&dest[2 * i ], Y);
962  output_pixel(&dest[2 * i + 1], A);
963  }
964 }
965 
966 static av_always_inline void
968  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
969  const int32_t *abuf[2], uint16_t *dest, int dstW,
970  int yalpha, int unused_uvalpha, int y,
971  enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
972 {
973  int hasAlpha = abuf && abuf[0] && abuf[1];
974  const int32_t *buf0 = buf[0], *buf1 = buf[1],
975  *abuf0 = hasAlpha ? abuf[0] : NULL,
976  *abuf1 = hasAlpha ? abuf[1] : NULL;
977  int yalpha1 = 4096 - yalpha;
978  int i;
979 
980  av_assert2(yalpha <= 4096U);
981 
982  for (i = 0; i < dstW; i++) {
983  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;
984  int A;
985 
986  Y = av_clip_uint16(Y);
987 
988  if (hasAlpha) {
989  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
990  A = av_clip_uint16(A);
991  }
992 
993  output_pixel(&dest[2 * i ], Y);
994  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
995  }
996 }
997 
998 static av_always_inline void
1000  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
1001  const int32_t *abuf0, uint16_t *dest, int dstW,
1002  int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
1003 {
1004  int hasAlpha = !!abuf0;
1005  int i;
1006 
1007  for (i = 0; i < dstW; i++) {
1008  int Y = buf0[i] >> 3;/* 19 - 16 */
1009  int A;
1010 
1011  Y = av_clip_uint16(Y);
1012 
1013  if (hasAlpha) {
1014  A = abuf0[i] >> 3;
1015  if (A & 0x100)
1016  A = av_clip_uint16(A);
1017  }
1018 
1019  output_pixel(&dest[2 * i ], Y);
1020  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
1021  }
1022 }
1023 
1024 static av_always_inline void
1025 yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
1026  const int32_t **lumSrc, int lumFilterSize,
1027  const int16_t *chrFilter, const int32_t **chrUSrc,
1028  const int32_t **chrVSrc, int chrFilterSize,
1029  const int32_t **alpSrc, uint16_t *dest, int dstW,
1030  int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1031 {
1032  int i;
1033  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1034 
1035  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1036  int j;
1037  int Y1 = -0x40000000;
1038  int Y2 = -0x40000000;
1039  int U = -(128 << 23); // 19
1040  int V = -(128 << 23);
1041  int R, G, B;
1042 
1043  for (j = 0; j < lumFilterSize; j++) {
1044  Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
1045  Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1046  }
1047  for (j = 0; j < chrFilterSize; j++) {;
1048  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1049  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1050  }
1051 
1052  if (hasAlpha) {
1053  A1 = -0x40000000;
1054  A2 = -0x40000000;
1055  for (j = 0; j < lumFilterSize; j++) {
1056  A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
1057  A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1058  }
1059  A1 >>= 1;
1060  A1 += 0x20002000;
1061  A2 >>= 1;
1062  A2 += 0x20002000;
1063  }
1064 
1065  // 8 bits: 12+15=27; 16 bits: 12+19=31
1066  Y1 >>= 14; // 10
1067  Y1 += 0x10000;
1068  Y2 >>= 14;
1069  Y2 += 0x10000;
1070  U >>= 14;
1071  V >>= 14;
1072 
1073  // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
1074  Y1 -= c->yuv2rgb_y_offset;
1075  Y2 -= c->yuv2rgb_y_offset;
1076  Y1 *= c->yuv2rgb_y_coeff;
1077  Y2 *= c->yuv2rgb_y_coeff;
1078  Y1 += 1 << 13; // 21
1079  Y2 += 1 << 13;
1080  // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
1081 
1082  R = V * c->yuv2rgb_v2r_coeff;
1083  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1084  B = U * c->yuv2rgb_u2b_coeff;
1085 
1086  // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
1087  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1088  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1089  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1090  if (eightbytes) {
1091  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1092  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1093  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1094  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1095  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1096  dest += 8;
1097  } else {
1098  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1099  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1100  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1101  dest += 6;
1102  }
1103  }
1104 }
1105 
1106 static av_always_inline void
1108  const int32_t *ubuf[2], const int32_t *vbuf[2],
1109  const int32_t *abuf[2], uint16_t *dest, int dstW,
1110  int yalpha, int uvalpha, int y,
1111  enum AVPixelFormat target, int hasAlpha, int eightbytes)
1112 {
1113  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1114  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1115  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1116  *abuf0 = hasAlpha ? abuf[0] : NULL,
1117  *abuf1 = hasAlpha ? abuf[1] : NULL;
1118  int yalpha1 = 4096 - yalpha;
1119  int uvalpha1 = 4096 - uvalpha;
1120  int i;
1121  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1122 
1123  av_assert2(yalpha <= 4096U);
1124  av_assert2(uvalpha <= 4096U);
1125 
1126  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1127  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1128  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1129  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1130  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1131  int R, G, B;
1132 
1133  Y1 -= c->yuv2rgb_y_offset;
1134  Y2 -= c->yuv2rgb_y_offset;
1135  Y1 *= c->yuv2rgb_y_coeff;
1136  Y2 *= c->yuv2rgb_y_coeff;
1137  Y1 += 1 << 13;
1138  Y2 += 1 << 13;
1139 
1140  R = V * c->yuv2rgb_v2r_coeff;
1141  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1142  B = U * c->yuv2rgb_u2b_coeff;
1143 
1144  if (hasAlpha) {
1145  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1146  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1147 
1148  A1 += 1 << 13;
1149  A2 += 1 << 13;
1150  }
1151 
1152  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1153  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1154  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1155  if (eightbytes) {
1156  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1157  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1158  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1159  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1160  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1161  dest += 8;
1162  } else {
1163  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1164  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1165  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1166  dest += 6;
1167  }
1168  }
1169 }
1170 
1171 static av_always_inline void
1173  const int32_t *ubuf[2], const int32_t *vbuf[2],
1174  const int32_t *abuf0, uint16_t *dest, int dstW,
1175  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1176 {
1177  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1178  int i;
1179  int A1 = 0xffff<<14, A2= 0xffff<<14;
1180 
1181  if (uvalpha < 2048) {
1182  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1183  int Y1 = (buf0[i * 2] ) >> 2;
1184  int Y2 = (buf0[i * 2 + 1]) >> 2;
1185  int U = (ubuf0[i] - (128 << 11)) >> 2;
1186  int V = (vbuf0[i] - (128 << 11)) >> 2;
1187  int R, G, B;
1188 
1189  Y1 -= c->yuv2rgb_y_offset;
1190  Y2 -= c->yuv2rgb_y_offset;
1191  Y1 *= c->yuv2rgb_y_coeff;
1192  Y2 *= c->yuv2rgb_y_coeff;
1193  Y1 += 1 << 13;
1194  Y2 += 1 << 13;
1195 
1196  if (hasAlpha) {
1197  A1 = abuf0[i * 2 ] << 11;
1198  A2 = abuf0[i * 2 + 1] << 11;
1199 
1200  A1 += 1 << 13;
1201  A2 += 1 << 13;
1202  }
1203 
1204  R = V * c->yuv2rgb_v2r_coeff;
1205  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1206  B = U * c->yuv2rgb_u2b_coeff;
1207 
1208  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1209  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1210  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1211  if (eightbytes) {
1212  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1213  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1214  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1215  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1216  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1217  dest += 8;
1218  } else {
1219  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1220  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1221  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1222  dest += 6;
1223  }
1224  }
1225  } else {
1226  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1227  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1228  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1229  int Y1 = (buf0[i * 2] ) >> 2;
1230  int Y2 = (buf0[i * 2 + 1]) >> 2;
1231  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1232  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1233  int R, G, B;
1234 
1235  Y1 -= c->yuv2rgb_y_offset;
1236  Y2 -= c->yuv2rgb_y_offset;
1237  Y1 *= c->yuv2rgb_y_coeff;
1238  Y2 *= c->yuv2rgb_y_coeff;
1239  Y1 += 1 << 13;
1240  Y2 += 1 << 13;
1241 
1242  if (hasAlpha) {
1243  A1 = abuf0[i * 2 ] << 11;
1244  A2 = abuf0[i * 2 + 1] << 11;
1245 
1246  A1 += 1 << 13;
1247  A2 += 1 << 13;
1248  }
1249 
1250  R = V * c->yuv2rgb_v2r_coeff;
1251  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1252  B = U * c->yuv2rgb_u2b_coeff;
1253 
1254  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1255  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1256  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1257  if (eightbytes) {
1258  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1259  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1260  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1261  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1262  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1263  dest += 8;
1264  } else {
1265  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1266  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1267  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1268  dest += 6;
1269  }
1270  }
1271  }
1272 }
1273 
1274 static av_always_inline void
1275 yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1276  const int32_t **lumSrc, int lumFilterSize,
1277  const int16_t *chrFilter, const int32_t **chrUSrc,
1278  const int32_t **chrVSrc, int chrFilterSize,
1279  const int32_t **alpSrc, uint16_t *dest, int dstW,
1280  int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1281 {
1282  int i;
1283  int A = 0xffff<<14;
1284 
1285  for (i = 0; i < dstW; i++) {
1286  int j;
1287  int Y = -0x40000000;
1288  int U = -(128 << 23); // 19
1289  int V = -(128 << 23);
1290  int R, G, B;
1291 
1292  for (j = 0; j < lumFilterSize; j++) {
1293  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
1294  }
1295  for (j = 0; j < chrFilterSize; j++) {;
1296  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1297  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1298  }
1299 
1300  if (hasAlpha) {
1301  A = -0x40000000;
1302  for (j = 0; j < lumFilterSize; j++) {
1303  A += alpSrc[j][i] * (unsigned)lumFilter[j];
1304  }
1305  A >>= 1;
1306  A += 0x20002000;
1307  }
1308 
1309  // 8bit: 12+15=27; 16-bit: 12+19=31
1310  Y >>= 14; // 10
1311  Y += 0x10000;
1312  U >>= 14;
1313  V >>= 14;
1314 
1315  // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
1316  Y -= c->yuv2rgb_y_offset;
1317  Y *= c->yuv2rgb_y_coeff;
1318  Y += 1 << 13; // 21
1319  // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit
1320 
1321  R = V * c->yuv2rgb_v2r_coeff;
1322  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1323  B = U * c->yuv2rgb_u2b_coeff;
1324 
1325  // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
1326  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1327  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1328  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1329  if (eightbytes) {
1330  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1331  dest += 4;
1332  } else {
1333  dest += 3;
1334  }
1335  }
1336 }
1337 
1338 static av_always_inline void
1340  const int32_t *ubuf[2], const int32_t *vbuf[2],
1341  const int32_t *abuf[2], uint16_t *dest, int dstW,
1342  int yalpha, int uvalpha, int y,
1343  enum AVPixelFormat target, int hasAlpha, int eightbytes)
1344 {
1345  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1346  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1347  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1348  *abuf0 = hasAlpha ? abuf[0] : NULL,
1349  *abuf1 = hasAlpha ? abuf[1] : NULL;
1350  int yalpha1 = 4096 - yalpha;
1351  int uvalpha1 = 4096 - uvalpha;
1352  int i;
1353  int A = 0xffff<<14;
1354 
1355  av_assert2(yalpha <= 4096U);
1356  av_assert2(uvalpha <= 4096U);
1357 
1358  for (i = 0; i < dstW; i++) {
1359  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1360  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1361  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1362  int R, G, B;
1363 
1364  Y -= c->yuv2rgb_y_offset;
1365  Y *= c->yuv2rgb_y_coeff;
1366  Y += 1 << 13;
1367 
1368  R = V * c->yuv2rgb_v2r_coeff;
1369  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1370  B = U * c->yuv2rgb_u2b_coeff;
1371 
1372  if (hasAlpha) {
1373  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1374 
1375  A += 1 << 13;
1376  }
1377 
1378  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1379  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1380  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1381  if (eightbytes) {
1382  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1383  dest += 4;
1384  } else {
1385  dest += 3;
1386  }
1387  }
1388 }
1389 
1390 static av_always_inline void
1392  const int32_t *ubuf[2], const int32_t *vbuf[2],
1393  const int32_t *abuf0, uint16_t *dest, int dstW,
1394  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1395 {
1396  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1397  int i;
1398  int A = 0xffff<<14;
1399 
1400  if (uvalpha < 2048) {
1401  for (i = 0; i < dstW; i++) {
1402  int Y = (buf0[i]) >> 2;
1403  int U = (ubuf0[i] - (128 << 11)) >> 2;
1404  int V = (vbuf0[i] - (128 << 11)) >> 2;
1405  int R, G, B;
1406 
1407  Y -= c->yuv2rgb_y_offset;
1408  Y *= c->yuv2rgb_y_coeff;
1409  Y += 1 << 13;
1410 
1411  if (hasAlpha) {
1412  A = abuf0[i] << 11;
1413 
1414  A += 1 << 13;
1415  }
1416 
1417  R = V * c->yuv2rgb_v2r_coeff;
1418  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1419  B = U * c->yuv2rgb_u2b_coeff;
1420 
1421  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1422  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1423  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1424  if (eightbytes) {
1425  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1426  dest += 4;
1427  } else {
1428  dest += 3;
1429  }
1430  }
1431  } else {
1432  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1433  int A = 0xffff<<14;
1434  for (i = 0; i < dstW; i++) {
1435  int Y = (buf0[i] ) >> 2;
1436  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1437  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1438  int R, G, B;
1439 
1440  Y -= c->yuv2rgb_y_offset;
1441  Y *= c->yuv2rgb_y_coeff;
1442  Y += 1 << 13;
1443 
1444  if (hasAlpha) {
1445  A = abuf0[i] << 11;
1446 
1447  A += 1 << 13;
1448  }
1449 
1450  R = V * c->yuv2rgb_v2r_coeff;
1451  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1452  B = U * c->yuv2rgb_u2b_coeff;
1453 
1454  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1455  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1456  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1457  if (eightbytes) {
1458  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1459  dest += 4;
1460  } else {
1461  dest += 3;
1462  }
1463  }
1464  }
1465 }
1466 
1467 #undef output_pixel
1468 #undef r_b
1469 #undef b_r
1470 
/*
 * Instantiate the three vertical-scaler entry points (_X_c, _2_c, _1_c)
 * for a 16-bit packed output format. The public swscale function-table
 * signatures use int16_t pointers; the 16-bit templates operate on
 * int32_t samples, so each wrapper casts the buffers before delegating
 * to name##base##_{X,2,1}_c_template with the compile-time fmt/hasAlpha/
 * eightbytes parameters baked in.
 */
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
}
1516 
1517 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48BE, 0, 0)
1518 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48LE, 0, 0)
1519 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48BE, 0, 0)
1520 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48LE, 0, 0)
1521 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64BE, 1, 1)
1522 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64LE, 1, 1)
1523 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64BE, 0, 1)
1524 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64LE, 0, 1)
1525 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64BE, 1, 1)
1526 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64LE, 1, 1)
1527 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64BE, 0, 1)
1528 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64LE, 0, 1)
1529 YUV2PACKED16WRAPPER(yuv2, ya16, ya16be, AV_PIX_FMT_YA16BE, 1, 0)
1530 YUV2PACKED16WRAPPER(yuv2, ya16, ya16le, AV_PIX_FMT_YA16LE, 1, 0)
1531 
1532 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48BE, 0, 0)
1533 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48LE, 0, 0)
1534 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48BE, 0, 0)
1535 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48LE, 0, 0)
1536 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64BE, 1, 1)
1537 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64LE, 1, 1)
1538 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64BE, 0, 1)
1539 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64LE, 0, 1)
1540 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64BE, 1, 1)
1541 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64LE, 1, 1)
1542 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64BE, 0, 1)
1543 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64LE, 0, 1)
1544 
1545 /*
1546  * Write out 2 RGB pixels in the target pixel format. This function takes a
1547  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1548  * things like endianness conversion and shifting. The caller takes care of
1549  * setting the correct offset in these tables from the chroma (U/V) values.
1550  * This function then uses the luminance (Y1/Y2) values to write out the
1551  * correct RGB values into the destination buffer.
1552  */
1553 static av_always_inline void
1554 yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
1555  unsigned A1, unsigned A2,
1556  const void *_r, const void *_g, const void *_b, int y,
1557  enum AVPixelFormat target, int hasAlpha)
1558 {
1559  if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
1560  target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
1561  uint32_t *dest = (uint32_t *) _dest;
1562  const uint32_t *r = (const uint32_t *) _r;
1563  const uint32_t *g = (const uint32_t *) _g;
1564  const uint32_t *b = (const uint32_t *) _b;
1565 
1566 #if CONFIG_SMALL
1567  int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;
1568 
1569  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
1570  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
1571 #else
1572  if (hasAlpha) {
1573  int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1574 
1575  av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
1576  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
1577  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
1578  } else {
1579 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
1580  int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1581 
1582  av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
1583 #endif
1584  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1585  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1586  }
1587 #endif
1588  } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
1589  uint8_t *dest = (uint8_t *) _dest;
1590  const uint8_t *r = (const uint8_t *) _r;
1591  const uint8_t *g = (const uint8_t *) _g;
1592  const uint8_t *b = (const uint8_t *) _b;
1593 
1594 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
1595 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
1596 
1597  dest[i * 6 + 0] = r_b[Y1];
1598  dest[i * 6 + 1] = g[Y1];
1599  dest[i * 6 + 2] = b_r[Y1];
1600  dest[i * 6 + 3] = r_b[Y2];
1601  dest[i * 6 + 4] = g[Y2];
1602  dest[i * 6 + 5] = b_r[Y2];
1603 #undef r_b
1604 #undef b_r
1605  } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
1606  target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
1607  target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
1608  uint16_t *dest = (uint16_t *) _dest;
1609  const uint16_t *r = (const uint16_t *) _r;
1610  const uint16_t *g = (const uint16_t *) _g;
1611  const uint16_t *b = (const uint16_t *) _b;
1612  int dr1, dg1, db1, dr2, dg2, db2;
1613 
1614  if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
1615  dr1 = ff_dither_2x2_8[ y & 1 ][0];
1616  dg1 = ff_dither_2x2_4[ y & 1 ][0];
1617  db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1618  dr2 = ff_dither_2x2_8[ y & 1 ][1];
1619  dg2 = ff_dither_2x2_4[ y & 1 ][1];
1620  db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1621  } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
1622  dr1 = ff_dither_2x2_8[ y & 1 ][0];
1623  dg1 = ff_dither_2x2_8[ y & 1 ][1];
1624  db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1625  dr2 = ff_dither_2x2_8[ y & 1 ][1];
1626  dg2 = ff_dither_2x2_8[ y & 1 ][0];
1627  db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1628  } else {
1629  dr1 = ff_dither_4x4_16[ y & 3 ][0];
1630  dg1 = ff_dither_4x4_16[ y & 3 ][1];
1631  db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
1632  dr2 = ff_dither_4x4_16[ y & 3 ][1];
1633  dg2 = ff_dither_4x4_16[ y & 3 ][0];
1634  db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
1635  }
1636 
1637  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1638  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1639  } else if (target == AV_PIX_FMT_X2RGB10 || target == AV_PIX_FMT_X2BGR10) {
1640  uint32_t *dest = (uint32_t *) _dest;
1641  const uint32_t *r = (const uint32_t *) _r;
1642  const uint32_t *g = (const uint32_t *) _g;
1643  const uint32_t *b = (const uint32_t *) _b;
1644  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1645  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1646  } else /* 8/4 bits */ {
1647  uint8_t *dest = (uint8_t *) _dest;
1648  const uint8_t *r = (const uint8_t *) _r;
1649  const uint8_t *g = (const uint8_t *) _g;
1650  const uint8_t *b = (const uint8_t *) _b;
1651  int dr1, dg1, db1, dr2, dg2, db2;
1652 
1653  if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
1654  const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
1655  const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
1656  dr1 = dg1 = d32[(i * 2 + 0) & 7];
1657  db1 = d64[(i * 2 + 0) & 7];
1658  dr2 = dg2 = d32[(i * 2 + 1) & 7];
1659  db2 = d64[(i * 2 + 1) & 7];
1660  } else {
1661  const uint8_t * const d64 = ff_dither_8x8_73 [y & 7];
1662  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
1663  dr1 = db1 = d128[(i * 2 + 0) & 7];
1664  dg1 = d64[(i * 2 + 0) & 7];
1665  dr2 = db2 = d128[(i * 2 + 1) & 7];
1666  dg2 = d64[(i * 2 + 1) & 7];
1667  }
1668 
1669  if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
1670  dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
1671  ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
1672  } else {
1673  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1674  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1675  }
1676  }
1677 }
1678 
1679 static av_always_inline void
1680 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1681  const int16_t **lumSrc, int lumFilterSize,
1682  const int16_t *chrFilter, const int16_t **chrUSrc,
1683  const int16_t **chrVSrc, int chrFilterSize,
1684  const int16_t **alpSrc, uint8_t *dest, int dstW,
1685  int y, enum AVPixelFormat target, int hasAlpha)
1686 {
1687  int i;
1688 
1689  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1690  int j, A1, A2;
1691  int Y1 = 1 << 18;
1692  int Y2 = 1 << 18;
1693  int U = 1 << 18;
1694  int V = 1 << 18;
1695  const void *r, *g, *b;
1696 
1697  for (j = 0; j < lumFilterSize; j++) {
1698  Y1 += lumSrc[j][i * 2] * lumFilter[j];
1699  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1700  }
1701  for (j = 0; j < chrFilterSize; j++) {
1702  U += chrUSrc[j][i] * chrFilter[j];
1703  V += chrVSrc[j][i] * chrFilter[j];
1704  }
1705  Y1 >>= 19;
1706  Y2 >>= 19;
1707  U >>= 19;
1708  V >>= 19;
1709  if (hasAlpha) {
1710  A1 = 1 << 18;
1711  A2 = 1 << 18;
1712  for (j = 0; j < lumFilterSize; j++) {
1713  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1714  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1715  }
1716  A1 >>= 19;
1717  A2 >>= 19;
1718  if ((A1 | A2) & 0x100) {
1719  A1 = av_clip_uint8(A1);
1720  A2 = av_clip_uint8(A2);
1721  }
1722  }
1723 
1724  r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1725  g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
1726  b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1727 
1728  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1729  r, g, b, y, target, hasAlpha);
1730  }
1731 }
1732 
1733 static av_always_inline void
1734 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
1735  const int16_t *ubuf[2], const int16_t *vbuf[2],
1736  const int16_t *abuf[2], uint8_t *dest, int dstW,
1737  int yalpha, int uvalpha, int y,
1738  enum AVPixelFormat target, int hasAlpha)
1739 {
1740  const int16_t *buf0 = buf[0], *buf1 = buf[1],
1741  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1742  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1743  *abuf0 = hasAlpha ? abuf[0] : NULL,
1744  *abuf1 = hasAlpha ? abuf[1] : NULL;
1745  int yalpha1 = 4096 - yalpha;
1746  int uvalpha1 = 4096 - uvalpha;
1747  int i;
1748  av_assert2(yalpha <= 4096U);
1749  av_assert2(uvalpha <= 4096U);
1750 
1751  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1752  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1753  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1754  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1755  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1756  int A1, A2;
1757  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1758  *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1759  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1760 
1761  if (hasAlpha) {
1762  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1763  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1764  A1 = av_clip_uint8(A1);
1765  A2 = av_clip_uint8(A2);
1766  }
1767 
1768  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1769  r, g, b, y, target, hasAlpha);
1770  }
1771 }
1772 
1773 static av_always_inline void
1774 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1775  const int16_t *ubuf[2], const int16_t *vbuf[2],
1776  const int16_t *abuf0, uint8_t *dest, int dstW,
1777  int uvalpha, int y, enum AVPixelFormat target,
1778  int hasAlpha)
1779 {
1780  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1781  int i;
1782 
1783  if (uvalpha < 2048) {
1784  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1785  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1786  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1787  int U = (ubuf0[i] + 64) >> 7;
1788  int V = (vbuf0[i] + 64) >> 7;
1789  int A1, A2;
1790  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1791  *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1792  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1793 
1794  if (hasAlpha) {
1795  A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1796  A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1797  A1 = av_clip_uint8(A1);
1798  A2 = av_clip_uint8(A2);
1799  }
1800 
1801  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1802  r, g, b, y, target, hasAlpha);
1803  }
1804  } else {
1805  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1806  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1807  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1808  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1809  int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
1810  int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;
1811  int A1, A2;
1812  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1813  *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1814  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1815 
1816  if (hasAlpha) {
1817  A1 = (abuf0[i * 2 ] + 64) >> 7;
1818  A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1819  A1 = av_clip_uint8(A1);
1820  A2 = av_clip_uint8(A2);
1821  }
1822 
1823  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1824  r, g, b, y, target, hasAlpha);
1825  }
1826  }
1827 }
1828 
/* Instantiate the multi-line ("X") vertical-scaler entry point for an
 * 8-bit-depth RGB output format, delegating to the _X_c_template with
 * the compile-time fmt/hasAlpha parameters baked in. */
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}
1841 
/* Instantiate the "X" and two-line ("2") entry points for an
 * 8-bit-depth RGB output format. */
#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}
1852 
/* Instantiate all three entry points ("X", "2" and single-line "1")
 * for an 8-bit-depth RGB output format. */
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
1863 
/* Instantiate the non-full-chroma packed-RGB output functions.  With
 * CONFIG_SMALL a single RGB32 variant decides alpha handling at run time
 * (hasAlpha expands to an expression reading c->needAlpha); otherwise
 * separate alpha/no-alpha variants are generated. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
#endif
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb, x2rgb10, AV_PIX_FMT_X2RGB10, 0)
YUV2RGBWRAPPER(yuv2, rgb, x2bgr10, AV_PIX_FMT_X2BGR10, 0)
1885 
                               uint8_t *dest, int i, int Y, int A, int U, int V,
                               int y, enum AVPixelFormat target, int hasAlpha, int err[4])
{
    /* Write one pixel (pixel index i of output line y) to dest in the packed
     * format given by target.  Y/U/V are the filtered, bias-adjusted inputs;
     * err[] carries the running error-diffusion residual for the low-depth
     * targets (err[3] is unused padding). */
    int R, G, B;
    int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;

    /* YUV -> RGB; the results keep 22 fractional bits.  The (unsigned) casts
     * make out-of-range sums wrap (defined behaviour) instead of invoking
     * signed-overflow UB; such values are clipped to 30 bits just below. */
    Y -= c->yuv2rgb_y_offset;
    Y *= c->yuv2rgb_y_coeff;
    Y += 1 << 21;
    R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
    G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
    B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
    if ((R | G | B) & 0xC0000000) {
        R = av_clip_uintp2(R, 30);
        G = av_clip_uintp2(G, 30);
        B = av_clip_uintp2(B, 30);
    }

    switch(target) {
    case AV_PIX_FMT_ARGB:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = R >> 22;
        dest[2] = G >> 22;
        dest[3] = B >> 22;
        break;
    case AV_PIX_FMT_RGB24:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        break;
    case AV_PIX_FMT_RGBA:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_ABGR:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = B >> 22;
        dest[2] = G >> 22;
        dest[3] = R >> 22;
        break;
    case AV_PIX_FMT_BGR24:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        break;
    case AV_PIX_FMT_BGRA:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB8:
    {
        int r,g,b;

        /* Low-depth targets: quantize R/G/B according to the dither mode. */
        switch (c->dither) {
        case SWS_DITHER_NONE:
            if (isrgb8) {
                /* 3:3:2 bit allocation for the 8-bit formats */
                r = av_clip_uintp2(R >> 27, 3);
                g = av_clip_uintp2(G >> 27, 3);
                b = av_clip_uintp2(B >> 28, 2);
            } else {
                /* 1:2:1 bit allocation for the 4-bit-in-a-byte formats */
                r = av_clip_uintp2(R >> 29, 1);
                g = av_clip_uintp2(G >> 28, 2);
                b = av_clip_uintp2(B >> 29, 1);
            }
            break;
        default:
        case SWS_DITHER_AUTO:
        case SWS_DITHER_ED:
            /* Error diffusion with 7/16, 1/16, 5/16, 3/16 weights
             * (Floyd-Steinberg-style): err[] is the residual propagated along
             * this line, c->dither_error[][] the residuals from the line
             * above (indices i..i+2 = left/center/right neighbours). */
            R >>= 22;
            G >>= 22;
            B >>= 22;
            R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
            G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
            B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
            c->dither_error[0][i] = err[0];
            c->dither_error[1][i] = err[1];
            c->dither_error[2][i] = err[2];
            r = R >> (isrgb8 ? 5 : 7);
            g = G >> (isrgb8 ? 5 : 6);
            b = B >> (isrgb8 ? 6 : 7);
            r = av_clip(r, 0, isrgb8 ? 7 : 1);
            g = av_clip(g, 0, isrgb8 ? 7 : 3);
            b = av_clip(b, 0, isrgb8 ? 3 : 1);
            /* New residual = ideal value minus the value actually emitted
             * (36/85/255 are the per-component reconstruction steps). */
            err[0] = R - r*(isrgb8 ? 36 : 255);
            err[1] = G - g*(isrgb8 ? 36 : 85);
            err[2] = B - b*(isrgb8 ? 85 : 255);
            break;
        case SWS_DITHER_A_DITHER:
            if (isrgb8) {
  /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define A_DITHER(u,v)   (((((u)+((v)*236))*119)&0xff))
                r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
                g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
                b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
                g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
                b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }
            break;
        case SWS_DITHER_X_DITHER:
            if (isrgb8) {
  /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define X_DITHER(u,v)   (((((u)^((v)*237))*181)&0x1ff)/2)
                r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
                g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
                b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
                g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
                b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }

            break;
        }

        /* Pack the quantized components into a single byte. */
        if(target == AV_PIX_FMT_BGR4_BYTE) {
            dest[0] = r + 2*g + 8*b;
        } else if(target == AV_PIX_FMT_RGB4_BYTE) {
            dest[0] = b + 2*g + 8*r;
        } else if(target == AV_PIX_FMT_BGR8) {
            dest[0] = r + 8*g + 64*b;
        } else if(target == AV_PIX_FMT_RGB8) {
            dest[0] = b + 4*g + 32*r;
        } else
            av_assert2(0);
        break;}
    }
}
2035 
2036 static av_always_inline void
2037 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
2038  const int16_t **lumSrc, int lumFilterSize,
2039  const int16_t *chrFilter, const int16_t **chrUSrc,
2040  const int16_t **chrVSrc, int chrFilterSize,
2041  const int16_t **alpSrc, uint8_t *dest,
2042  int dstW, int y, enum AVPixelFormat target, int hasAlpha)
2043 {
2044  int i;
2045  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2046  int err[4] = {0};
2047  int A = 0; //init to silence warning
2048 
2049  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2050  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2051  step = 1;
2052 
2053  for (i = 0; i < dstW; i++) {
2054  int j;
2055  int Y = 1<<9;
2056  int U = (1<<9)-(128 << 19);
2057  int V = (1<<9)-(128 << 19);
2058 
2059  for (j = 0; j < lumFilterSize; j++) {
2060  Y += lumSrc[j][i] * lumFilter[j];
2061  }
2062  for (j = 0; j < chrFilterSize; j++) {
2063  U += chrUSrc[j][i] * chrFilter[j];
2064  V += chrVSrc[j][i] * chrFilter[j];
2065  }
2066  Y >>= 10;
2067  U >>= 10;
2068  V >>= 10;
2069  if (hasAlpha) {
2070  A = 1 << 18;
2071  for (j = 0; j < lumFilterSize; j++) {
2072  A += alpSrc[j][i] * lumFilter[j];
2073  }
2074  A >>= 19;
2075  if (A & 0x100)
2076  A = av_clip_uint8(A);
2077  }
2078  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2079  dest += step;
2080  }
2081  c->dither_error[0][i] = err[0];
2082  c->dither_error[1][i] = err[1];
2083  c->dither_error[2][i] = err[2];
2084 }
2085 
2086 static av_always_inline void
2087 yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
2088  const int16_t *ubuf[2], const int16_t *vbuf[2],
2089  const int16_t *abuf[2], uint8_t *dest, int dstW,
2090  int yalpha, int uvalpha, int y,
2091  enum AVPixelFormat target, int hasAlpha)
2092 {
2093  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2094  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
2095  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
2096  *abuf0 = hasAlpha ? abuf[0] : NULL,
2097  *abuf1 = hasAlpha ? abuf[1] : NULL;
2098  int yalpha1 = 4096 - yalpha;
2099  int uvalpha1 = 4096 - uvalpha;
2100  int i;
2101  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2102  int err[4] = {0};
2103  int A = 0; // init to silcene warning
2104 
2105  av_assert2(yalpha <= 4096U);
2106  av_assert2(uvalpha <= 4096U);
2107 
2108  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2109  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2110  step = 1;
2111 
2112  for (i = 0; i < dstW; i++) {
2113  int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10; //FIXME rounding
2114  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
2115  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;
2116 
2117  if (hasAlpha) {
2118  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
2119  if (A & 0x100)
2120  A = av_clip_uint8(A);
2121  }
2122 
2123  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2124  dest += step;
2125  }
2126  c->dither_error[0][i] = err[0];
2127  c->dither_error[1][i] = err[1];
2128  c->dither_error[2][i] = err[2];
2129 }
2130 
/* Full-chroma output, single-line path: emit one packed RGB line from one
 * pair of luma/chroma input lines; if uvalpha >= 2048 the two chroma lines
 * are averaged instead of taking only the first. */
static av_always_inline void
                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf0, uint8_t *dest, int dstW,
                          int uvalpha, int y, enum AVPixelFormat target,
                          int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};

    /* The dithered low-depth formats pack one pixel per byte. */
    if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
     || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    if (uvalpha < 2048) {
        /* Use the first chroma line only. */
        int A = 0; //init to silence warning
        for (i = 0; i < dstW; i++) {
            int Y = buf0[i] * 4;
            int U = (ubuf0[i] - (128<<7)) * 4;
            int V = (vbuf0[i] - (128<<7)) * 4;

            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)
                    A = av_clip_uint8(A);
            }

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
            dest += step;
        }
    } else {
        /* Average the two chroma lines. */
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        int A = 0; //init to silence warning
        for (i = 0; i < dstW; i++) {
            int Y = buf0[i] * 4;
            int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
            int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;

            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)
                    A = av_clip_uint8(A);
            }

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
            dest += step;
        }
    }

    /* Persist the trailing error-diffusion residual for the next line. */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2186 
/* Instantiate the full-chroma packed-RGB output functions.  With CONFIG_SMALL
 * the 32-bit variants decide alpha handling at run time via c->needAlpha;
 * otherwise dedicated alpha and no-alpha variants are generated. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)

YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
2211 
2212 static void
2213 yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
2214  const int16_t **lumSrc, int lumFilterSize,
2215  const int16_t *chrFilter, const int16_t **chrUSrc,
2216  const int16_t **chrVSrc, int chrFilterSize,
2217  const int16_t **alpSrc, uint8_t **dest,
2218  int dstW, int y)
2219 {
2220  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2221  int i;
2222  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
2223  uint16_t **dest16 = (uint16_t**)dest;
2224  int SH = 22 + 8 - desc->comp[0].depth;
2225  int A = 0; // init to silence warning
2226 
2227  for (i = 0; i < dstW; i++) {
2228  int j;
2229  int Y = 1 << 9;
2230  int U = (1 << 9) - (128 << 19);
2231  int V = (1 << 9) - (128 << 19);
2232  int R, G, B;
2233 
2234  for (j = 0; j < lumFilterSize; j++)
2235  Y += lumSrc[j][i] * lumFilter[j];
2236 
2237  for (j = 0; j < chrFilterSize; j++) {
2238  U += chrUSrc[j][i] * chrFilter[j];
2239  V += chrVSrc[j][i] * chrFilter[j];
2240  }
2241 
2242  Y >>= 10;
2243  U >>= 10;
2244  V >>= 10;
2245 
2246  if (hasAlpha) {
2247  A = 1 << 18;
2248 
2249  for (j = 0; j < lumFilterSize; j++)
2250  A += alpSrc[j][i] * lumFilter[j];
2251 
2252  if (A & 0xF8000000)
2253  A = av_clip_uintp2(A, 27);
2254  }
2255 
2256  Y -= c->yuv2rgb_y_offset;
2257  Y *= c->yuv2rgb_y_coeff;
2258  Y += 1 << (SH-1);
2259  R = Y + V * c->yuv2rgb_v2r_coeff;
2260  G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2261  B = Y + U * c->yuv2rgb_u2b_coeff;
2262 
2263  if ((R | G | B) & 0xC0000000) {
2264  R = av_clip_uintp2(R, 30);
2265  G = av_clip_uintp2(G, 30);
2266  B = av_clip_uintp2(B, 30);
2267  }
2268 
2269  if (SH != 22) {
2270  dest16[0][i] = G >> SH;
2271  dest16[1][i] = B >> SH;
2272  dest16[2][i] = R >> SH;
2273  if (hasAlpha)
2274  dest16[3][i] = A >> (SH - 3);
2275  } else {
2276  dest[0][i] = G >> 22;
2277  dest[1][i] = B >> 22;
2278  dest[2][i] = R >> 22;
2279  if (hasAlpha)
2280  dest[3][i] = A >> 19;
2281  }
2282  }
2283  if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2284  for (i = 0; i < dstW; i++) {
2285  dest16[0][i] = av_bswap16(dest16[0][i]);
2286  dest16[1][i] = av_bswap16(dest16[1][i]);
2287  dest16[2][i] = av_bswap16(dest16[2][i]);
2288  if (hasAlpha)
2289  dest16[3][i] = av_bswap16(dest16[3][i]);
2290  }
2291  }
2292 }
2293 
2294 static void
2295 yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter,
2296  const int16_t **lumSrcx, int lumFilterSize,
2297  const int16_t *chrFilter, const int16_t **chrUSrcx,
2298  const int16_t **chrVSrcx, int chrFilterSize,
2299  const int16_t **alpSrcx, uint8_t **dest,
2300  int dstW, int y)
2301 {
2302  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2303  int i;
2304  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2305  uint16_t **dest16 = (uint16_t**)dest;
2306  const int32_t **lumSrc = (const int32_t**)lumSrcx;
2307  const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2308  const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2309  const int32_t **alpSrc = (const int32_t**)alpSrcx;
2310 
2311  for (i = 0; i < dstW; i++) {
2312  int j;
2313  int Y = -0x40000000;
2314  int U = -(128 << 23);
2315  int V = -(128 << 23);
2316  int R, G, B, A;
2317 
2318  for (j = 0; j < lumFilterSize; j++)
2319  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2320 
2321  for (j = 0; j < chrFilterSize; j++) {
2322  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2323  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2324  }
2325 
2326  Y >>= 14;
2327  Y += 0x10000;
2328  U >>= 14;
2329  V >>= 14;
2330 
2331  if (hasAlpha) {
2332  A = -0x40000000;
2333 
2334  for (j = 0; j < lumFilterSize; j++)
2335  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2336 
2337  A >>= 1;
2338  A += 0x20002000;
2339  }
2340 
2341  Y -= c->yuv2rgb_y_offset;
2342  Y *= c->yuv2rgb_y_coeff;
2343  Y += 1 << 13;
2344  R = V * c->yuv2rgb_v2r_coeff;
2345  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2346  B = U * c->yuv2rgb_u2b_coeff;
2347 
2348  R = av_clip_uintp2(Y + R, 30);
2349  G = av_clip_uintp2(Y + G, 30);
2350  B = av_clip_uintp2(Y + B, 30);
2351 
2352  dest16[0][i] = G >> 14;
2353  dest16[1][i] = B >> 14;
2354  dest16[2][i] = R >> 14;
2355  if (hasAlpha)
2356  dest16[3][i] = av_clip_uintp2(A, 30) >> 14;
2357  }
2358  if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2359  for (i = 0; i < dstW; i++) {
2360  dest16[0][i] = av_bswap16(dest16[0][i]);
2361  dest16[1][i] = av_bswap16(dest16[1][i]);
2362  dest16[2][i] = av_bswap16(dest16[2][i]);
2363  if (hasAlpha)
2364  dest16[3][i] = av_bswap16(dest16[3][i]);
2365  }
2366  }
2367 }
2368 
2369 static void
2370 yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter,
2371  const int16_t **lumSrcx, int lumFilterSize,
2372  const int16_t *chrFilter, const int16_t **chrUSrcx,
2373  const int16_t **chrVSrcx, int chrFilterSize,
2374  const int16_t **alpSrcx, uint8_t **dest,
2375  int dstW, int y)
2376 {
2377  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2378  int i;
2379  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2380  uint32_t **dest32 = (uint32_t**)dest;
2381  const int32_t **lumSrc = (const int32_t**)lumSrcx;
2382  const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2383  const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2384  const int32_t **alpSrc = (const int32_t**)alpSrcx;
2385  static const float float_mult = 1.0f / 65535.0f;
2386 
2387  for (i = 0; i < dstW; i++) {
2388  int j;
2389  int Y = -0x40000000;
2390  int U = -(128 << 23);
2391  int V = -(128 << 23);
2392  int R, G, B, A;
2393 
2394  for (j = 0; j < lumFilterSize; j++)
2395  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2396 
2397  for (j = 0; j < chrFilterSize; j++) {
2398  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2399  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2400  }
2401 
2402  Y >>= 14;
2403  Y += 0x10000;
2404  U >>= 14;
2405  V >>= 14;
2406 
2407  if (hasAlpha) {
2408  A = -0x40000000;
2409 
2410  for (j = 0; j < lumFilterSize; j++)
2411  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2412 
2413  A >>= 1;
2414  A += 0x20002000;
2415  }
2416 
2417  Y -= c->yuv2rgb_y_offset;
2418  Y *= c->yuv2rgb_y_coeff;
2419  Y += 1 << 13;
2420  R = V * c->yuv2rgb_v2r_coeff;
2421  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2422  B = U * c->yuv2rgb_u2b_coeff;
2423 
2424  R = av_clip_uintp2(Y + R, 30);
2425  G = av_clip_uintp2(Y + G, 30);
2426  B = av_clip_uintp2(Y + B, 30);
2427 
2428  dest32[0][i] = av_float2int(float_mult * (float)(G >> 14));
2429  dest32[1][i] = av_float2int(float_mult * (float)(B >> 14));
2430  dest32[2][i] = av_float2int(float_mult * (float)(R >> 14));
2431  if (hasAlpha)
2432  dest32[3][i] = av_float2int(float_mult * (float)(av_clip_uintp2(A, 30) >> 14));
2433  }
2434  if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2435  for (i = 0; i < dstW; i++) {
2436  dest32[0][i] = av_bswap32(dest32[0][i]);
2437  dest32[1][i] = av_bswap32(dest32[1][i]);
2438  dest32[2][i] = av_bswap32(dest32[2][i]);
2439  if (hasAlpha)
2440  dest32[3][i] = av_bswap32(dest32[3][i]);
2441  }
2442  }
2443 }
2444 
2445 static void
2446 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2447  const int16_t *ubuf[2], const int16_t *vbuf[2],
2448  const int16_t *abuf0, uint8_t *dest, int dstW,
2449  int uvalpha, int y)
2450 {
2451  int hasAlpha = !!abuf0;
2452  int i;
2453 
2454  for (i = 0; i < dstW; i++) {
2455  int Y = (buf0[i] + 64) >> 7;
2456  int A;
2457 
2458  Y = av_clip_uint8(Y);
2459 
2460  if (hasAlpha) {
2461  A = (abuf0[i] + 64) >> 7;
2462  if (A & 0x100)
2463  A = av_clip_uint8(A);
2464  }
2465 
2466  dest[i * 2 ] = Y;
2467  dest[i * 2 + 1] = hasAlpha ? A : 255;
2468  }
2469 }
2470 
2471 static void
2472 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2473  const int16_t *ubuf[2], const int16_t *vbuf[2],
2474  const int16_t *abuf[2], uint8_t *dest, int dstW,
2475  int yalpha, int uvalpha, int y)
2476 {
2477  int hasAlpha = abuf && abuf[0] && abuf[1];
2478  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2479  *abuf0 = hasAlpha ? abuf[0] : NULL,
2480  *abuf1 = hasAlpha ? abuf[1] : NULL;
2481  int yalpha1 = 4096 - yalpha;
2482  int i;
2483 
2484  av_assert2(yalpha <= 4096U);
2485 
2486  for (i = 0; i < dstW; i++) {
2487  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2488  int A;
2489 
2490  Y = av_clip_uint8(Y);
2491 
2492  if (hasAlpha) {
2493  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2494  A = av_clip_uint8(A);
2495  }
2496 
2497  dest[i * 2 ] = Y;
2498  dest[i * 2 + 1] = hasAlpha ? A : 255;
2499  }
2500 }
2501 
2502 static void
2503 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2504  const int16_t **lumSrc, int lumFilterSize,
2505  const int16_t *chrFilter, const int16_t **chrUSrc,
2506  const int16_t **chrVSrc, int chrFilterSize,
2507  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2508 {
2509  int hasAlpha = !!alpSrc;
2510  int i;
2511 
2512  for (i = 0; i < dstW; i++) {
2513  int j;
2514  int Y = 1 << 18, A = 1 << 18;
2515 
2516  for (j = 0; j < lumFilterSize; j++)
2517  Y += lumSrc[j][i] * lumFilter[j];
2518 
2519  Y >>= 19;
2520  if (Y & 0x100)
2521  Y = av_clip_uint8(Y);
2522 
2523  if (hasAlpha) {
2524  for (j = 0; j < lumFilterSize; j++)
2525  A += alpSrc[j][i] * lumFilter[j];
2526 
2527  A >>= 19;
2528 
2529  if (A & 0x100)
2530  A = av_clip_uint8(A);
2531  }
2532 
2533  dest[2 * i ] = Y;
2534  dest[2 * i + 1] = hasAlpha ? A : 255;
2535  }
2536 }
2537 
2538 static void
2539 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2540  const int16_t **_lumSrc, int lumFilterSize,
2541  const int16_t *chrFilter, const int16_t **_chrUSrc,
2542  const int16_t **_chrVSrc, int chrFilterSize,
2543  const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2544 {
2545  const int32_t **lumSrc = (const int32_t **) _lumSrc,
2546  **chrUSrc = (const int32_t **) _chrUSrc,
2547  **chrVSrc = (const int32_t **) _chrVSrc,
2548  **alpSrc = (const int32_t **) _alpSrc;
2549  int hasAlpha = !!alpSrc;
2550  int i;
2551 
2552  for (i = 0; i < dstW; i++) {
2553  int Y = 1 << 14, U = 1 << 14;
2554  int V = 1 << 14, A = 1 << 14;
2555  int j;
2556 
2557  Y -= 0x40000000;
2558  U -= 0x40000000;
2559  V -= 0x40000000;
2560  A -= 0x40000000;
2561 
2562  for (j = 0; j < lumFilterSize; j++)
2563  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2564 
2565  for (j = 0; j < chrFilterSize; j++)
2566  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2567 
2568  for (j = 0; j < chrFilterSize; j++)
2569  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2570 
2571  if (hasAlpha)
2572  for (j = 0; j < lumFilterSize; j++)
2573  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2574 
2575  Y = 0x8000 + av_clip_int16(Y >> 15);
2576  U = 0x8000 + av_clip_int16(U >> 15);
2577  V = 0x8000 + av_clip_int16(V >> 15);
2578  A = 0x8000 + av_clip_int16(A >> 15);
2579 
2580  AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2581  AV_WL16(dest + 8 * i + 2, Y);
2582  AV_WL16(dest + 8 * i + 4, U);
2583  AV_WL16(dest + 8 * i + 6, V);
2584  }
2585 }
2586 
2588  yuv2planar1_fn *yuv2plane1,
2590  yuv2interleavedX_fn *yuv2nv12cX,
2591  yuv2packed1_fn *yuv2packed1,
2592  yuv2packed2_fn *yuv2packed2,
2593  yuv2packedX_fn *yuv2packedX,
2594  yuv2anyX_fn *yuv2anyX)
2595 {
2596  enum AVPixelFormat dstFormat = c->dstFormat;
2597  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
2598 
2599  if (isSemiPlanarYUV(dstFormat) && isDataInHighBits(dstFormat)) {
2600  av_assert0(desc->comp[0].depth == 10);
2601  *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
2602  *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
2603  *yuv2nv12cX = isBE(dstFormat) ? yuv2p010cX_BE_c : yuv2p010cX_LE_c;
2604  } else if (is16BPS(dstFormat)) {
2605  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2606  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2607  if (isSemiPlanarYUV(dstFormat)) {
2608  *yuv2nv12cX = isBE(dstFormat) ? yuv2nv12cX_16BE_c : yuv2nv12cX_16LE_c;
2609  }
2610  } else if (isNBPS(dstFormat)) {
2611  if (desc->comp[0].depth == 9) {
2612  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2613  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2614  } else if (desc->comp[0].depth == 10) {
2615  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2616  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2617  } else if (desc->comp[0].depth == 12) {
2618  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2619  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2620  } else if (desc->comp[0].depth == 14) {
2621  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2622  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2623  } else
2624  av_assert0(0);
2625  } else if (dstFormat == AV_PIX_FMT_GRAYF32BE) {
2626  *yuv2planeX = yuv2planeX_floatBE_c;
2627  *yuv2plane1 = yuv2plane1_floatBE_c;
2628  } else if (dstFormat == AV_PIX_FMT_GRAYF32LE) {
2629  *yuv2planeX = yuv2planeX_floatLE_c;
2630  *yuv2plane1 = yuv2plane1_floatLE_c;
2631  } else {
2632  *yuv2plane1 = yuv2plane1_8_c;
2634  if (isSemiPlanarYUV(dstFormat))
2635  *yuv2nv12cX = yuv2nv12cX_c;
2636  }
2637 
2638  if(c->flags & SWS_FULL_CHR_H_INT) {
2639  switch (dstFormat) {
2640  case AV_PIX_FMT_RGBA:
2641 #if CONFIG_SMALL
2642  *yuv2packedX = yuv2rgba32_full_X_c;
2643  *yuv2packed2 = yuv2rgba32_full_2_c;
2644  *yuv2packed1 = yuv2rgba32_full_1_c;
2645 #else
2646 #if CONFIG_SWSCALE_ALPHA
2647  if (c->needAlpha) {
2648  *yuv2packedX = yuv2rgba32_full_X_c;
2649  *yuv2packed2 = yuv2rgba32_full_2_c;
2650  *yuv2packed1 = yuv2rgba32_full_1_c;
2651  } else
2652 #endif /* CONFIG_SWSCALE_ALPHA */
2653  {
2654  *yuv2packedX = yuv2rgbx32_full_X_c;
2655  *yuv2packed2 = yuv2rgbx32_full_2_c;
2656  *yuv2packed1 = yuv2rgbx32_full_1_c;
2657  }
2658 #endif /* !CONFIG_SMALL */
2659  break;
2660  case AV_PIX_FMT_ARGB:
2661 #if CONFIG_SMALL
2662  *yuv2packedX = yuv2argb32_full_X_c;
2663  *yuv2packed2 = yuv2argb32_full_2_c;
2664  *yuv2packed1 = yuv2argb32_full_1_c;
2665 #else
2666 #if CONFIG_SWSCALE_ALPHA
2667  if (c->needAlpha) {
2668  *yuv2packedX = yuv2argb32_full_X_c;
2669  *yuv2packed2 = yuv2argb32_full_2_c;
2670  *yuv2packed1 = yuv2argb32_full_1_c;
2671  } else
2672 #endif /* CONFIG_SWSCALE_ALPHA */
2673  {
2674  *yuv2packedX = yuv2xrgb32_full_X_c;
2675  *yuv2packed2 = yuv2xrgb32_full_2_c;
2676  *yuv2packed1 = yuv2xrgb32_full_1_c;
2677  }
2678 #endif /* !CONFIG_SMALL */
2679  break;
2680  case AV_PIX_FMT_BGRA:
2681 #if CONFIG_SMALL
2682  *yuv2packedX = yuv2bgra32_full_X_c;
2683  *yuv2packed2 = yuv2bgra32_full_2_c;
2684  *yuv2packed1 = yuv2bgra32_full_1_c;
2685 #else
2686 #if CONFIG_SWSCALE_ALPHA
2687  if (c->needAlpha) {
2688  *yuv2packedX = yuv2bgra32_full_X_c;
2689  *yuv2packed2 = yuv2bgra32_full_2_c;
2690  *yuv2packed1 = yuv2bgra32_full_1_c;
2691  } else
2692 #endif /* CONFIG_SWSCALE_ALPHA */
2693  {
2694  *yuv2packedX = yuv2bgrx32_full_X_c;
2695  *yuv2packed2 = yuv2bgrx32_full_2_c;
2696  *yuv2packed1 = yuv2bgrx32_full_1_c;
2697  }
2698 #endif /* !CONFIG_SMALL */
2699  break;
2700  case AV_PIX_FMT_ABGR:
2701 #if CONFIG_SMALL
2702  *yuv2packedX = yuv2abgr32_full_X_c;
2703  *yuv2packed2 = yuv2abgr32_full_2_c;
2704  *yuv2packed1 = yuv2abgr32_full_1_c;
2705 #else
2706 #if CONFIG_SWSCALE_ALPHA
2707  if (c->needAlpha) {
2708  *yuv2packedX = yuv2abgr32_full_X_c;
2709  *yuv2packed2 = yuv2abgr32_full_2_c;
2710  *yuv2packed1 = yuv2abgr32_full_1_c;
2711  } else
2712 #endif /* CONFIG_SWSCALE_ALPHA */
2713  {
2714  *yuv2packedX = yuv2xbgr32_full_X_c;
2715  *yuv2packed2 = yuv2xbgr32_full_2_c;
2716  *yuv2packed1 = yuv2xbgr32_full_1_c;
2717  }
2718 #endif /* !CONFIG_SMALL */
2719  break;
2720  case AV_PIX_FMT_RGBA64LE:
2721 #if CONFIG_SWSCALE_ALPHA
2722  if (c->needAlpha) {
2723  *yuv2packedX = yuv2rgba64le_full_X_c;
2724  *yuv2packed2 = yuv2rgba64le_full_2_c;
2725  *yuv2packed1 = yuv2rgba64le_full_1_c;
2726  } else
2727 #endif /* CONFIG_SWSCALE_ALPHA */
2728  {
2729  *yuv2packedX = yuv2rgbx64le_full_X_c;
2730  *yuv2packed2 = yuv2rgbx64le_full_2_c;
2731  *yuv2packed1 = yuv2rgbx64le_full_1_c;
2732  }
2733  break;
2734  case AV_PIX_FMT_RGBA64BE:
2735 #if CONFIG_SWSCALE_ALPHA
2736  if (c->needAlpha) {
2737  *yuv2packedX = yuv2rgba64be_full_X_c;
2738  *yuv2packed2 = yuv2rgba64be_full_2_c;
2739  *yuv2packed1 = yuv2rgba64be_full_1_c;
2740  } else
2741 #endif /* CONFIG_SWSCALE_ALPHA */
2742  {
2743  *yuv2packedX = yuv2rgbx64be_full_X_c;
2744  *yuv2packed2 = yuv2rgbx64be_full_2_c;
2745  *yuv2packed1 = yuv2rgbx64be_full_1_c;
2746  }
2747  break;
2748  case AV_PIX_FMT_BGRA64LE:
2749 #if CONFIG_SWSCALE_ALPHA
2750  if (c->needAlpha) {
2751  *yuv2packedX = yuv2bgra64le_full_X_c;
2752  *yuv2packed2 = yuv2bgra64le_full_2_c;
2753  *yuv2packed1 = yuv2bgra64le_full_1_c;
2754  } else
2755 #endif /* CONFIG_SWSCALE_ALPHA */
2756  {
2757  *yuv2packedX = yuv2bgrx64le_full_X_c;
2758  *yuv2packed2 = yuv2bgrx64le_full_2_c;
2759  *yuv2packed1 = yuv2bgrx64le_full_1_c;
2760  }
2761  break;
2762  case AV_PIX_FMT_BGRA64BE:
2763 #if CONFIG_SWSCALE_ALPHA
2764  if (c->needAlpha) {
2765  *yuv2packedX = yuv2bgra64be_full_X_c;
2766  *yuv2packed2 = yuv2bgra64be_full_2_c;
2767  *yuv2packed1 = yuv2bgra64be_full_1_c;
2768  } else
2769 #endif /* CONFIG_SWSCALE_ALPHA */
2770  {
2771  *yuv2packedX = yuv2bgrx64be_full_X_c;
2772  *yuv2packed2 = yuv2bgrx64be_full_2_c;
2773  *yuv2packed1 = yuv2bgrx64be_full_1_c;
2774  }
2775  break;
2776 
2777  case AV_PIX_FMT_RGB24:
2778  *yuv2packedX = yuv2rgb24_full_X_c;
2779  *yuv2packed2 = yuv2rgb24_full_2_c;
2780  *yuv2packed1 = yuv2rgb24_full_1_c;
2781  break;
2782  case AV_PIX_FMT_BGR24:
2783  *yuv2packedX = yuv2bgr24_full_X_c;
2784  *yuv2packed2 = yuv2bgr24_full_2_c;
2785  *yuv2packed1 = yuv2bgr24_full_1_c;
2786  break;
2787  case AV_PIX_FMT_RGB48LE:
2788  *yuv2packedX = yuv2rgb48le_full_X_c;
2789  *yuv2packed2 = yuv2rgb48le_full_2_c;
2790  *yuv2packed1 = yuv2rgb48le_full_1_c;
2791  break;
2792  case AV_PIX_FMT_BGR48LE:
2793  *yuv2packedX = yuv2bgr48le_full_X_c;
2794  *yuv2packed2 = yuv2bgr48le_full_2_c;
2795  *yuv2packed1 = yuv2bgr48le_full_1_c;
2796  break;
2797  case AV_PIX_FMT_RGB48BE:
2798  *yuv2packedX = yuv2rgb48be_full_X_c;
2799  *yuv2packed2 = yuv2rgb48be_full_2_c;
2800  *yuv2packed1 = yuv2rgb48be_full_1_c;
2801  break;
2802  case AV_PIX_FMT_BGR48BE:
2803  *yuv2packedX = yuv2bgr48be_full_X_c;
2804  *yuv2packed2 = yuv2bgr48be_full_2_c;
2805  *yuv2packed1 = yuv2bgr48be_full_1_c;
2806  break;
2807  case AV_PIX_FMT_BGR4_BYTE:
2808  *yuv2packedX = yuv2bgr4_byte_full_X_c;
2809  *yuv2packed2 = yuv2bgr4_byte_full_2_c;
2810  *yuv2packed1 = yuv2bgr4_byte_full_1_c;
2811  break;
2812  case AV_PIX_FMT_RGB4_BYTE:
2813  *yuv2packedX = yuv2rgb4_byte_full_X_c;
2814  *yuv2packed2 = yuv2rgb4_byte_full_2_c;
2815  *yuv2packed1 = yuv2rgb4_byte_full_1_c;
2816  break;
2817  case AV_PIX_FMT_BGR8:
2818  *yuv2packedX = yuv2bgr8_full_X_c;
2819  *yuv2packed2 = yuv2bgr8_full_2_c;
2820  *yuv2packed1 = yuv2bgr8_full_1_c;
2821  break;
2822  case AV_PIX_FMT_RGB8:
2823  *yuv2packedX = yuv2rgb8_full_X_c;
2824  *yuv2packed2 = yuv2rgb8_full_2_c;
2825  *yuv2packed1 = yuv2rgb8_full_1_c;
2826  break;
2827  case AV_PIX_FMT_GBRP:
2828  case AV_PIX_FMT_GBRP9BE:
2829  case AV_PIX_FMT_GBRP9LE:
2830  case AV_PIX_FMT_GBRP10BE:
2831  case AV_PIX_FMT_GBRP10LE:
2832  case AV_PIX_FMT_GBRP12BE:
2833  case AV_PIX_FMT_GBRP12LE:
2834  case AV_PIX_FMT_GBRP14BE:
2835  case AV_PIX_FMT_GBRP14LE:
2836  case AV_PIX_FMT_GBRAP:
2837  case AV_PIX_FMT_GBRAP10BE:
2838  case AV_PIX_FMT_GBRAP10LE:
2839  case AV_PIX_FMT_GBRAP12BE:
2840  case AV_PIX_FMT_GBRAP12LE:
2841  *yuv2anyX = yuv2gbrp_full_X_c;
2842  break;
2843  case AV_PIX_FMT_GBRP16BE:
2844  case AV_PIX_FMT_GBRP16LE:
2845  case AV_PIX_FMT_GBRAP16BE:
2846  case AV_PIX_FMT_GBRAP16LE:
2847  *yuv2anyX = yuv2gbrp16_full_X_c;
2848  break;
2849  case AV_PIX_FMT_GBRPF32BE:
2850  case AV_PIX_FMT_GBRPF32LE:
2851  case AV_PIX_FMT_GBRAPF32BE:
2852  case AV_PIX_FMT_GBRAPF32LE:
2853  *yuv2anyX = yuv2gbrpf32_full_X_c;
2854  break;
2855  }
2856  if (!*yuv2packedX && !*yuv2anyX)
2857  goto YUV_PACKED;
2858  } else {
2859  YUV_PACKED:
2860  switch (dstFormat) {
2861  case AV_PIX_FMT_RGBA64LE:
2862 #if CONFIG_SWSCALE_ALPHA
2863  if (c->needAlpha) {
2864  *yuv2packed1 = yuv2rgba64le_1_c;
2865  *yuv2packed2 = yuv2rgba64le_2_c;
2866  *yuv2packedX = yuv2rgba64le_X_c;
2867  } else
2868 #endif /* CONFIG_SWSCALE_ALPHA */
2869  {
2870  *yuv2packed1 = yuv2rgbx64le_1_c;
2871  *yuv2packed2 = yuv2rgbx64le_2_c;
2872  *yuv2packedX = yuv2rgbx64le_X_c;
2873  }
2874  break;
2875  case AV_PIX_FMT_RGBA64BE:
2876 #if CONFIG_SWSCALE_ALPHA
2877  if (c->needAlpha) {
2878  *yuv2packed1 = yuv2rgba64be_1_c;
2879  *yuv2packed2 = yuv2rgba64be_2_c;
2880  *yuv2packedX = yuv2rgba64be_X_c;
2881  } else
2882 #endif /* CONFIG_SWSCALE_ALPHA */
2883  {
2884  *yuv2packed1 = yuv2rgbx64be_1_c;
2885  *yuv2packed2 = yuv2rgbx64be_2_c;
2886  *yuv2packedX = yuv2rgbx64be_X_c;
2887  }
2888  break;
2889  case AV_PIX_FMT_BGRA64LE:
2890 #if CONFIG_SWSCALE_ALPHA
2891  if (c->needAlpha) {
2892  *yuv2packed1 = yuv2bgra64le_1_c;
2893  *yuv2packed2 = yuv2bgra64le_2_c;
2894  *yuv2packedX = yuv2bgra64le_X_c;
2895  } else
2896 #endif /* CONFIG_SWSCALE_ALPHA */
2897  {
2898  *yuv2packed1 = yuv2bgrx64le_1_c;
2899  *yuv2packed2 = yuv2bgrx64le_2_c;
2900  *yuv2packedX = yuv2bgrx64le_X_c;
2901  }
2902  break;
2903  case AV_PIX_FMT_BGRA64BE:
2904 #if CONFIG_SWSCALE_ALPHA
2905  if (c->needAlpha) {
2906  *yuv2packed1 = yuv2bgra64be_1_c;
2907  *yuv2packed2 = yuv2bgra64be_2_c;
2908  *yuv2packedX = yuv2bgra64be_X_c;
2909  } else
2910 #endif /* CONFIG_SWSCALE_ALPHA */
2911  {
2912  *yuv2packed1 = yuv2bgrx64be_1_c;
2913  *yuv2packed2 = yuv2bgrx64be_2_c;
2914  *yuv2packedX = yuv2bgrx64be_X_c;
2915  }
2916  break;
2917  case AV_PIX_FMT_RGB48LE:
2918  *yuv2packed1 = yuv2rgb48le_1_c;
2919  *yuv2packed2 = yuv2rgb48le_2_c;
2920  *yuv2packedX = yuv2rgb48le_X_c;
2921  break;
2922  case AV_PIX_FMT_RGB48BE:
2923  *yuv2packed1 = yuv2rgb48be_1_c;
2924  *yuv2packed2 = yuv2rgb48be_2_c;
2925  *yuv2packedX = yuv2rgb48be_X_c;
2926  break;
2927  case AV_PIX_FMT_BGR48LE:
2928  *yuv2packed1 = yuv2bgr48le_1_c;
2929  *yuv2packed2 = yuv2bgr48le_2_c;
2930  *yuv2packedX = yuv2bgr48le_X_c;
2931  break;
2932  case AV_PIX_FMT_BGR48BE:
2933  *yuv2packed1 = yuv2bgr48be_1_c;
2934  *yuv2packed2 = yuv2bgr48be_2_c;
2935  *yuv2packedX = yuv2bgr48be_X_c;
2936  break;
2937  case AV_PIX_FMT_RGB32:
2938  case AV_PIX_FMT_BGR32:
2939 #if CONFIG_SMALL
2940  *yuv2packed1 = yuv2rgb32_1_c;
2941  *yuv2packed2 = yuv2rgb32_2_c;
2942  *yuv2packedX = yuv2rgb32_X_c;
2943 #else
2944 #if CONFIG_SWSCALE_ALPHA
2945  if (c->needAlpha) {
2946  *yuv2packed1 = yuv2rgba32_1_c;
2947  *yuv2packed2 = yuv2rgba32_2_c;
2948  *yuv2packedX = yuv2rgba32_X_c;
2949  } else
2950 #endif /* CONFIG_SWSCALE_ALPHA */
2951  {
2952  *yuv2packed1 = yuv2rgbx32_1_c;
2953  *yuv2packed2 = yuv2rgbx32_2_c;
2954  *yuv2packedX = yuv2rgbx32_X_c;
2955  }
2956 #endif /* !CONFIG_SMALL */
2957  break;
2958  case AV_PIX_FMT_RGB32_1:
2959  case AV_PIX_FMT_BGR32_1:
2960 #if CONFIG_SMALL
2961  *yuv2packed1 = yuv2rgb32_1_1_c;
2962  *yuv2packed2 = yuv2rgb32_1_2_c;
2963  *yuv2packedX = yuv2rgb32_1_X_c;
2964 #else
2965 #if CONFIG_SWSCALE_ALPHA
2966  if (c->needAlpha) {
2967  *yuv2packed1 = yuv2rgba32_1_1_c;
2968  *yuv2packed2 = yuv2rgba32_1_2_c;
2969  *yuv2packedX = yuv2rgba32_1_X_c;
2970  } else
2971 #endif /* CONFIG_SWSCALE_ALPHA */
2972  {
2973  *yuv2packed1 = yuv2rgbx32_1_1_c;
2974  *yuv2packed2 = yuv2rgbx32_1_2_c;
2975  *yuv2packedX = yuv2rgbx32_1_X_c;
2976  }
2977 #endif /* !CONFIG_SMALL */
2978  break;
2979  case AV_PIX_FMT_RGB24:
2980  *yuv2packed1 = yuv2rgb24_1_c;
2981  *yuv2packed2 = yuv2rgb24_2_c;
2982  *yuv2packedX = yuv2rgb24_X_c;
2983  break;
2984  case AV_PIX_FMT_BGR24:
2985  *yuv2packed1 = yuv2bgr24_1_c;
2986  *yuv2packed2 = yuv2bgr24_2_c;
2987  *yuv2packedX = yuv2bgr24_X_c;
2988  break;
2989  case AV_PIX_FMT_RGB565LE:
2990  case AV_PIX_FMT_RGB565BE:
2991  case AV_PIX_FMT_BGR565LE:
2992  case AV_PIX_FMT_BGR565BE:
2993  *yuv2packed1 = yuv2rgb16_1_c;
2994  *yuv2packed2 = yuv2rgb16_2_c;
2995  *yuv2packedX = yuv2rgb16_X_c;
2996  break;
2997  case AV_PIX_FMT_RGB555LE:
2998  case AV_PIX_FMT_RGB555BE:
2999  case AV_PIX_FMT_BGR555LE:
3000  case AV_PIX_FMT_BGR555BE:
3001  *yuv2packed1 = yuv2rgb15_1_c;
3002  *yuv2packed2 = yuv2rgb15_2_c;
3003  *yuv2packedX = yuv2rgb15_X_c;
3004  break;
3005  case AV_PIX_FMT_RGB444LE:
3006  case AV_PIX_FMT_RGB444BE:
3007  case AV_PIX_FMT_BGR444LE:
3008  case AV_PIX_FMT_BGR444BE:
3009  *yuv2packed1 = yuv2rgb12_1_c;
3010  *yuv2packed2 = yuv2rgb12_2_c;
3011  *yuv2packedX = yuv2rgb12_X_c;
3012  break;
3013  case AV_PIX_FMT_RGB8:
3014  case AV_PIX_FMT_BGR8:
3015  *yuv2packed1 = yuv2rgb8_1_c;
3016  *yuv2packed2 = yuv2rgb8_2_c;
3017  *yuv2packedX = yuv2rgb8_X_c;
3018  break;
3019  case AV_PIX_FMT_RGB4:
3020  case AV_PIX_FMT_BGR4:
3021  *yuv2packed1 = yuv2rgb4_1_c;
3022  *yuv2packed2 = yuv2rgb4_2_c;
3023  *yuv2packedX = yuv2rgb4_X_c;
3024  break;
3025  case AV_PIX_FMT_RGB4_BYTE:
3026  case AV_PIX_FMT_BGR4_BYTE:
3027  *yuv2packed1 = yuv2rgb4b_1_c;
3028  *yuv2packed2 = yuv2rgb4b_2_c;
3029  *yuv2packedX = yuv2rgb4b_X_c;
3030  break;
3031  case AV_PIX_FMT_X2RGB10LE:
3032  case AV_PIX_FMT_X2RGB10BE:
3033  *yuv2packed1 = yuv2x2rgb10_1_c;
3034  *yuv2packed2 = yuv2x2rgb10_2_c;
3035  *yuv2packedX = yuv2x2rgb10_X_c;
3036  break;
3037  case AV_PIX_FMT_X2BGR10LE:
3038  case AV_PIX_FMT_X2BGR10BE:
3039  *yuv2packed1 = yuv2x2bgr10_1_c;
3040  *yuv2packed2 = yuv2x2bgr10_2_c;
3041  *yuv2packedX = yuv2x2bgr10_X_c;
3042  break;
3043  }
3044  }
3045  switch (dstFormat) {
3046  case AV_PIX_FMT_MONOWHITE:
3047  *yuv2packed1 = yuv2monowhite_1_c;
3048  *yuv2packed2 = yuv2monowhite_2_c;
3049  *yuv2packedX = yuv2monowhite_X_c;
3050  break;
3051  case AV_PIX_FMT_MONOBLACK:
3052  *yuv2packed1 = yuv2monoblack_1_c;
3053  *yuv2packed2 = yuv2monoblack_2_c;
3054  *yuv2packedX = yuv2monoblack_X_c;
3055  break;
3056  case AV_PIX_FMT_YUYV422:
3057  *yuv2packed1 = yuv2yuyv422_1_c;
3058  *yuv2packed2 = yuv2yuyv422_2_c;
3059  *yuv2packedX = yuv2yuyv422_X_c;
3060  break;
3061  case AV_PIX_FMT_YVYU422:
3062  *yuv2packed1 = yuv2yvyu422_1_c;
3063  *yuv2packed2 = yuv2yvyu422_2_c;
3064  *yuv2packedX = yuv2yvyu422_X_c;
3065  break;
3066  case AV_PIX_FMT_UYVY422:
3067  *yuv2packed1 = yuv2uyvy422_1_c;
3068  *yuv2packed2 = yuv2uyvy422_2_c;
3069  *yuv2packedX = yuv2uyvy422_X_c;
3070  break;
3071  case AV_PIX_FMT_YA8:
3072  *yuv2packed1 = yuv2ya8_1_c;
3073  *yuv2packed2 = yuv2ya8_2_c;
3074  *yuv2packedX = yuv2ya8_X_c;
3075  break;
3076  case AV_PIX_FMT_YA16LE:
3077  *yuv2packed1 = yuv2ya16le_1_c;
3078  *yuv2packed2 = yuv2ya16le_2_c;
3079  *yuv2packedX = yuv2ya16le_X_c;
3080  break;
3081  case AV_PIX_FMT_YA16BE:
3082  *yuv2packed1 = yuv2ya16be_1_c;
3083  *yuv2packed2 = yuv2ya16be_2_c;
3084  *yuv2packedX = yuv2ya16be_X_c;
3085  break;
3086  case AV_PIX_FMT_AYUV64LE:
3087  *yuv2packedX = yuv2ayuv64le_X_c;
3088  break;
3089  }
3090 }
yuv2p010cX_BE_c
static void yuv2p010cX_BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:557
yuv2packed2_fn
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:222
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:116
yuv2packed1_fn
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:189
YUV2PACKEDWRAPPER
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
Definition: output.c:743
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AV_PIX_FMT_BGR48LE
@ AV_PIX_FMT_BGR48LE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:139
av_clip
#define av_clip
Definition: common.h:96
ff_dither_4x4_16
const uint8_t ff_dither_4x4_16[][8]
Definition: output.c:51
X_DITHER
#define X_DITHER(u, v)
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:133
AV_PIX_FMT_BGRA64BE
@ AV_PIX_FMT_BGRA64BE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:197
yuv2rgb_X_c_template
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1680
mem_internal.h
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:379
AV_PIX_FMT_RGB444LE
@ AV_PIX_FMT_RGB444LE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:129
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:164
AV_PIX_FMT_GBRP10BE
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:162
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
ff_dither_8x8_32
const uint8_t ff_dither_8x8_32[][8]
Definition: output.c:59
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
yuv2rgb_full_1_c_template
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2132
yuv2rgba64_full_X_c_template
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1275
SWS_DITHER_A_DITHER
@ SWS_DITHER_A_DITHER
Definition: swscale_internal.h:75
accumulate_bit
#define accumulate_bit(acc, val)
Definition: output.c:568
pixdesc.h
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:195
step
trying all byte sequences a megabyte in length and selecting the best-looking sequence would yield far too many cases to try. But a word about quality, which is also called distortion: distortion can be quantified by almost any quality measurement one chooses; typically the sum of squared differences is used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step
Definition: rate_distortion.txt:58
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:311
AV_PIX_FMT_X2BGR10BE
@ AV_PIX_FMT_X2BGR10BE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:354
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:308
R
#define R
Definition: huffyuvdsp.h:34
yuv2nv12cX_16_c_template
static av_always_inline void yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW, int output_bits)
Definition: output.c:184
b
#define b
Definition: input.c:40
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:378
yuv2rgba64_X_c_template
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1025
b_r
#define b_r
AV_PIX_FMT_GBRP14BE
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
Definition: pixfmt.h:246
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
mathematics.h
yuv2rgb_full_X_c_template
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2037
filter
filter_frame: for filters that do not use the activate callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, the filter should push the output frames on the output link immediately. As an exception to the previous rule, if the input frame is enough to produce several output frames, the filter needs to output only at least one per link; the additional frames can be left buffered in the filter
Definition: filter_design.txt:228
yuv2422_2_c_template
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:836
av_float2int
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
yuv2plane1_8_c
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:415
AV_PIX_FMT_GRAYF32LE
@ AV_PIX_FMT_GRAYF32LE
IEEE-754 single precision Y, 32bpp, little-endian.
Definition: pixfmt.h:331
yuv2planeX_10_c_template
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:335
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:107
AV_PIX_FMT_AYUV64LE
@ AV_PIX_FMT_AYUV64LE
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:269
SH
#define SH(val, pdst)
Definition: generic_macros_msa.h:154
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:278
A
#define A(x)
Definition: vp56_arith.h:28
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:696
rgb
Definition: rpzaenc.c:59
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
yuv2anyX_fn
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:288
yuv2422_X_c_template
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:797
U
#define U(x)
Definition: vp56_arith.h:37
yuv2mono_1_c_template
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:693
yuv2plane1_16_c_template
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:144
yuv2422_1_c_template
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:869
yuv2gbrp_full_X_c
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:2213
val
static double val(void *priv, double ch)
Definition: aeval.c:76
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:710
R_B
#define R_B
Definition: output.c:919
yuv2plane1_float
#define yuv2plane1_float(template, dest_type, BE_LE)
Definition: output.c:285
av_bswap32
#define av_bswap32
Definition: bswap.h:33
yuv2planeX_16_c_template
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:158
r_b
#define r_b
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
avassert.h
av_cold
#define av_cold
Definition: attributes.h:90
yuv2mono_2_c_template
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:632
yuv2planeX_float
#define yuv2planeX_float(template, dest_type, BE_LE)
Definition: output.c:292
YUVRGB_TABLE_HEADROOM
#define YUVRGB_TABLE_HEADROOM
Definition: swscale_internal.h:43
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale_internal.h:74
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:206
yuv2rgb_full_2_c_template
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2087
intreadwrite.h
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:165
output_pixels
#define output_pixels(pos, Y1, U, Y2, V)
Definition: output.c:778
g
const char * g
Definition: vf_curves.c:117
yuv2p010l1_LE_c
static void yuv2p010l1_LE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:521
yuv2p010cX_LE_c
static void yuv2p010cX_LE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:549
AV_PIX_FMT_GBRP12LE
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:245
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
yuv2plane1_float_bswap_c_template
static av_always_inline void yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
Definition: output.c:230
ff_dither_2x2_4
const uint8_t ff_dither_2x2_4[][8]
Definition: output.c:39
ff_dither_8x8_220
const uint8_t ff_dither_8x8_220[][8]
Definition: output.c:84
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
yuv2ya8_X_c
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2503
YUV2PACKED16WRAPPER
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes)
Definition: output.c:1471
yuv2mono_X_c_template
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:579
AV_PIX_FMT_RGB4
@ AV_PIX_FMT_RGB4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:87
AV_PIX_FMT_GBRP10LE
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:163
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:380
yuv2rgba64_full_2_c_template
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1339
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
if
if(ret)
Definition: filter_design.txt:179
isSemiPlanarYUV
static av_always_inline int isSemiPlanarYUV(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:742
yuv2nv12cX_16BE_c
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:392
yuv2NBPS
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:355
SWS_DITHER_NONE
@ SWS_DITHER_NONE
Definition: swscale_internal.h:71
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:106
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:310
AV_PIX_FMT_GBRAP12BE
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
Definition: pixfmt.h:277
av_clip_int16
#define av_clip_int16
Definition: common.h:111
NULL
#define NULL
Definition: coverity.c:32
yuv2rgba64_full_1_c_template
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1391
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:203
yuv2gbrp16_full_X_c
static void yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2295
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
V
#define V
Definition: avdct.c:30
AV_PIX_FMT_BGR565LE
@ AV_PIX_FMT_BGR565LE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:111
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:196
src
#define src
Definition: vp8dsp.c:255
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
yuv2ya8_2_c
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2472
AV_PIX_FMT_BGR4
@ AV_PIX_FMT_BGR4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:84
AV_PIX_FMT_BGR555BE
@ AV_PIX_FMT_BGR555BE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:112
yuv2plane1_float_c_template
static av_always_inline void yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
Definition: output.c:214
A2
#define A2
Definition: binkdsp.c:32
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
yuv2p010lX_c
static void yuv2p010lX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:481
c
Undefined Behavior: in the C language, some operations are undefined — like signed integer overflow, dereferencing freed pointers, or accessing outside allocated bounds. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. One example is the signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not c
Definition: undefined.txt:32
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:85
A_DITHER
#define A_DITHER(u, v)
AV_PIX_FMT_X2RGB10LE
@ AV_PIX_FMT_X2RGB10LE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:351
isDataInHighBits
static av_always_inline int isDataInHighBits(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:898
d64
const uint8_t * d64
Definition: yuv2rgb.c:502
AV_PIX_FMT_X2BGR10
#define AV_PIX_FMT_X2BGR10
Definition: pixfmt.h:458
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:717
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_PIX_FMT_RGB444BE
@ AV_PIX_FMT_RGB444BE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:130
SWS_FULL_CHR_H_INT
#define SWS_FULL_CHR_H_INT
Definition: swscale.h:80
yuv2planeX_float_bswap_c_template
static av_always_inline void yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint32_t *dest, int dstW)
Definition: output.c:266
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:202
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:397
AV_PIX_FMT_GBRP9BE
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
Definition: pixfmt.h:160
AV_PIX_FMT_BGR444BE
@ AV_PIX_FMT_BGR444BE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:132
output_pixel
#define output_pixel(pos, val, bias, signedness)
Definition: output.c:921
AV_PIX_FMT_GBRP9LE
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
Definition: pixfmt.h:161
yuv2ya16_2_c_template
static av_always_inline void yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:967
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:377
AV_PIX_FMT_GBRAP10LE
@ AV_PIX_FMT_GBRAP10LE
planar GBR 4:4:4:4 40bpp, little-endian
Definition: pixfmt.h:281
isSwappedChroma
static av_always_inline int isSwappedChroma(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:917
AV_PIX_FMT_BGR565BE
@ AV_PIX_FMT_BGR565BE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
Definition: pixfmt.h:110
yuv2nv12cX_c
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
Definition: output.c:425
av_bswap16
#define av_bswap16
Definition: bswap.h:31
offset
it's the only field you need to keep, assuming you have a context. There is some magic around this that you don't need to care about — just let it be. vf offset
Definition: writing_filters.txt:86
attributes.h
yuv2rgb_write_full
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
Definition: output.c:1886
ff_dither_8x8_73
const uint8_t ff_dither_8x8_73[][8]
Definition: output.c:71
Y
#define Y
Definition: boxblur.h:37
yuv2rgb_write
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1554
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
AV_PIX_FMT_BGRA64LE
@ AV_PIX_FMT_BGRA64LE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:198
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
yuv2planeX_8_c
static void yuv2planeX_8_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:400
yuv2p010cX_c
static void yuv2p010cX_c(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:498
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:108
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
yuv2p010l1_c
static void yuv2p010l1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:468
yuv2ya16_1_c_template
static av_always_inline void yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:999
AV_PIX_FMT_BGR444
#define AV_PIX_FMT_BGR444
Definition: pixfmt.h:398
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:392
yuv2planeX_float_c_template
static av_always_inline void yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src, float *dest, int dstW)
Definition: output.c:246
av_always_inline
#define av_always_inline
Definition: attributes.h:49
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:152
yuv2gbrpf32_full_X_c
static void yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2370
AV_PIX_FMT_X2RGB10
#define AV_PIX_FMT_X2RGB10
Definition: pixfmt.h:457
AV_PIX_FMT_X2RGB10BE
@ AV_PIX_FMT_X2RGB10BE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:352
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:396
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:88
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:309
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:391
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:207
AV_PIX_FMT_YVYU422
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
Definition: pixfmt.h:200
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:2587
G
#define G
Definition: huffyuvdsp.h:33
bswap.h
yuv2p010lX_LE_c
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:535
AV_PIX_FMT_GRAYF32BE
@ AV_PIX_FMT_GRAYF32BE
IEEE-754 single precision Y, 32bpp, big-endian.
Definition: pixfmt.h:330
yuv2ya8_1_c
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
Definition: output.c:2446
YUV2RGBWRAPPER
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
Definition: output.c:1853
d128
const uint8_t * d128
Definition: yuv2rgb.c:553
AV_PIX_FMT_GBRP12BE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:244
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:132
B
#define B
Definition: huffyuvdsp.h:32
yuv2packedX_fn
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:254
yuv2p010lX_BE_c
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:542
yuv2ya16_X_c_template
static av_always_inline void yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **unused_chrUSrc, const int32_t **unused_chrVSrc, int unused_chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:929
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
AV_PIX_FMT_RGB565BE
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:105
av_clip_uint16
#define av_clip_uint16
Definition: common.h:108
shift
static int shift(int a, int b)
Definition: sonic.c:83
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
desc
const char * desc
Definition: libsvtav1.c:79
d32
const uint8_t * d32
Definition: yuv2rgb.c:501
avutil.h
AV_PIX_FMT_X2BGR10LE
@ AV_PIX_FMT_X2BGR10LE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:353
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AV_PIX_FMT_BGR555LE
@ AV_PIX_FMT_BGR555LE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:113
yuv2rgb_1_c_template
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1774
SWS_DITHER_AUTO
@ SWS_DITHER_AUTO
Definition: swscale_internal.h:72
B_R
#define B_R
Definition: output.c:920
AV_PIX_FMT_GBRP14LE
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
Definition: pixfmt.h:247
int32_t
int32_t
Definition: audioconvert.c:56
yuv2rgb_2_c_template
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1734
A1
#define A1
Definition: binkdsp.c:31
AV_PIX_FMT_GBRAP10BE
@ AV_PIX_FMT_GBRAP10BE
planar GBR 4:4:4:4 40bpp, big-endian
Definition: pixfmt.h:280
yuv2p010l1_BE_c
static void yuv2p010l1_BE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:528
yuv2rgba64_1_c_template
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1172
yuv2rgba64_2_c_template
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1107
SWS_DITHER_X_DITHER
@ SWS_DITHER_X_DITHER
Definition: swscale_internal.h:76
SwsContext
Definition: swscale_internal.h:300
AV_PIX_FMT_BGR444LE
@ AV_PIX_FMT_BGR444LE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:131
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:261
rgb2rgb.h
swscale.h
yuv2ayuv64le_X_c
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2539
ff_dither_2x2_8
const uint8_t ff_dither_2x2_8[][8]
Definition: output.c:45
AV_PIX_FMT_BGR48BE
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:138
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:393
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:58