a64multienc.c
/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */

#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "encode.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1
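/* Compile-time tuning switches:
 * DITHERSTEPS   - number of dither levels between two adjacent gradient colors
 * CHARSET_CHARS - size of the ELBG codebook, i.e. one full C64 character set
 * INTERLACED    - additionally render a second charset from the interlaced
 *                 dither patterns, presumably shown on alternating fields
 * CROP_SCREENS  - store only the used part of the 40x25 character screen */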
#define C64XRES 320
#define C64YRES 200

typedef struct A64Context {
    /* variables for multicolor modes */
    struct ELBGContext *elbg;
    AVLFG randctx;
    int mc_lifetime;             /* number of frames sharing one charset */
    int mc_use_5col;             /* nonzero for the a64multi5 variant */
    unsigned mc_frame_counter;   /* frames buffered for the current charset */
    int *mc_meta_charset;        /* linear luma blocks of all buffered frames (ELBG input) */
    int *mc_charmap;             /* per-frame map: screen position -> charset index */
    int *mc_best_cb;             /* ELBG codebook: CHARSET_CHARS entries of 32 luma samples */
    int mc_luma_vals[5];         /* luma of each gradient color */
    uint8_t *mc_colram;          /* per-character flag: uses the 5th color */
    uint8_t *mc_palette;
    int mc_pal_size;             /* 4, or 5 when the colram color is used */

    /* pts of the next packet that will be output */
    int64_t next_pts;
} A64Context;

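/* The values below index the C64 palette in a64colors.h and form a dark-to-
 * bright luma ramp: black (0x0), dark grey (0xb), mid grey (0xc), light grey
 * (0xf) and white (0x1). The last entry is only used in 5-color mode, where
 * the display side is expected to place it in color RAM per character. */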
/* gray gradient */
static const uint8_t mc_colors[5]={0x0,0xb,0xc,0xf,0x1};

/* other possible gradients - to be tested */
//static const uint8_t mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const uint8_t mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

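/* Convert the GRAY8 input into the linear, per-character-block luma layout
 * that the ELBG quantizer expects: 8x8 pixel blocks, scanned block by block.
 * Horizontally adjacent pixel pairs are averaged because C64 multicolor
 * pixels are two hires pixels wide (160x200 effective resolution). Input
 * smaller than 320x200 leaves the remaining block area untouched. */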
static void to_meta_with_crop(AVCodecContext *avctx,
                              const AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width , C64XRES);
    uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if (x < width && y < height) {
                        if (x + 1 < width) {
                            /* build average over 2 pixels */
                            luma = (src[(x + 0 + y * p->linesize[0])] +
                                    src[(x + 1 + y * p->linesize[0])]) / 2;
                        } else {
                            luma = src[(x + y * p->linesize[0])];
                        }
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}

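/* Turn the ELBG codebook into an actual C64 charset. Each codebook entry
 * (4x8 averaged luma samples) becomes 8 bytes of 2-bit multicolor pixels,
 * dithered with the patterns from a64tables.h; with INTERLACED set a second
 * charset is written 0x800 bytes further, presumably shown on alternating
 * fields. colrammap[] records which characters need the 5th color, and a
 * character that would need both luma extremes at once is clamped in the
 * codebook and rendered again. */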
static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    uint8_t index1[256];
    uint8_t index2[256];
    uint8_t dither[256];
    int i;
    int distance;

    /* Generate lookup-tables for dither and index before looping.
     * This code relies on c->mc_luma_vals[c->mc_pal_size - 1] being
     * the maximum of all the mc_luma_vals values and on the minimum
     * being zero; this ensures that dither is properly initialized. */
    i = 0;
    for (a = 0; a < 256; a++) {
        if (i < c->mc_pal_size - 1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for (b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if (i >= c->mc_pal_size - 1) dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0; row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;

                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3 - (index2[pix] & 3);
                    else
                        row2 |= 3 - (index1[pix] & 3);
                }
                else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);
                }
            }
            charset[y + 0x000] = row1;
            if (INTERLACED) charset[y + 0x800] = row2;
        }
        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--; /* redo now adjusted char */
            /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;

            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}

static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;

    avpriv_elbg_free(&c->elbg);

    av_freep(&c->mc_meta_charset);
    av_freep(&c->mc_best_cb);
    av_freep(&c->mc_charmap);
    av_freep(&c->mc_colram);
    return 0;
}

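/* Encoder initialization: the charset lifetime (number of frames that share
 * one charset) is derived from the global quality setting, the luma of each
 * gradient color is precomputed with approximate BT.601 weights, and the
 * work buffers plus the 32-byte extradata header are allocated. */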
static av_cold int a64multi_encode_init(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;
    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality / FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a] = a64_palette[mc_colors[a]][0] * 0.30 +
                             a64_palette[mc_colors[a]][1] * 0.59 +
                             a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_calloc(c->mc_lifetime, 32000 * sizeof(int))) ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))    ||
        !(c->mc_charmap      = av_calloc(c->mc_lifetime, 1000 * sizeof(int)))  ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + AV_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);
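    /* Extradata layout (8 big-endian 32-bit words, partly filled in later by
     * a64multi_encode_frame): [0] charset lifetime, [4] number of frames in
     * the current packet, [8] charset size, [12] screen + colram size,
     * [16] INTERLACED flag; the remaining words stay zero. */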
    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}

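/* Compress the colram flags of one 1000-character screen into 0x100 bytes:
 * the flags of cells a, a+0x100, a+0x200 and a+0x300 end up in bits 0-3 of
 * output byte a (the last group only exists for a < 0xe8, since a screen
 * holds 0x3e8 cells). The result is shifted left by two; presumably that is
 * the form the C64-side display code expects. */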
static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;
    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}

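/* Frame-level entry point. The encoder buffers mc_lifetime input frames,
 * then runs ELBG once to build one charset shared by all of them and emits a
 * single packet: [charset][screen 0][colram 0][screen 1][colram 1]... with
 * colram blocks only present in 5-color mode. Passing a NULL frame flushes
 * the remaining buffered frames with a correspondingly shortened lifetime. */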
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *p, int *got_packet)
{
    A64Context *c = avctx->priv_data;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf = NULL;

    int *charmap    = c->mc_charmap;
    uint8_t *colram = c->mc_colram;
    int *meta       = c->mc_meta_charset;
    int *best_cb    = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;

    if (CROP_SCREENS) {
        b_height = FFMIN(avctx->height, C64YRES) >> 3;
        b_width  = FFMIN(avctx->width , C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

    /* no data means end encoding asap */
    if (!p) {
        /* all done, end encoding */
        if (!c->mc_lifetime) return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        }
        /* still frames in queue, so limit lifetime to the remaining frames */
        else c->mc_lifetime = c->mc_frame_counter;
        /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until the lifetime is reached */
        if (c->mc_frame_counter < c->mc_lifetime) {
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = p->pts;
            /* lifetime is not reached yet, so wait for the next frame first */
            return 0;
        }
    }

    /* lifetime reached, so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
            int alloc_size = charset_size + c->mc_lifetime * (screen_size + colram_size);
            if ((ret = ff_get_encode_buffer(avctx, pkt, alloc_size, 0)) < 0)
                return ret;
            buf = pkt->data;

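            /* ELBG treats every character cell as a 32-dimensional point
             * (4x8 averaged luma samples); 1000 cells per buffered screen are
             * clustered into CHARSET_CHARS codebook entries using at most 50
             * refinement steps, and charmap[] receives the index of the
             * closest codebook entry for every screen position. */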
            /* calc optimal new charset + charmaps */
            ret = avpriv_elbg_do(&c->elbg, meta, 32, 1000 * c->mc_lifetime,
                                 best_cb, CHARSET_CHARS, 50, charmap, &c->randctx, 0);
            if (ret < 0)
                return ret;

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, buf, colram);

            /* advance pointers */
            buf      += charset_size;
            req_size += charset_size;
        }

        /* write x frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance pointers */
            buf      += screen_size;
            req_size += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf      += colram_size;
                req_size += colram_size;
            }

            /* advance to next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4,  c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8,  charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts = pkt->dts = c->next_pts;
        c->next_pts         = AV_NOPTS_VALUE;

        av_assert0(pkt->size == req_size);
        *got_packet = !!req_size;
    }
    return 0;
}

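/* Both registered encoders run exactly the same code; a64multi5 differs only
 * in its codec ID, which makes init() enable the fifth color / colram path. */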
#if CONFIG_A64MULTI_ENCODER
const AVCodec ff_a64multi_encoder = {
    .name           = "a64multi",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
#if CONFIG_A64MULTI5_ENCODER
const AVCodec ff_a64multi5_encoder = {
    .name           = "a64multi5",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI5,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
Definition: vf_fspp.c:58