ulti.c
/*
 * IBM Ultimotion Video Decoder
 * Copyright (C) 2004 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IBM Ultimotion Video Decoder.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

#include "ulti_cb.h"

typedef struct UltimotionDecodeContext {
    AVCodecContext *avctx;
    int width, height, blocks;
    AVFrame *frame;
    const uint8_t *ulti_codebook;
    GetByteContext gb;
} UltimotionDecodeContext;

static av_cold int ulti_decode_init(AVCodecContext *avctx)
{
    UltimotionDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->width = avctx->width;
    s->height = avctx->height;
    s->blocks = (s->width / 8) * (s->height / 8);
    if (s->blocks == 0)
        return AVERROR_INVALIDDATA;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;
    s->ulti_codebook = ulti_codebook;

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold int ulti_decode_end(AVCodecContext *avctx)
{
    UltimotionDecodeContext *s = avctx->priv_data;

    av_frame_free(&s->frame);

    return 0;
}

static const int block_coords[8] = // 4x4 block coords in 8x8 superblock
    { 0, 0, 0, 4, 4, 4, 4, 0};

static const int angle_by_index[4] = { 0, 2, 6, 12};

/* Lookup tables for luma and chroma - used by ulti_convert_yuv() */
static const uint8_t ulti_lumas[64] =
    { 0x10, 0x13, 0x17, 0x1A, 0x1E, 0x21, 0x25, 0x28,
      0x2C, 0x2F, 0x33, 0x36, 0x3A, 0x3D, 0x41, 0x44,
      0x48, 0x4B, 0x4F, 0x52, 0x56, 0x59, 0x5C, 0x60,
      0x63, 0x67, 0x6A, 0x6E, 0x71, 0x75, 0x78, 0x7C,
      0x7F, 0x83, 0x86, 0x8A, 0x8D, 0x91, 0x94, 0x98,
      0x9B, 0x9F, 0xA2, 0xA5, 0xA9, 0xAC, 0xB0, 0xB3,
      0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8, 0xCC, 0xCF,
      0xD3, 0xD6, 0xDA, 0xDD, 0xE1, 0xE4, 0xE8, 0xEB};

static const uint8_t ulti_chromas[16] =
    { 0x60, 0x67, 0x6D, 0x73, 0x7A, 0x80, 0x86, 0x8D,
      0x93, 0x99, 0xA0, 0xA6, 0xAC, 0xB3, 0xB9, 0xC0};

/* convert Ultimotion YUV block (sixteen 6-bit Y samples and
   two 4-bit chroma samples) into standard YUV and put it into frame */
static void ulti_convert_yuv(AVFrame *frame, int x, int y,
                             uint8_t *luma, int chroma)
{
    uint8_t *y_plane, *cr_plane, *cb_plane;
    int i;

    y_plane = frame->data[0] + x + y * frame->linesize[0];
    cr_plane = frame->data[1] + (x / 4) + (y / 4) * frame->linesize[1];
    cb_plane = frame->data[2] + (x / 4) + (y / 4) * frame->linesize[2];

    cr_plane[0] = ulti_chromas[chroma >> 4];

    cb_plane[0] = ulti_chromas[chroma & 0xF];

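    /* write the sixteen 6-bit luma samples four per row, moving to the
       next line of the Y plane after every fourth sample */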
    for(i = 0; i < 16; i++){
        y_plane[i & 3] = ulti_lumas[luma[i]];
        if((i & 3) == 3) { //next row
            y_plane += frame->linesize[0];
        }
    }
}

/* generate block like in MS Video1 */
static void ulti_pattern(AVFrame *frame, int x, int y,
                         int f0, int f1, int Y0, int Y1, int chroma)
{
    uint8_t Luma[16];
    int mask, i;
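    /* f0 and f1 are 8-bit patterns: f0 covers the top two rows of the 4x4
       block, f1 the bottom two; each set bit selects Y1, each clear bit Y0 */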
    for(mask = 0x80, i = 0; mask; mask >>= 1, i++) {
        if(f0 & mask)
            Luma[i] = Y1;
        else
            Luma[i] = Y0;
    }

    for(mask = 0x80, i = 8; mask; mask >>= 1, i++) {
        if(f1 & mask)
            Luma[i] = Y1;
        else
            Luma[i] = Y0;
    }

    ulti_convert_yuv(frame, x, y, Luma, chroma);
}

/* fill block with some gradient */
static void ulti_grad(AVFrame *frame, int x, int y, uint8_t *Y, int chroma, int angle)
{
    uint8_t Luma[16];
    if(angle & 8) { //reverse order
        int t;
        angle &= 0x7;
        t = Y[0];
        Y[0] = Y[3];
        Y[3] = t;
        t = Y[1];
        Y[1] = Y[2];
        Y[2] = t;
    }
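    /* each case distributes the four base luma values Y[0]..Y[3] across the
       4x4 block along a different gradient direction */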
    switch(angle){
    case 0:
        Luma[0] = Y[0]; Luma[1] = Y[1]; Luma[2] = Y[2]; Luma[3] = Y[3];
        Luma[4] = Y[0]; Luma[5] = Y[1]; Luma[6] = Y[2]; Luma[7] = Y[3];
        Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[2]; Luma[11] = Y[3];
        Luma[12] = Y[0]; Luma[13] = Y[1]; Luma[14] = Y[2]; Luma[15] = Y[3];
        break;
    case 1:
        Luma[0] = Y[1]; Luma[1] = Y[2]; Luma[2] = Y[3]; Luma[3] = Y[3];
        Luma[4] = Y[0]; Luma[5] = Y[1]; Luma[6] = Y[2]; Luma[7] = Y[3];
        Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[2]; Luma[11] = Y[3];
        Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[1]; Luma[15] = Y[2];
        break;
    case 2:
        Luma[0] = Y[1]; Luma[1] = Y[2]; Luma[2] = Y[3]; Luma[3] = Y[3];
        Luma[4] = Y[1]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[3];
        Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[2];
        Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[1]; Luma[15] = Y[2];
        break;
    case 3:
        Luma[0] = Y[2]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[3];
        Luma[4] = Y[1]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[3];
        Luma[8] = Y[0]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[2];
        Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[1];
        break;
    case 4:
        Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[3];
        Luma[4] = Y[2]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[2];
        Luma[8] = Y[1]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[1];
        Luma[12] = Y[0]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[0];
        break;
    case 5:
        Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[3]; Luma[3] = Y[2];
        Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[2]; Luma[7] = Y[1];
        Luma[8] = Y[2]; Luma[9] = Y[1]; Luma[10] = Y[1]; Luma[11] = Y[0];
        Luma[12] = Y[1]; Luma[13] = Y[0]; Luma[14] = Y[0]; Luma[15] = Y[0];
        break;
    case 6:
        Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[2]; Luma[3] = Y[2];
        Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[1]; Luma[7] = Y[1];
        Luma[8] = Y[2]; Luma[9] = Y[2]; Luma[10] = Y[1]; Luma[11] = Y[0];
        Luma[12] = Y[1]; Luma[13] = Y[1]; Luma[14] = Y[0]; Luma[15] = Y[0];
        break;
    case 7:
        Luma[0] = Y[3]; Luma[1] = Y[3]; Luma[2] = Y[2]; Luma[3] = Y[1];
        Luma[4] = Y[3]; Luma[5] = Y[2]; Luma[6] = Y[1]; Luma[7] = Y[0];
        Luma[8] = Y[3]; Luma[9] = Y[2]; Luma[10] = Y[1]; Luma[11] = Y[0];
        Luma[12] = Y[2]; Luma[13] = Y[1]; Luma[14] = Y[0]; Luma[15] = Y[0];
        break;
    default:
        Luma[0] = Y[0]; Luma[1] = Y[0]; Luma[2] = Y[1]; Luma[3] = Y[1];
        Luma[4] = Y[0]; Luma[5] = Y[0]; Luma[6] = Y[1]; Luma[7] = Y[1];
        Luma[8] = Y[2]; Luma[9] = Y[2]; Luma[10] = Y[3]; Luma[11] = Y[3];
        Luma[12] = Y[2]; Luma[13] = Y[2]; Luma[14] = Y[3]; Luma[15] = Y[3];
        break;
    }

    ulti_convert_yuv(frame, x, y, Luma, chroma);
}

static int ulti_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UltimotionDecodeContext *s = avctx->priv_data;
    int modifier = 0;
    int uniq = 0;
    int mode = 0;
    int blocks = 0;
    int done = 0;
    int x = 0, y = 0;
    int i, ret;
    int skip;
    int tmp;

    if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    bytestream2_init(&s->gb, buf, buf_size);

    while(!done) {
        int idx;
        if(blocks >= s->blocks || y >= s->height)
            break;//all blocks decoded

        if (bytestream2_get_bytes_left(&s->gb) < 1)
            goto err;
        idx = bytestream2_get_byteu(&s->gb);
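        /* opcodes 0x70-0x77 are escape codes; any other value is a block
           header carrying four 2-bit subblock codes */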
        if((idx & 0xF8) == 0x70) {
            switch(idx) {
            case 0x70: //change modifier
                modifier = bytestream2_get_byte(&s->gb);
                if(modifier>1)
                    av_log(avctx, AV_LOG_INFO, "warning: modifier must be 0 or 1, got %i\n", modifier);
                break;
            case 0x71: // set uniq flag
                uniq = 1;
                break;
            case 0x72: //toggle mode
                mode = !mode;
                break;
            case 0x73: //end-of-frame
                done = 1;
                break;
            case 0x74: //skip some blocks
                skip = bytestream2_get_byte(&s->gb);
                if ((blocks + skip) >= s->blocks)
                    break;
                blocks += skip;
                x += skip * 8;
                while(x >= s->width) {
                    x -= s->width;
                    y += 8;
                }
                break;
            default:
                av_log(avctx, AV_LOG_INFO, "warning: unknown escape 0x%02X\n", idx);
            }
        } else { //handle one block
            int code;
            int cf;
            int angle = 0;
            uint8_t Y[4]; // luma samples of block
            int tx = 0, ty = 0; //coords of subblock
            int chroma = 0;
            if (mode || uniq) {
                uniq = 0;
                cf = 1;
                chroma = 0;
            } else {
                cf = 0;
                if (idx) {
                    chroma = bytestream2_get_byte(&s->gb);
                }
            }
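            /* 2-bit subblock codes: 0 = leave subblock unchanged, 1 = flat fill,
               2 = four-level gradient, 3 = raw samples, gradient or pattern */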
            for (i = 0; i < 4; i++) { // for every subblock
                code = (idx >> (6 - i*2)) & 3; //extract 2 bits
                if(!code) //skip subblock
                    continue;
                if(cf) {
                    chroma = bytestream2_get_byte(&s->gb);
                }
                tx = x + block_coords[i * 2];
                ty = y + block_coords[(i * 2) + 1];
                switch(code) {
                case 1:
                    tmp = bytestream2_get_byte(&s->gb);

                    angle = angle_by_index[(tmp >> 6) & 0x3];

                    Y[0] = tmp & 0x3F;
                    Y[1] = Y[0];

                    if (angle) {
                        Y[2] = Y[0]+1;
                        if (Y[2] > 0x3F)
                            Y[2] = 0x3F;
                        Y[3] = Y[2];
                    } else {
                        Y[2] = Y[0];
                        Y[3] = Y[0];
                    }
                    break;

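                /* case 2: four luma levels, either unpacked from 24 bits or
                   fetched from the codebook via a 12-bit index */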
                case 2:
                    if (modifier) { // unpack four luma samples
                        tmp = bytestream2_get_be24(&s->gb);

                        Y[0] = (tmp >> 18) & 0x3F;
                        Y[1] = (tmp >> 12) & 0x3F;
                        Y[2] = (tmp >> 6) & 0x3F;
                        Y[3] = tmp & 0x3F;
                        angle = 16;
                    } else { // retrieve luma samples from codebook
                        tmp = bytestream2_get_be16(&s->gb);

                        angle = (tmp >> 12) & 0xF;
                        tmp &= 0xFFF;
                        tmp <<= 2;
                        Y[0] = s->ulti_codebook[tmp];
                        Y[1] = s->ulti_codebook[tmp + 1];
                        Y[2] = s->ulti_codebook[tmp + 2];
                        Y[3] = s->ulti_codebook[tmp + 3];
                    }
                    break;

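                /* case 3: the whole 4x4 subblock is coded explicitly, either as
                   16 raw luma samples, a four-point gradient or a two-value pattern */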
                case 3:
                    if (modifier) { // all 16 luma samples
                        uint8_t Luma[16];

                        if (bytestream2_get_bytes_left(&s->gb) < 12)
                            goto err;
                        tmp = bytestream2_get_be24u(&s->gb);
                        Luma[0] = (tmp >> 18) & 0x3F;
                        Luma[1] = (tmp >> 12) & 0x3F;
                        Luma[2] = (tmp >> 6) & 0x3F;
                        Luma[3] = tmp & 0x3F;

                        tmp = bytestream2_get_be24u(&s->gb);
                        Luma[4] = (tmp >> 18) & 0x3F;
                        Luma[5] = (tmp >> 12) & 0x3F;
                        Luma[6] = (tmp >> 6) & 0x3F;
                        Luma[7] = tmp & 0x3F;

                        tmp = bytestream2_get_be24u(&s->gb);
                        Luma[8] = (tmp >> 18) & 0x3F;
                        Luma[9] = (tmp >> 12) & 0x3F;
                        Luma[10] = (tmp >> 6) & 0x3F;
                        Luma[11] = tmp & 0x3F;

                        tmp = bytestream2_get_be24u(&s->gb);
                        Luma[12] = (tmp >> 18) & 0x3F;
                        Luma[13] = (tmp >> 12) & 0x3F;
                        Luma[14] = (tmp >> 6) & 0x3F;
                        Luma[15] = tmp & 0x3F;

                        ulti_convert_yuv(s->frame, tx, ty, Luma, chroma);
                    } else {
                        if (bytestream2_get_bytes_left(&s->gb) < 4)
                            goto err;
                        tmp = bytestream2_get_byteu(&s->gb);
                        if(tmp & 0x80) {
                            angle = (tmp >> 4) & 0x7;
                            tmp = (tmp << 8) + bytestream2_get_byteu(&s->gb);
                            Y[0] = (tmp >> 6) & 0x3F;
                            Y[1] = tmp & 0x3F;
                            Y[2] = bytestream2_get_byteu(&s->gb) & 0x3F;
                            Y[3] = bytestream2_get_byteu(&s->gb) & 0x3F;
                            ulti_grad(s->frame, tx, ty, Y, chroma, angle); //draw block
                        } else { // some patterns
                            int f0 = tmp;
                            int f1 = bytestream2_get_byteu(&s->gb);
                            Y[0] = bytestream2_get_byteu(&s->gb) & 0x3F;
                            Y[1] = bytestream2_get_byteu(&s->gb) & 0x3F;
                            ulti_pattern(s->frame, tx, ty, f0, f1, Y[0], Y[1], chroma);
                        }
                    }
                    break;
                }
                if(code != 3)
                    ulti_grad(s->frame, tx, ty, Y, chroma, angle); // draw block
            }
            blocks++;
            x += 8;
            if(x >= s->width) {
                x = 0;
                y += 8;
            }
        }
    }

    *got_frame = 1;
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    return buf_size;

err:
    av_log(avctx, AV_LOG_ERROR,
           "Insufficient data\n");
    return AVERROR_INVALIDDATA;
}

AVCodec ff_ulti_decoder = {
    .name           = "ultimotion",
    .long_name      = NULL_IF_CONFIG_SMALL("IBM UltiMotion"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ULTI,
    .priv_data_size = sizeof(UltimotionDecodeContext),
    .init           = ulti_decode_init,
    .close          = ulti_decode_end,
    .decode         = ulti_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};