FFmpeg
notchlc.c
1 /*
2  * NotchLC decoder
3  * Copyright (c) 2020 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 
26 #define BITSTREAM_READER_LE
27 #include "libavutil/intreadwrite.h"
28 #include "avcodec.h"
29 #include "bytestream.h"
30 #include "get_bits.h"
31 #include "internal.h"
32 #include "lzf.h"
33 #include "thread.h"
34 
35 typedef struct NotchLCContext {
36  unsigned compressed_size;
37  unsigned format;
38 
39  uint8_t *uncompressed_buffer;
40  unsigned uncompressed_size;
41 
42  uint8_t *lzf_buffer;
43  int64_t lzf_size;
44 
45  unsigned texture_size_x;
46  unsigned texture_size_y;
47  unsigned y_data_row_offsets;
48  unsigned uv_offset_data_offset;
49  unsigned y_control_data_offset;
50  unsigned a_control_word_offset;
51  unsigned y_data_offset;
52  unsigned uv_data_offset;
53  unsigned y_data_size;
54  unsigned uv_count_size;
55  unsigned uv_count_offset;
56  unsigned a_count_size;
57  unsigned data_end;
58 
59  GetByteContext gb;
60  PutByteContext pb;
61 } NotchLCContext;
62 
63 static av_cold int decode_init(AVCodecContext *avctx)
64 {
65  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
66  avctx->color_range = AVCOL_RANGE_JPEG;
67  avctx->colorspace = AVCOL_SPC_RGB;
68  avctx->color_primaries = AVCOL_PRI_BT709;
69  avctx->color_trc = AVCOL_TRC_IEC61966_2_1;
70 
71  return 0;
72 }
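
decode_init() above requests full-range, 12-bit planar YUV output. As a rough illustration (read_yuv12() is a hypothetical helper, not FFmpeg API, and it assumes the AV_PIX_FMT_YUV444P12 format set above), this is how a caller could pull one pixel out of a decoded frame; samples sit one per uint16_t with values 0..4095.

#include <stdint.h>
#include <libavutil/frame.h>

/* Hypothetical helper: fetch the Y/U/V triplet at (x, y) from a frame in
 * AV_PIX_FMT_YUV444P12 (one 12-bit sample per uint16_t, full JPEG range). */
static void read_yuv12(const AVFrame *f, int x, int y,
                       unsigned *Y, unsigned *U, unsigned *V)
{
    *Y = ((const uint16_t *)(f->data[0] + y * f->linesize[0]))[x];
    *U = ((const uint16_t *)(f->data[1] + y * f->linesize[1]))[x];
    *V = ((const uint16_t *)(f->data[2] + y * f->linesize[2]))[x];
}
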
73 
74 #define HISTORY_SIZE (64 * 1024)
75 
76 static int lz4_decompress(AVCodecContext *avctx,
77  GetByteContext *gb,
78  PutByteContext *pb)
79 {
80  unsigned reference_pos, match_length, delta, pos = 0;
81  uint8_t history[64 * 1024];
82 
83  while (bytestream2_get_bytes_left(gb) > 0) {
84  uint8_t token = bytestream2_get_byte(gb);
85  unsigned num_literals = token >> 4;
86 
87  if (num_literals == 15) {
88  unsigned char current;
89  do {
90  current = bytestream2_get_byte(gb);
91  num_literals += current;
92  } while (current == 255);
93  }
94 
95  if (pos + num_literals < HISTORY_SIZE) {
96  bytestream2_get_buffer(gb, history + pos, num_literals);
97  pos += num_literals;
98  } else {
99  while (num_literals-- > 0) {
100  history[pos++] = bytestream2_get_byte(gb);
101  if (pos == HISTORY_SIZE) {
102  bytestream2_put_buffer(pb, history, HISTORY_SIZE);
103  pos = 0;
104  }
105  }
106  }
107 
108  if (bytestream2_get_bytes_left(gb) <= 0)
109  break;
110 
111  delta = bytestream2_get_byte(gb);
112  delta |= (unsigned)bytestream2_get_byte(gb) << 8;
113  if (delta == 0)
114  return 0;
115  match_length = 4 + (token & 0x0F);
116  if (match_length == 4 + 0x0F) {
117  uint8_t current;
118 
119  do {
120  current = bytestream2_get_byte(gb);
121  match_length += current;
122  } while (current == 255);
123  }
124  reference_pos = (pos >= delta) ? (pos - delta) : (HISTORY_SIZE + pos - delta);
125  if (pos + match_length < HISTORY_SIZE && reference_pos + match_length < HISTORY_SIZE) {
126  if (pos >= reference_pos + match_length || reference_pos >= pos + match_length) {
127  memcpy(history + pos, history + reference_pos, match_length);
128  pos += match_length;
129  } else {
130  while (match_length-- > 0)
131  history[pos++] = history[reference_pos++];
132  }
133  } else {
134  while (match_length-- > 0) {
135  history[pos++] = history[reference_pos++];
136  if (pos == HISTORY_SIZE) {
137  bytestream2_put_buffer(pb, history, HISTORY_SIZE);
138  pos = 0;
139  }
140  reference_pos %= HISTORY_SIZE;
141  }
142  }
143  }
144 
145  bytestream2_put_buffer(pb, history, pos);
146 
147  return bytestream2_tell_p(pb);
148 }
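
For reference, the LZ4 sequence layout the loop above consumes: a token byte whose high nibble is the literal count and whose low nibble, plus 4, is the match length, each extended by 255-terminated continuation bytes, followed by the literals and a 16-bit little-endian match offset. Below is a minimal standalone sketch (lz4_sequence() is a hypothetical illustration, not part of notchlc.c) that decodes one sequence into a flat buffer, without the 64 KiB history ring buffer the decoder uses to bound memory.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: decode one LZ4 sequence (token, literals, offset,
 * match) from src into dst, assuming well-formed input and enough room. */
static size_t lz4_sequence(const uint8_t *src, size_t src_len,
                           uint8_t *dst, size_t dst_pos)
{
    size_t in = 0;
    uint8_t token = src[in++];
    unsigned literals = token >> 4;
    unsigned match    = (token & 0x0F) + 4;

    if (literals == 15) {                   /* 255-terminated extension */
        uint8_t ext;
        do { ext = src[in++]; literals += ext; } while (ext == 255);
    }

    memcpy(dst + dst_pos, src + in, literals);
    in      += literals;
    dst_pos += literals;

    if (in >= src_len)                      /* last sequence has no match */
        return dst_pos;

    unsigned offset = src[in] | (src[in + 1] << 8);
    in += 2;

    if ((token & 0x0F) == 15) {
        uint8_t ext;
        do { ext = src[in++]; match += ext; } while (ext == 255);
    }

    /* Byte-by-byte copy so overlapping matches (offset < length) repeat. */
    for (unsigned i = 0; i < match; i++, dst_pos++)
        dst[dst_pos] = dst[dst_pos - offset];

    return dst_pos;
}

int main(void)
{
    /* "abcd" as literals, then copy 8 bytes from offset 4. */
    const uint8_t seq[] = { 0x44, 'a', 'b', 'c', 'd', 0x04, 0x00 };
    uint8_t out[32] = { 0 };
    size_t n = lz4_sequence(seq, sizeof(seq), out, 0);
    printf("%.*s\n", (int)n, (char *)out);
    return 0;
}

Compiled alone, this prints "abcdabcdabcd", showing how an overlapping match (offset smaller than length) repeats earlier output.
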
149 
150 static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
151  unsigned uncompressed_size)
152 {
153  NotchLCContext *s = avctx->priv_data;
154  GetByteContext rgb, dgb, *gb = &s->gb;
155  GetBitContext bit;
156  int ylinesize, ulinesize, vlinesize, alinesize;
157  uint16_t *dsty, *dstu, *dstv, *dsta;
158  int ret;
159 
160  s->texture_size_x = bytestream2_get_le32(gb);
161  s->texture_size_y = bytestream2_get_le32(gb);
162 
163  ret = ff_set_dimensions(avctx, s->texture_size_x, s->texture_size_y);
164  if (ret < 0)
165  return ret;
166 
167  s->uv_offset_data_offset = bytestream2_get_le32(gb);
168  if (s->uv_offset_data_offset >= UINT_MAX / 4)
169  return AVERROR_INVALIDDATA;
170  s->uv_offset_data_offset *= 4;
171  if (s->uv_offset_data_offset >= uncompressed_size)
172  return AVERROR_INVALIDDATA;
173 
174  s->y_control_data_offset = bytestream2_get_le32(gb);
175  if (s->y_control_data_offset >= UINT_MAX / 4)
176  return AVERROR_INVALIDDATA;
177  s->y_control_data_offset *= 4;
178  if (s->y_control_data_offset >= uncompressed_size)
179  return AVERROR_INVALIDDATA;
180 
181  s->a_control_word_offset = bytestream2_get_le32(gb);
182  if (s->a_control_word_offset >= UINT_MAX / 4)
183  return AVERROR_INVALIDDATA;
184  s->a_control_word_offset *= 4;
185  if (s->a_control_word_offset >= uncompressed_size)
186  return AVERROR_INVALIDDATA;
187 
188  s->uv_data_offset = bytestream2_get_le32(gb);
189  if (s->uv_data_offset >= UINT_MAX / 4)
190  return AVERROR_INVALIDDATA;
191  s->uv_data_offset *= 4;
192  if (s->uv_data_offset >= uncompressed_size)
193  return AVERROR_INVALIDDATA;
194 
195  s->y_data_size = bytestream2_get_le32(gb);
196  if (s->y_data_size >= UINT_MAX / 4)
197  return AVERROR_INVALIDDATA;
198 
199  s->uv_count_size = bytestream2_get_le32(gb);
200  if (s->uv_count_size >= UINT_MAX / 4)
201  return AVERROR_INVALIDDATA;
202  s->uv_count_size *= 4;
203  if (s->uv_count_size >= uncompressed_size)
204  return AVERROR_INVALIDDATA;
205 
206  s->a_count_size = bytestream2_get_le32(gb);
207  if (s->a_count_size >= UINT_MAX / 4)
208  return AVERROR_INVALIDDATA;
209  s->a_count_size *= 4;
210  if (s->a_count_size >= uncompressed_size)
211  return AVERROR_INVALIDDATA;
212 
213  s->data_end = bytestream2_get_le32(gb);
214  if (s->data_end > uncompressed_size)
215  return AVERROR_INVALIDDATA;
216 
217  s->y_data_row_offsets = bytestream2_tell(gb);
218  if (s->data_end <= s->y_data_size)
219  return AVERROR_INVALIDDATA;
220  s->y_data_offset = s->data_end - s->y_data_size;
221  if (s->y_data_offset <= s->uv_count_size)
222  return AVERROR_INVALIDDATA;
223  s->uv_count_offset = s->y_data_offset - s->uv_count_size;
224 
225  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
226  return ret;
227 
228  rgb = *gb;
229  dgb = *gb;
230  bytestream2_seek(&rgb, s->y_data_row_offsets, SEEK_SET);
231  bytestream2_seek(gb, s->y_control_data_offset, SEEK_SET);
232 
233  dsty = (uint16_t *)p->data[0];
234  dsta = (uint16_t *)p->data[3];
235  ylinesize = p->linesize[0] / 2;
236  alinesize = p->linesize[3] / 2;
237 
238  for (int y = 0; y < avctx->height; y += 4) {
239  const unsigned row_offset = bytestream2_get_le32(&rgb);
240 
241  bytestream2_seek(&dgb, s->y_data_offset + row_offset, SEEK_SET);
242 
243  init_get_bits8(&bit, dgb.buffer, bytestream2_get_bytes_left(&dgb));
244  for (int x = 0; x < avctx->width; x += 4) {
245  unsigned item = bytestream2_get_le32(gb);
246  unsigned y_min = item & 4095;
247  unsigned y_max = (item >> 12) & 4095;
248  unsigned y_diff = y_max - y_min;
249  unsigned control[4];
250 
251  control[0] = (item >> 24) & 3;
252  control[1] = (item >> 26) & 3;
253  control[2] = (item >> 28) & 3;
254  control[3] = (item >> 30) & 3;
255 
256  for (int i = 0; i < 4; i++) {
257  const int nb_bits = control[i] + 1;
258  const int div = (1 << nb_bits) - 1;
259  const int add = div - 1;
260 
261  dsty[x + i * ylinesize + 0] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
262  dsty[x + i * ylinesize + 1] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
263  dsty[x + i * ylinesize + 2] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
264  dsty[x + i * ylinesize + 3] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
265  }
266  }
267 
268  dsty += 4 * ylinesize;
269  dsta += 4 * alinesize;
270  }
271 
272  rgb = *gb;
273  dgb = *gb;
274  bytestream2_seek(&rgb, s->uv_offset_data_offset, SEEK_SET);
275  bytestream2_seek(gb, s->a_control_word_offset, SEEK_SET);
276 
277  dstu = (uint16_t *)p->data[1];
278  dstv = (uint16_t *)p->data[2];
279  ulinesize = p->linesize[1] / 2;
280  vlinesize = p->linesize[2] / 2;
281 
282  for (int y = 0; y < avctx->height; y += 16) {
283  for (int x = 0; x < avctx->width; x += 16) {
284  unsigned offset = bytestream2_get_le32(&rgb) * 4;
285  int u[16][16] = { 0 }, v[16][16] = { 0 };
286  int u0, v0, u1, v1, udif, vdif;
287  unsigned escape, is8x8, loc;
288 
289  bytestream2_seek(&dgb, s->uv_data_offset + offset, SEEK_SET);
290 
291  is8x8 = bytestream2_get_le16(&dgb);
292  escape = bytestream2_get_le16(&dgb);
293 
294  if (escape == 0 && is8x8 == 0) {
295  u0 = bytestream2_get_byte(&dgb);
296  v0 = bytestream2_get_byte(&dgb);
297  u1 = bytestream2_get_byte(&dgb);
298  v1 = bytestream2_get_byte(&dgb);
299  loc = bytestream2_get_le32(&dgb);
300  u0 = (u0 << 4) | (u0 & 0xF);
301  v0 = (v0 << 4) | (v0 & 0xF);
302  u1 = (u1 << 4) | (u1 & 0xF);
303  v1 = (v1 << 4) | (v1 & 0xF);
304  udif = u1 - u0;
305  vdif = v1 - v0;
306 
307  for (int i = 0; i < 16; i += 4) {
308  for (int j = 0; j < 16; j += 4) {
309  for (int ii = 0; ii < 4; ii++) {
310  for (int jj = 0; jj < 4; jj++) {
311  u[i + ii][j + jj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
312  v[i + ii][j + jj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);
313  }
314  }
315 
316  loc >>= 2;
317  }
318  }
319  } else {
320  for (int i = 0; i < 16; i += 8) {
321  for (int j = 0; j < 16; j += 8) {
322  if (is8x8 & 1) {
323  u0 = bytestream2_get_byte(&dgb);
324  v0 = bytestream2_get_byte(&dgb);
325  u1 = bytestream2_get_byte(&dgb);
326  v1 = bytestream2_get_byte(&dgb);
327  loc = bytestream2_get_le32(&dgb);
328  u0 = (u0 << 4) | (u0 & 0xF);
329  v0 = (v0 << 4) | (v0 & 0xF);
330  u1 = (u1 << 4) | (u1 & 0xF);
331  v1 = (v1 << 4) | (v1 & 0xF);
332  udif = u1 - u0;
333  vdif = v1 - v0;
334 
335  for (int ii = 0; ii < 8; ii += 2) {
336  for (int jj = 0; jj < 8; jj += 2) {
337  for (int iii = 0; iii < 2; iii++) {
338  for (int jjj = 0; jjj < 2; jjj++) {
339  u[i + ii + iii][j + jj + jjj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
340  v[i + ii + iii][j + jj + jjj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);
341  }
342  }
343 
344  loc >>= 2;
345  }
346  }
347  } else if (escape) {
348  for (int ii = 0; ii < 8; ii += 4) {
349  for (int jj = 0; jj < 8; jj += 4) {
350  u0 = bytestream2_get_byte(&dgb);
351  v0 = bytestream2_get_byte(&dgb);
352  u1 = bytestream2_get_byte(&dgb);
353  v1 = bytestream2_get_byte(&dgb);
354  loc = bytestream2_get_le32(&dgb);
355  u0 = (u0 << 4) | (u0 & 0xF);
356  v0 = (v0 << 4) | (v0 & 0xF);
357  u1 = (u1 << 4) | (u1 & 0xF);
358  v1 = (v1 << 4) | (v1 & 0xF);
359  udif = u1 - u0;
360  vdif = v1 - v0;
361 
362  for (int iii = 0; iii < 4; iii++) {
363  for (int jjj = 0; jjj < 4; jjj++) {
364  u[i + ii + iii][j + jj + jjj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
365  v[i + ii + iii][j + jj + jjj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);
366 
367  loc >>= 2;
368  }
369  }
370  }
371  }
372  }
373 
374  is8x8 >>= 1;
375  }
376  }
377  }
378 
379  for (int i = 0; i < 16; i++) {
380  for (int j = 0; j < 16; j++) {
381  dstu[x + i * ulinesize + j] = u[i][j];
382  dstv[x + i * vlinesize + j] = v[i][j];
383  }
384  }
385  }
386 
387  dstu += 16 * ulinesize;
388  dstv += 16 * vlinesize;
389  }
390 
391  return 0;
392 }
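
The pixel reconstruction in decode_blocks() boils down to two small formulas: a luma sample is interpolated between the block's 12-bit y_min/y_max pair with a rounded code of control[i]+1 bits, and a chroma sample widens its byte endpoints to 12 bits with (c << 4) | (c & 0xF) before a 2-bit weighted blend divided by 3. A hedged standalone sketch of just those formulas (luma_sample() and chroma_sample() are illustrative names, not functions in this file):

#include <stdio.h>

static unsigned luma_sample(unsigned y_min, unsigned y_max,
                            unsigned code, int nb_bits)
{
    const int div = (1 << nb_bits) - 1;
    const int add = div - 1;
    return y_min + (((y_max - y_min) * code + add) / div);
}

static int chroma_sample(int c0_byte, int c1_byte, unsigned weight2bit)
{
    const int c0 = (c0_byte << 4) | (c0_byte & 0xF);   /* 8 -> 12 bit */
    const int c1 = (c1_byte << 4) | (c1_byte & 0xF);
    return c0 + ((c1 - c0) * (int)weight2bit + 2) / 3;
}

int main(void)
{
    /* 2-bit luma codes over a 12-bit range 100..4000 */
    for (unsigned code = 0; code < 4; code++)
        printf("luma code %u -> %u\n", code, luma_sample(100, 4000, code, 2));
    /* chroma endpoints 0x03 and 0x0C blended with weights 0..3 */
    for (unsigned w = 0; w < 4; w++)
        printf("chroma w %u -> %d\n", w, chroma_sample(0x03, 0x0C, w));
    return 0;
}

With nb_bits = 2 the four codes land on y_min, two intermediate steps and exactly y_max, which is what the rounding term add = div - 1 in the loop above arranges.
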
393 
394 static int decode_frame(AVCodecContext *avctx,
395  void *data, int *got_frame,
396  AVPacket *avpkt)
397 {
398  NotchLCContext *s = avctx->priv_data;
399  ThreadFrame frame = { .f = data };
400  GetByteContext *gb = &s->gb;
401  PutByteContext *pb = &s->pb;
402  unsigned uncompressed_size;
403  AVFrame *p = data;
404  int ret;
405 
406  if (avpkt->size <= 40)
407  return AVERROR_INVALIDDATA;
408 
409  bytestream2_init(gb, avpkt->data, avpkt->size);
410 
411  if (bytestream2_get_le32(gb) != MKBETAG('N','L','C','1'))
412  return AVERROR_INVALIDDATA;
413 
414  uncompressed_size = bytestream2_get_le32(gb);
415  s->compressed_size = bytestream2_get_le32(gb);
416  s->format = bytestream2_get_le32(gb);
417 
418  if (s->format > 2)
419  return AVERROR_PATCHWELCOME;
420 
421  if (s->format == 0) {
422  ret = ff_lzf_uncompress(gb, &s->lzf_buffer, &s->lzf_size);
423  if (ret < 0)
424  return ret;
425 
426  if (uncompressed_size > s->lzf_size)
427  return AVERROR_INVALIDDATA;
428 
429  bytestream2_init(gb, s->lzf_buffer, uncompressed_size);
430  } else if (s->format == 1) {
431  av_fast_padded_malloc(&s->uncompressed_buffer, &s->uncompressed_size,
432  uncompressed_size);
433  if (!s->uncompressed_buffer)
434  return AVERROR(ENOMEM);
435 
436  bytestream2_init_writer(pb, s->uncompressed_buffer, s->uncompressed_size);
437 
438  ret = lz4_decompress(avctx, gb, pb);
439  if (ret != uncompressed_size)
440  return AVERROR_INVALIDDATA;
441 
442  bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
443  }
444 
445  ret = decode_blocks(avctx, p, &frame, uncompressed_size);
446  if (ret < 0)
447  return ret;
448 
449  p->pict_type = AV_PICTURE_TYPE_I;
450  p->key_frame = 1;
451 
452  *got_frame = 1;
453 
454  return avpkt->size;
455 }
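
decode_frame() expects every packet to begin with four little-endian 32-bit words: a magic equal to MKBETAG('N','L','C','1'), the uncompressed payload size, the compressed size, and a format selector (0 chooses LZF, 1 chooses LZ4, values above 2 are rejected). A standalone sketch of that header parse follows; parse_nlc_header(), rl32() and SKETCH_MKBETAG() are hypothetical stand-ins for the bytestream2/MKBETAG helpers used above.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MKBETAG(a, b, c, d) \
    ((uint32_t)(d) | ((uint32_t)(c) << 8) | ((uint32_t)(b) << 16) | ((uint32_t)(a) << 24))

static uint32_t rl32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct nlc_header {
    uint32_t magic, uncompressed_size, compressed_size, format;
};

static int parse_nlc_header(const uint8_t *buf, int size, struct nlc_header *h)
{
    if (size <= 40)                         /* same minimum as decode_frame() */
        return -1;
    h->magic             = rl32(buf + 0);
    h->uncompressed_size = rl32(buf + 4);
    h->compressed_size   = rl32(buf + 8);
    h->format            = rl32(buf + 12);
    if (h->magic != SKETCH_MKBETAG('N', 'L', 'C', '1') || h->format > 2)
        return -1;
    return 0;
}

int main(void)
{
    /* Synthetic 48-byte prefix: tag bytes in the order the little-endian
     * read above implies, remaining fields left at zero. */
    uint8_t buf[48] = { '1', 'C', 'L', 'N' };
    struct nlc_header h;
    if (parse_nlc_header(buf, (int)sizeof(buf), &h) == 0)
        printf("magic ok, format %u\n", h.format);
    return 0;
}

Note that the big-endian tag value is compared against a little-endian read, which is why the synthetic buffer stores the tag bytes reversed.
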
456 
457 static av_cold int decode_end(AVCodecContext *avctx)
458 {
459  NotchLCContext *s = avctx->priv_data;
460 
461  av_freep(&s->uncompressed_buffer);
462  s->uncompressed_size = 0;
463  av_freep(&s->lzf_buffer);
464  s->lzf_size = 0;
465 
466  return 0;
467 }
468 
469 AVCodec ff_notchlc_decoder = {
470  .name = "notchlc",
471  .long_name = NULL_IF_CONFIG_SMALL("NotchLC"),
472  .type = AVMEDIA_TYPE_VIDEO,
473  .id = AV_CODEC_ID_NOTCHLC,
474  .priv_data_size = sizeof(NotchLCContext),
475  .init = decode_init,
476  .close = decode_end,
477  .decode = decode_frame,
478  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
479 };
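
Finally, a hedged sketch of how an application might exercise this decoder through the public libavcodec API. decode_one_notchlc_packet() is a hypothetical wrapper, error handling is trimmed, and a real player would keep one opened context for the whole stream (the codec advertises frame threading) rather than opening per packet.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode one demuxed NotchLC packet into "out". */
static int decode_one_notchlc_packet(const AVPacket *pkt, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_NOTCHLC);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, out);   /* 12-bit planar YUV */

    avcodec_free_context(&ctx);
    return ret;
}
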