FFmpeg
g722enc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) CMU 1993 Computer Science, Speech Group
3  * Chengxiang Lu and Alex Hauptmann
4  * Copyright (c) 2005 Steve Underwood <steveu at coppice.org>
5  * Copyright (c) 2009 Kenan Gillet
6  * Copyright (c) 2010 Martin Storsjo
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * G.722 ADPCM audio encoder
28  */
29 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "avcodec.h"
#include "encode.h"
#include "g722.h"
#include "internal.h"
37 
/* Number of sample pairs processed before the trellis search backtracks,
 * emits the winning path and resets its path buffers (bounds memory use). */
#define FREEZE_INTERVAL 128

/* This is an arbitrary value. Allowing insanely large values leads to strange
   problems, so we limit it to a reasonable value */
#define MAX_FRAME_SIZE 32768

/* We clip the value of avctx->trellis to prevent data type overflows and
   undefined behavior. Using larger values is insanely slow anyway. */
#define MIN_TRELLIS 0
#define MAX_TRELLIS 16
48 
50 {
51  G722Context *c = avctx->priv_data;
52  int i;
53  for (i = 0; i < 2; i++) {
54  av_freep(&c->paths[i]);
55  av_freep(&c->node_buf[i]);
56  av_freep(&c->nodep_buf[i]);
57  }
58  return 0;
59 }
60 
62 {
63  G722Context *c = avctx->priv_data;
64 
65  c->band[0].scale_factor = 8;
66  c->band[1].scale_factor = 2;
67  c->prev_samples_pos = 22;
68 
69  if (avctx->frame_size) {
70  /* validate frame size */
71  if (avctx->frame_size & 1 || avctx->frame_size > MAX_FRAME_SIZE) {
72  int new_frame_size;
73 
74  if (avctx->frame_size == 1)
75  new_frame_size = 2;
76  else if (avctx->frame_size > MAX_FRAME_SIZE)
77  new_frame_size = MAX_FRAME_SIZE;
78  else
79  new_frame_size = avctx->frame_size - 1;
80 
81  av_log(avctx, AV_LOG_WARNING, "Requested frame size is not "
82  "allowed. Using %d instead of %d\n", new_frame_size,
83  avctx->frame_size);
84  avctx->frame_size = new_frame_size;
85  }
86  } else {
87  /* This is arbitrary. We use 320 because it's 20ms @ 16kHz, which is
88  a common packet size for VoIP applications */
89  avctx->frame_size = 320;
90  }
91  avctx->initial_padding = 22;
92 
93  if (avctx->trellis) {
94  /* validate trellis */
95  if (avctx->trellis < MIN_TRELLIS || avctx->trellis > MAX_TRELLIS) {
96  int new_trellis = av_clip(avctx->trellis, MIN_TRELLIS, MAX_TRELLIS);
97  av_log(avctx, AV_LOG_WARNING, "Requested trellis value is not "
98  "allowed. Using %d instead of %d\n", new_trellis,
99  avctx->trellis);
100  avctx->trellis = new_trellis;
101  }
102  if (avctx->trellis) {
103  int frontier = 1 << avctx->trellis;
104  int max_paths = frontier * FREEZE_INTERVAL;
105 
106  for (int i = 0; i < 2; i++) {
107  c->paths[i] = av_calloc(max_paths, sizeof(**c->paths));
108  c->node_buf[i] = av_calloc(frontier, 2 * sizeof(**c->node_buf));
109  c->nodep_buf[i] = av_calloc(frontier, 2 * sizeof(**c->nodep_buf));
110  if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i])
111  return AVERROR(ENOMEM);
112  }
113  }
114  }
115 
116  ff_g722dsp_init(&c->dsp);
117 
118  return 0;
119 }
120 
/* Low-band quantizer decision thresholds (compared against the error
 * magnitude scaled by the band's scale_factor in encode_low()).
 * Only the first 29 entries are initialized and scanned (encode_low()
 * iterates while i < 29); the remaining entries are zero. */
static const int16_t low_quant[33] = {
      35,   72,  110,  150,  190,  233,  276,  323,
     370,  422,  473,  530,  587,  650,  714,  786,
     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
    1765, 1980, 2195, 2557, 2919
};
127 
128 static inline void filter_samples(G722Context *c, const int16_t *samples,
129  int *xlow, int *xhigh)
130 {
131  int xout[2];
132  c->prev_samples[c->prev_samples_pos++] = samples[0];
133  c->prev_samples[c->prev_samples_pos++] = samples[1];
134  c->dsp.apply_qmf(c->prev_samples + c->prev_samples_pos - 24, xout);
135  *xlow = xout[0] + xout[1] >> 14;
136  *xhigh = xout[0] - xout[1] >> 14;
137  if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
138  memmove(c->prev_samples,
139  c->prev_samples + c->prev_samples_pos - 22,
140  22 * sizeof(c->prev_samples[0]));
141  c->prev_samples_pos = 22;
142  }
143 }
144 
145 static inline int encode_high(const struct G722Band *state, int xhigh)
146 {
147  int diff = av_clip_int16(xhigh - state->s_predictor);
148  int pred = 141 * state->scale_factor >> 8;
149  /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
150  return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
151 }
152 
153 static inline int encode_low(const struct G722Band* state, int xlow)
154 {
155  int diff = av_clip_int16(xlow - state->s_predictor);
156  /* = diff >= 0 ? diff : -(diff + 1) */
157  int limit = diff ^ (diff >> (sizeof(diff)*8-1));
158  int i = 0;
159  limit = limit + 1 << 10;
160  if (limit > low_quant[8] * state->scale_factor)
161  i = 9;
162  while (i < 29 && limit > low_quant[i] * state->scale_factor)
163  i++;
164  return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
165 }
166 
/**
 * Trellis (Viterbi-style) G.722 encoder: for each input sample pair,
 * expands a set of candidate quantizer decisions per band, keeps the
 * best `frontier` (= 1 << trellis) states ranked by accumulated squared
 * decoding error (ssd), and every FREEZE_INTERVAL pairs backtracks the
 * best path to emit output bytes. Both bands are searched independently;
 * each output byte packs the high-band code (2 bits) over the low-band
 * code (6 bits).
 */
static void g722_encode_trellis(G722Context *c, int trellis,
                                uint8_t *dst, int nb_samples,
                                const int16_t *samples)
{
    int i, j, k;
    int frontier = 1 << trellis;
    struct TrellisNode **nodes[2];
    struct TrellisNode **nodes_next[2];
    int pathn[2] = {0, 0}, froze = -1;
    struct TrellisPath *p[2];

    /* Seed each band with a single start node carrying the current
     * adaptive state; nodep_buf holds current + next node-pointer sets. */
    for (i = 0; i < 2; i++) {
        nodes[i] = c->nodep_buf[i];
        nodes_next[i] = c->nodep_buf[i] + frontier;
        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
        nodes[i][0] = c->node_buf[i] + frontier;
        nodes[i][0]->ssd = 0;
        nodes[i][0]->path = 0;
        nodes[i][0]->state = c->band[i];
    }

    for (i = 0; i < nb_samples >> 1; i++) {
        int xlow, xhigh;
        struct TrellisNode *next[2];
        int heap_pos[2] = {0, 0};

        /* Alternate node storage halves between even/odd iterations. */
        for (j = 0; j < 2; j++) {
            next[j] = c->node_buf[j] + frontier*(i & 1);
            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
        }

        filter_samples(c, &samples[2*i], &xlow, &xhigh);

        /* Low band: expand each surviving node with codes around the
         * nominal encode_low() decision. */
        for (j = 0; j < frontier && nodes[0][j]; j++) {
            /* Only k >> 2 affects the future adaptive state, therefore testing
             * small steps that don't change k >> 2 is useless, the original
             * value from encode_low is better than them. Since we step k
             * in steps of 4, make sure range is a multiple of 4, so that
             * we don't miss the original value from encode_low. */
            int range = j < frontier/2 ? 4 : 0;
            struct TrellisNode *cur_node = nodes[0][j];

            int ilow = encode_low(&cur_node->state, xlow);

            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
                int decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                if (k < 0)
                    continue;

                /* Reconstruct what the decoder would produce for code k. */
                decoded = av_clip_intp2((cur_node->state.scale_factor *
                                  ff_g722_low_inv_quant6[k] >> 10)
                                + cur_node->state.s_predictor, 14);
                dec_diff = xlow - decoded;

/* Insert the candidate into the next generation for band `index`,
 * keeping nodes_next as a min-heap on ssd: fill until `frontier`
 * nodes exist, then try replacing a leaf. Expands in the caller's
 * scope (uses ssd/pos/node/dec_diff/cur_node). */
#define STORE_NODE(index, UPDATE, VALUE)\
            ssd = cur_node->ssd + dec_diff*dec_diff;\
            /* Check for wraparound. Using 64 bit ssd counters would \
             * be simpler, but is slower on x86 32 bit. */\
            if (ssd < cur_node->ssd)\
                continue;\
            if (heap_pos[index] < frontier) {\
                pos = heap_pos[index]++;\
                av_assert2(pathn[index] < FREEZE_INTERVAL * frontier);\
                node = nodes_next[index][pos] = next[index]++;\
                node->path = pathn[index]++;\
            } else {\
                /* Try to replace one of the leaf nodes with the new \
                 * one, but not always testing the same leaf position */\
                pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
                if (ssd >= nodes_next[index][pos]->ssd)\
                    continue;\
                heap_pos[index]++;\
                node = nodes_next[index][pos];\
            }\
            node->ssd = ssd;\
            node->state = cur_node->state;\
            UPDATE;\
            c->paths[index][node->path].value = VALUE;\
            c->paths[index][node->path].prev = cur_node->path;\
            /* Sift the newly inserted node up in the heap to restore \
             * the heap property */\
            while (pos > 0) {\
                int parent = (pos - 1) >> 1;\
                if (nodes_next[index][parent]->ssd <= ssd)\
                    break;\
                FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
                       nodes_next[index][pos]);\
                pos = parent;\
            }
                STORE_NODE(0, ff_g722_update_low_predictor(&node->state, k >> 2), k);
            }
        }

        /* High band: expand each surviving node with all 4 codes. */
        for (j = 0; j < frontier && nodes[1][j]; j++) {
            int ihigh;
            struct TrellisNode *cur_node = nodes[1][j];

            /* We don't try to get any initial guess for ihigh via
             * encode_high - since there's only 4 possible values, test
             * them all. Testing all of these gives a much, much larger
             * gain than testing a larger range around ilow. */
            for (ihigh = 0; ihigh < 4; ihigh++) {
                int dhigh, decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                dhigh = cur_node->state.scale_factor *
                        ff_g722_high_inv_quant[ihigh] >> 10;
                decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
                dec_diff = xhigh - decoded;

                STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
            }
        }

        for (j = 0; j < 2; j++) {
            FFSWAP(struct TrellisNode**, nodes[j], nodes_next[j]);

            /* Rebase accumulated errors so the 32-bit ssd counters don't
             * overflow over long inputs. */
            if (nodes[j][0]->ssd > (1 << 16)) {
                for (k = 1; k < frontier && nodes[j][k]; k++)
                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
                nodes[j][0]->ssd = 0;
            }
        }

        /* Periodically commit: backtrack the current best path of each
         * band, write the packed bytes, and restart path accounting. */
        if (i == froze + FREEZE_INTERVAL) {
            p[0] = &c->paths[0][nodes[0][0]->path];
            p[1] = &c->paths[1][nodes[1][0]->path];
            for (j = i; j > froze; j--) {
                dst[j] = p[1]->value << 6 | p[0]->value;
                p[0] = &c->paths[0][p[0]->prev];
                p[1] = &c->paths[1][p[1]->prev];
            }
            froze = i;
            pathn[0] = pathn[1] = 0;
            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
        }
    }

    /* Flush the remaining tail and store the winning adaptive state
     * back into the context for subsequent frames. */
    p[0] = &c->paths[0][nodes[0][0]->path];
    p[1] = &c->paths[1][nodes[1][0]->path];
    for (j = i; j > froze; j--) {
        dst[j] = p[1]->value << 6 | p[0]->value;
        p[0] = &c->paths[0][p[0]->prev];
        p[1] = &c->paths[1][p[1]->prev];
    }
    c->band[0] = nodes[0][0]->state;
    c->band[1] = nodes[1][0]->state;
}
320 
321 static av_always_inline void encode_byte(G722Context *c, uint8_t *dst,
322  const int16_t *samples)
323 {
324  int xlow, xhigh, ilow, ihigh;
325  filter_samples(c, samples, &xlow, &xhigh);
326  ihigh = encode_high(&c->band[1], xhigh);
327  ilow = encode_low (&c->band[0], xlow);
328  ff_g722_update_high_predictor(&c->band[1], c->band[1].scale_factor *
329  ff_g722_high_inv_quant[ihigh] >> 10, ihigh);
330  ff_g722_update_low_predictor(&c->band[0], ilow >> 2);
331  *dst = ihigh << 6 | ilow;
332 }
333 
335  uint8_t *dst, int nb_samples,
336  const int16_t *samples)
337 {
338  int i;
339  for (i = 0; i < nb_samples; i += 2)
340  encode_byte(c, dst++, &samples[i]);
341 }
342 
/**
 * Encode one AVFrame of 16-bit mono samples into a G.722 packet.
 * Produces one byte per sample pair; an odd trailing sample is encoded
 * as a duplicated pair. The packet pts is shifted back by the encoder's
 * initial_padding (the 22-sample QMF history delay).
 *
 * @return 0 on success, negative AVERROR on buffer allocation failure
 */
static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    G722Context *c = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    int nb_samples, out_size, ret;

    /* One output byte per sample pair, rounded up for an odd count. */
    out_size = (frame->nb_samples + 1) / 2;
    if ((ret = ff_get_encode_buffer(avctx, avpkt, out_size, 0)) < 0)
        return ret;

    /* Round down to an even sample count for the pairwise encoders. */
    nb_samples = frame->nb_samples - (frame->nb_samples & 1);

    if (avctx->trellis)
        g722_encode_trellis(c, avctx->trellis, avpkt->data, nb_samples, samples);
    else
        g722_encode_no_trellis(c, avpkt->data, nb_samples, samples);

    /* handle last frame with odd frame_size: encode the leftover sample
     * as a pair of identical samples */
    if (nb_samples < frame->nb_samples) {
        int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] };
        encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples);
    }

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding);
    *got_packet_ptr = 1;
    return 0;
}
372 
374  .name = "g722",
375  .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
376  .type = AVMEDIA_TYPE_AUDIO,
379  .priv_data_size = sizeof(G722Context),
381  .close = g722_encode_close,
382  .encode2 = g722_encode_frame,
384  .channel_layouts = (const uint64_t[]){ AV_CH_LAYOUT_MONO, 0 },
386 };
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1012
AVCodec
AVCodec.
Definition: codec.h:202
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
av_clip
#define av_clip
Definition: common.h:96
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
PREV_SAMPLES_BUF_SIZE
#define PREV_SAMPLES_BUF_SIZE
Definition: g722.h:32
AV_CH_LAYOUT_MONO
#define AV_CH_LAYOUT_MONO
Definition: channel_layout.h:90
TrellisNode::path
int path
Definition: adpcmenc.c:60
out_size
int out_size
Definition: movenc.c:55
MIN_TRELLIS
#define MIN_TRELLIS
Definition: g722enc.c:46
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
encode.h
AV_CODEC_ID_ADPCM_G722
@ AV_CODEC_ID_ADPCM_G722
Definition: codec_id.h:381
encode_high
static int encode_high(const struct G722Band *state, int xhigh)
Definition: g722enc.c:145
filter_samples
static void filter_samples(G722Context *c, const int16_t *samples, int *xlow, int *xhigh)
Definition: g722enc.c:128
state
static struct @319 state
g722_encode_init
static av_cold int g722_encode_init(AVCodecContext *avctx)
Definition: g722enc.c:61
g722_encode_no_trellis
static void g722_encode_no_trellis(G722Context *c, uint8_t *dst, int nb_samples, const int16_t *samples)
Definition: g722enc.c:331
ff_g722_low_inv_quant6
const int16_t ff_g722_low_inv_quant6[64]
Definition: g722.c:63
AVCodecContext::initial_padding
int initial_padding
Audio only.
Definition: avcodec.h:1701
TrellisNode
Definition: adpcmenc.c:58
avassert.h
TrellisNode::ssd
uint32_t ssd
Definition: adpcmenc.c:59
ff_samples_to_time_base
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
Definition: internal.h:238
av_cold
#define av_cold
Definition: attributes.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_g722_high_inv_quant
const int16_t ff_g722_high_inv_quant[4]
Definition: g722.c:51
TrellisPath
Definition: aaccoder.c:188
G722Context
Definition: g722.h:34
av_clip_int16
#define av_clip_int16
Definition: common.h:111
av_clip_intp2
#define av_clip_intp2
Definition: common.h:117
ff_g722_update_high_predictor
void ff_g722_update_high_predictor(struct G722Band *band, const int dhigh, const int ihigh)
Definition: g722.c:154
g722_encode_trellis
static void g722_encode_trellis(G722Context *c, int trellis, uint8_t *dst, int nb_samples, const int16_t *samples)
Definition: g722enc.c:167
g722_encode_close
static av_cold int g722_encode_close(AVCodecContext *avctx)
Definition: g722enc.c:49
FREEZE_INTERVAL
#define FREEZE_INTERVAL
Definition: g722enc.c:38
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1229
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
MAX_FRAME_SIZE
#define MAX_FRAME_SIZE
Definition: g722enc.c:42
low_quant
static const int16_t low_quant[33]
Definition: g722enc.c:121
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
encode_low
static int encode_low(const struct G722Band *state, int xlow)
Definition: g722enc.c:153
g722.h
ff_adpcm_g722_encoder
const AVCodec ff_adpcm_g722_encoder
Definition: g722enc.c:370
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
common.h
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
TrellisPath::prev
int prev
Definition: aaccoder.c:190
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
pos
unsigned int pos
Definition: spdifenc.c:412
AVCodecContext
main external API structure.
Definition: avcodec.h:383
channel_layout.h
STORE_NODE
#define STORE_NODE(index, UPDATE, VALUE)
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:78
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
g722_encode_frame
static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: g722enc.c:340
MAX_TRELLIS
#define MAX_TRELLIS
Definition: g722enc.c:47
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
ff_g722_update_low_predictor
void ff_g722_update_low_predictor(struct G722Band *band, const int ilow)
Definition: g722.c:143
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_g722dsp_init
av_cold void ff_g722dsp_init(G722DSPContext *c)
Definition: g722dsp.c:68
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:87
encode_byte
static av_always_inline void encode_byte(G722Context *c, uint8_t *dst, const int16_t *samples)
Definition: g722enc.c:318