FFmpeg
aptxenc.c
/*
 * Audio Processing Technology codec for Bluetooth (aptX)
 *
 * Copyright (C) 2017 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "aptx.h"
#include "encode.h"

/*
 * Half-band QMF analysis filter realized with a polyphase FIR filter.
 * Split into 2 subbands and downsample by 2.
 * So for each pair of samples that goes in, one sample goes out,
 * split into 2 separate subbands.
 */
static av_always_inline void aptx_qmf_polyphase_analysis(FilterSignal signal[NB_FILTERS],
                                                         const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
                                                         int shift,
                                                         int32_t samples[NB_FILTERS],
                                                         int32_t *low_subband_output,
                                                         int32_t *high_subband_output)
{
    int32_t subbands[NB_FILTERS];
    int i;

    for (i = 0; i < NB_FILTERS; i++) {
        aptx_qmf_filter_signal_push(&signal[i], samples[NB_FILTERS-1-i]);
        subbands[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
    }

    /* Sum and difference of the two polyphase outputs give the low and high
     * subbands, clipped to the signed 24-bit range used throughout the codec. */
    *low_subband_output  = av_clip_intp2(subbands[0] + subbands[1], 23);
    *high_subband_output = av_clip_intp2(subbands[0] - subbands[1], 23);
}

/*
 * Two stage QMF analysis tree.
 * Split 4 input samples into 4 subbands and downsample by 4.
 * So for each group of 4 samples that goes in, one sample goes out,
 * split into 4 separate subbands.
 */
static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
                                   int32_t samples[4],
                                   int32_t subband_samples[4])
{
    int32_t intermediate_samples[4];
    int i;

    /* Split 4 input samples into 2 intermediate subbands downsampled to 2 samples */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->outer_filter_signal,
                                    aptx_qmf_outer_coeffs, 23,
                                    &samples[2*i],
                                    &intermediate_samples[0+i],
                                    &intermediate_samples[2+i]);

    /* Split 2 intermediate subband samples into 4 final subbands downsampled to 1 sample */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i],
                                    aptx_qmf_inner_coeffs, 23,
                                    &intermediate_samples[2*i],
                                    &subband_samples[2*i+0],
                                    &subband_samples[2*i+1]);
}
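
/*
 * Bisection over the quantization intervals: returns the largest index idx
 * such that factor * intervals[idx] does not exceed value compared in Q24,
 * i.e. the interval in which the scaled input falls.
 */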
static av_always_inline int32_t aptx_bin_search(int32_t value, int32_t factor,
                                                const int32_t *intervals, int32_t nb_intervals)
{
    int32_t idx = 0;
    int i;

    for (i = nb_intervals >> 1; i > 0; i >>= 1)
        if (MUL64(factor, intervals[idx + i]) <= ((int64_t)value << 24))
            idx += i;

    return idx;
}
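
/*
 * Quantize the prediction residual of one subband with the dithered quantizer:
 * pick an interval with aptx_bin_search(), add a dither-dependent offset, and
 * record both the chosen code and an alternative code offset by one
 * (quantized_sample_parity_change) so that aptx_insert_sync() can later flip
 * the parity of the codeword that costs the least additional error.
 */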
static void aptx_quantize_difference(Quantize *quantize,
                                     int32_t sample_difference,
                                     int32_t dither,
                                     int32_t quantization_factor,
                                     ConstTables *tables)
{
    const int32_t *intervals = tables->quantize_intervals;
    int32_t quantized_sample, dithered_sample, parity_change;
    int32_t d, mean, interval, inv, sample_difference_abs;
    int64_t error;

    sample_difference_abs = FFABS(sample_difference);
    sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);

    quantized_sample = aptx_bin_search(sample_difference_abs >> 4,
                                       quantization_factor,
                                       intervals, tables->tables_size);

    d = rshift32_clip24(MULH(dither, dither), 7) - (1 << 23);
    d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);

    intervals += quantized_sample;
    mean = (intervals[1] + intervals[0]) / 2;
    interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);

    dithered_sample = rshift64_clip24(MUL64(dither, interval) + ((int64_t)av_clip_intp2(mean + d, 23) << 32), 32);
    error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);
    quantize->error = FFABS(rshift64(error, 23));

    parity_change = quantized_sample;
    if (error < 0)
        quantized_sample--;
    else
        parity_change--;

    inv = -(sample_difference < 0);
    quantize->quantized_sample               = quantized_sample ^ inv;
    quantize->quantized_sample_parity_change = parity_change ^ inv;
}
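
/*
 * Encode 4 consecutive samples of one channel: run the two-stage QMF analysis,
 * then quantize, for each subband, the difference between the subband sample
 * and the predicted sample, using the quantization factor produced by the
 * previous inverse quantization step.
 */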
static void aptx_encode_channel(Channel *channel, int32_t samples[4], int hd)
{
    int32_t subband_samples[4];
    int subband;
    aptx_qmf_tree_analysis(&channel->qmf, samples, subband_samples);
    ff_aptx_generate_dither(channel);
    for (subband = 0; subband < NB_SUBBANDS; subband++) {
        int32_t diff = av_clip_intp2(subband_samples[subband] - channel->prediction[subband].predicted_sample, 23);
        aptx_quantize_difference(&channel->quantize[subband], diff,
                                 channel->dither[subband],
                                 channel->invert_quantize[subband].quantization_factor,
                                 &ff_aptx_quant_tables[hd][subband]);
    }
}
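
/*
 * aptX embeds a synchronization pattern in the parity of the transmitted
 * codewords. When aptx_check_parity() reports that the combined parity of the
 * current codeword pair does not match the expected sync sequence, force it by
 * picking, across both channels and subbands, the quantized sample whose
 * parity-changing alternative adds the least quantization error.
 */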
static void aptx_insert_sync(Channel channels[NB_CHANNELS], int32_t *idx)
{
    if (aptx_check_parity(channels, idx)) {
        int i;
        Channel *c;
        static const int map[] = { 1, 2, 0, 3 };
        Quantize *min = &channels[NB_CHANNELS-1].quantize[map[0]];
        for (c = &channels[NB_CHANNELS-1]; c >= channels; c--)
            for (i = 0; i < NB_SUBBANDS; i++)
                if (c->quantize[map[i]].error < min->error)
                    min = &c->quantize[map[i]];

        /* Forcing the desired parity is done by offsetting by 1 the quantized
         * sample from the subband featuring the smallest quantization error. */
        min->quantized_sample = min->quantized_sample_parity_change;
    }
}
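
/*
 * Pack one channel into a 16-bit aptX codeword: 7 bits for subband 0, 4 for
 * subband 1, 2 for subband 2 and 3 for subband 3, with the lowest bit of the
 * subband 3 field replaced by the parity bit.
 */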
static uint16_t aptx_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
         | (((channel->quantize[2].quantized_sample & 0x03)         ) << 11)
         | (((channel->quantize[1].quantized_sample & 0x0F)         ) <<  7)
         | (((channel->quantize[0].quantized_sample & 0x7F)         ) <<  0);
}
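
/*
 * Pack one channel into a 24-bit aptX HD codeword: 9 bits for subband 0, 6 for
 * subband 1, 4 for subband 2 and 5 for subband 3, again with the lowest bit of
 * the subband 3 field carrying the parity.
 */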
static uint32_t aptxhd_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
         | (((channel->quantize[2].quantized_sample & 0x00F)         ) << 15)
         | (((channel->quantize[1].quantized_sample & 0x03F)         ) <<  9)
         | (((channel->quantize[0].quantized_sample & 0x1FF)         ) <<  0);
}
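
/*
 * Encode one group of 4 samples per channel and write the resulting codewords:
 * 2 bytes per channel for aptX, 3 bytes per channel for aptX HD. The inverse
 * quantization and prediction step is also run on the encoder side so that its
 * state stays in sync with the decoder's.
 */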
static void aptx_encode_samples(AptXContext *ctx,
                                int32_t samples[NB_CHANNELS][4],
                                uint8_t *output)
{
    int channel;
    for (channel = 0; channel < NB_CHANNELS; channel++)
        aptx_encode_channel(&ctx->channels[channel], samples[channel], ctx->hd);

    aptx_insert_sync(ctx->channels, &ctx->sync_idx);

    for (channel = 0; channel < NB_CHANNELS; channel++) {
        ff_aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
        if (ctx->hd)
            AV_WB24(output + 3*channel,
                    aptxhd_pack_codeword(&ctx->channels[channel]));
        else
            AV_WB16(output + 2*channel,
                    aptx_pack_codeword(&ctx->channels[channel]));
    }
}
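
/*
 * Input is planar signed 32-bit PCM (AV_SAMPLE_FMT_S32P); the low 8 bits of
 * each sample are dropped to obtain the 24-bit values the codec works on.
 * Each iteration consumes 4 samples per channel and emits one block of
 * block_size bytes (one codeword per channel), so any trailing samples beyond
 * a multiple of 4 are not coded.
 */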
static int aptx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    AptXContext *s = avctx->priv_data;
    int pos, ipos, channel, sample, output_size, ret;

    if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
        return ret;

    output_size = s->block_size * frame->nb_samples/4;
    if ((ret = ff_get_encode_buffer(avctx, avpkt, output_size, 0)) < 0)
        return ret;

    for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
        int32_t samples[NB_CHANNELS][4];

        for (channel = 0; channel < NB_CHANNELS; channel++)
            for (sample = 0; sample < 4; sample++)
                samples[channel][sample] = (int32_t)AV_RN32A(&frame->data[channel][4*(ipos+sample)]) >> 8;

        aptx_encode_samples(s, samples, avpkt->data + pos);
    }

    ff_af_queue_remove(&s->afq, frame->nb_samples, &avpkt->pts, &avpkt->duration);
    *got_packet_ptr = 1;
    return 0;
}

static av_cold int aptx_close(AVCodecContext *avctx)
{
    AptXContext *s = avctx->priv_data;
    ff_af_queue_close(&s->afq);
    return 0;
}

#if CONFIG_APTX_ENCODER
const AVCodec ff_aptx_encoder = {
    .name                  = "aptx",
    .long_name             = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
    .type                  = AVMEDIA_TYPE_AUDIO,
    .id                    = AV_CODEC_ID_APTX,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    .encode2               = aptx_encode_frame,
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif

#if CONFIG_APTX_HD_ENCODER
const AVCodec ff_aptx_hd_encoder = {
    .name                  = "aptx_hd",
    .long_name             = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
    .type                  = AVMEDIA_TYPE_AUDIO,
    .id                    = AV_CODEC_ID_APTX_HD,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    .encode2               = aptx_encode_frame,
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif