155 #define YAE_ATEMPO_MIN 0.5 156 #define YAE_ATEMPO_MAX 100.0 158 #define OFFSET(x) offsetof(ATempoContext, x) 161 {
"tempo",
"set tempo scale factor",
178 return &atempo->
frag[(atempo->
nfrag + 1) % 2];
247 #define RE_MALLOC_OR_FAIL(field, field_size) \ 250 field = av_malloc(field_size); \ 252 yae_release_buffers(atempo); \ 253 return AVERROR(ENOMEM); \ 267 uint32_t nlevels = 0;
276 atempo->
window = sample_rate / 24;
283 if (pot < atempo->window) {
321 for (i = 0; i < atempo->
window; i++) {
322 double t = (double)i / (
double)(atempo->
window - 1);
323 double h = 0.5 * (1.0 - cos(2.0 *
M_PI * t));
324 atempo->
hann[
i] = (float)h;
346 #define yae_init_xdat(scalar_type, scalar_max) \ 348 const uint8_t *src_end = src + \ 349 frag->nsamples * atempo->channels * sizeof(scalar_type); \ 351 FFTSample *xdat = frag->xdat; \ 354 if (atempo->channels == 1) { \ 355 for (; src < src_end; xdat++) { \ 356 tmp = *(const scalar_type *)src; \ 357 src += sizeof(scalar_type); \ 359 *xdat = (FFTSample)tmp; \ 362 FFTSample s, max, ti, si; \ 365 for (; src < src_end; xdat++) { \ 366 tmp = *(const scalar_type *)src; \ 367 src += sizeof(scalar_type); \ 369 max = (FFTSample)tmp; \ 370 s = FFMIN((FFTSample)scalar_max, \ 371 (FFTSample)fabsf(max)); \ 373 for (i = 1; i < atempo->channels; i++) { \ 374 tmp = *(const scalar_type *)src; \ 375 src += sizeof(scalar_type); \ 377 ti = (FFTSample)tmp; \ 378 si = FFMIN((FFTSample)scalar_max, \ 379 (FFTSample)fabsf(ti)); \ 431 const int read_size = stop_here - atempo->
position[0];
433 if (stop_here <= atempo->
position[0]) {
440 while (atempo->
position[0] < stop_here && src < src_end) {
441 int src_samples = (src_end -
src) / atempo->
stride;
454 memcpy(a, src, na * atempo->
stride);
456 src += na * atempo->
stride;
469 memcpy(b, src, nb * atempo->
stride);
471 src += nb * atempo->
stride;
506 int64_t missing, start, zeros;
509 int i0, i1, n0, n1, na, nb;
512 if (src_ref &&
yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
519 stop_here - atempo->
position[0] : 0;
522 missing < (int64_t)atempo->
window ?
523 (uint32_t)(atempo->
window - missing) : 0;
537 memset(dst, 0, zeros * atempo->
stride);
538 dst += zeros * atempo->
stride;
541 if (zeros == nsamples) {
558 i0 = frag->
position[0] + zeros - start;
559 i1 = i0 < na ? 0 : i0 - na;
561 n0 = i0 < na ?
FFMIN(na - i0, (
int)(nsamples - zeros)) : 0;
562 n1 = nsamples - zeros - n0;
565 memcpy(dst, a + i0 * atempo->
stride, n0 * atempo->
stride);
566 dst += n0 * atempo->
stride;
570 memcpy(dst, b + i1 * atempo->
stride, n1 * atempo->
stride);
581 const double fragment_step = atempo->
tempo * (double)(atempo->
window / 2);
620 for (i = 1; i <
window; i++, xa++, xb++, xc++) {
621 xc->
re = (xa->re * xb->re + xa->im * xb->im);
622 xc->
im = (xa->im * xb->re - xa->re * xb->im);
643 int best_offset = -drift;
658 i0 =
FFMAX(window / 2 - delta_max - drift, 0);
659 i0 =
FFMIN(i0, window);
661 i1 =
FFMIN(window / 2 + delta_max - drift, window - window / 16);
665 xcorr = correlation + i0;
667 for (i = i0; i < i1; i++, xcorr++) {
674 if (metric > best_metric) {
675 best_metric = metric;
676 best_offset = i - window / 2;
694 const double prev_output_position =
698 const double ideal_output_position =
701 const int drift = (
int)(prev_output_position - ideal_output_position);
703 const int delta_max = atempo->
window / 2;
727 #define yae_blend(scalar_type) \ 729 const scalar_type *aaa = (const scalar_type *)a; \ 730 const scalar_type *bbb = (const scalar_type *)b; \ 732 scalar_type *out = (scalar_type *)dst; \ 733 scalar_type *out_end = (scalar_type *)dst_end; \ 736 for (i = 0; i < overlap && out < out_end; \ 737 i++, atempo->position[1]++, wa++, wb++) { \ 742 for (j = 0; j < atempo->channels; \ 743 j++, aaa++, bbb++, out++) { \ 744 float t0 = (float)*aaa; \ 745 float t1 = (float)*bbb; \ 748 frag->position[0] + i < 0 ? \ 750 (scalar_type)(t0 * w0 + t1 * w1); \ 753 dst = (uint8_t *)out; \ 778 const int64_t overlap = stop_here - start_here;
780 const int64_t ia = start_here - prev->
position[1];
781 const int64_t
ib = start_here - frag->
position[1];
783 const float *wa = atempo->
hann + ia;
784 const float *wb = atempo->
hann +
ib;
839 if (!atempo->
nfrag) {
912 if (!atempo->
nfrag) {
946 while (atempo->
position[1] < overlap_end) {
961 offset = start_here - frag->
position[1];
967 src_size = (
int)(stop_here - start_here) * atempo->
stride;
968 dst_size = dst_end - dst;
969 nbytes =
FFMIN(src_size, dst_size);
971 memcpy(dst, src, nbytes);
1085 int n_out = (
int)(0.5 + ((
double)n_in) / atempo->
tempo);
1095 while (src < src_end) {
1135 int n_max = atempo->
ring;
1139 while (err ==
AVERROR(EAGAIN)) {
1213 .priv_class = &atempo_class,
#define RE_MALLOC_OR_FAIL(field, field_size)
static int push_samples(ATempoContext *atempo, AVFilterLink *outlink, int n_out)
static void yae_xcorr_via_rdft(FFTSample *xcorr, RDFTContext *complex_to_real, const FFTComplex *xa, const FFTComplex *xb, const int window)
Calculate cross-correlation via rDFT.
This structure describes decoded (raw) audio or video data.
RDFTContext * complex_to_real
FilterState
Filter state machine states.
RDFTContext * real_to_complex
static int config_props(AVFilterLink *inlink)
Main libavfilter public API header.
enum AVSampleFormat format
#define AV_OPT_FLAG_AUDIO_PARAM
The reader does not expect b to be semantically signed here, and if the code is changed by maybe adding a cast, a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type but it holds a signed int; to use the same example, SUINT a
static void yae_apply(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, uint8_t **dst_ref, uint8_t *dst_end)
Feed as much data to the filter as it is able to consume and receive as much processed data in the de...
static const AVFilterPad atempo_outputs[]
static int request_frame(AVFilterLink *outlink)
AVFILTER_DEFINE_CLASS(atempo)
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format. For audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists
static void yae_advance_to_next_frag(ATempoContext *atempo)
Prepare for loading next audio fragment.
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
static AudioFragment * yae_prev_frag(ATempoContext *atempo)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
static av_cold int end(AVCodecContext *avctx)
static AudioFragment * yae_curr_frag(ATempoContext *atempo)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
#define AVERROR_EOF
End of file.
static av_cold int init(AVFilterContext *ctx)
A filter pad used for either input or output.
static void yae_clear(ATempoContext *atempo)
Reset filter to initial state, do not deallocate existing local buffers.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
A link between two filters.
static int yae_load_data(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, int64_t stop_here)
Populate the internal data buffer on as-needed basis.
int sample_rate
samples per second
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
A fragment of audio waveform.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options...
void * priv
private data for use by the filter
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link...
simple assert() macros that are a bit more flexible than ISO C assert().
static int yae_flush(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Flush any buffered data from the filter.
void av_rdft_calc(RDFTContext *s, FFTSample *data)
static int yae_reset(ATempoContext *atempo, enum AVSampleFormat format, int sample_rate, int channels)
Prepare filter for processing audio data of given format, sample rate and number of channels...
static SDL_Window * window
#define AV_OPT_FLAG_RUNTIME_PARAM
a generic parameter which can be set by the user at runtime
static const AVFilterPad atempo_inputs[]
audio channel layout utility functions
static int yae_align(AudioFragment *frag, const AudioFragment *prev, const int window, const int delta_max, const int drift, FFTSample *correlation, RDFTContext *complex_to_real)
Calculate alignment offset for given fragment relative to the previous fragment.
static int yae_overlap_add(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Blend the overlap region of previous and current audio fragment and output the results to the given d...
void av_rdft_end(RDFTContext *s)
static int yae_adjust_position(ATempoContext *atempo)
Adjust current fragment position for better alignment with previous fragment.
AVFilterContext * src
source filter
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
#define yae_blend(scalar_type)
A helper macro for blending the overlap region of previous and current audio fragment.
static const AVFilterPad outputs[]
int format
agreed upon media format
A list of supported channel layouts.
static int query_formats(AVFilterContext *ctx)
static void yae_release_buffers(ATempoContext *atempo)
Reset filter to initial state and deallocate all buffers.
AVSampleFormat
Audio sample formats.
static void correlation(int32_t *corr, int32_t *ener, int16_t *buffer, int16_t lag, int16_t blen, int16_t srange, int16_t scale)
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs
Describe the class of an AVClass context structure.
int sample_rate
Sample rate of the audio data.
Rational number (pair of numerator and denominator).
const char * name
Filter name.
AVFilterLink ** outputs
array of pointers to output links
enum MovChannelLayoutTag * layouts
#define yae_init_xdat(scalar_type, scalar_max)
A helper macro for initializing complex data buffer with scalar data of a given type.
#define flags(name, subs,...)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
int channels
Number of channels.
static int yae_update(AVFilterContext *ctx)
static av_cold void uninit(AVFilterContext *ctx)
AVFilterContext * dst
dest filter
static int yae_load_frag(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end)
Populate current audio fragment data buffer.
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
Initialize complex data buffer of a given audio fragment with down-mixed mono data of appropriate sca...
static enum AVSampleFormat sample_fmts[]
static const AVOption atempo_options[]
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format. For audio that means channel layout and sample format. The lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats in query_formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions
int nb_samples
number of audio samples (per channel) described by this frame
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
#define AV_NOPTS_VALUE
Undefined timestamp value.
simple arithmetic expression evaluator