/* calc_corr(): accumulate one lagged autocorrelation term over the block */
for (n = 0; n < len; n++)
    s += MUL64(x[n-j], x[n-k]);

/* apply_filter(): cross terms between the candidate predictor a[] and corr[] */
tmp += MUL64(a[0], corr[1]);
tmp += MUL64(a[1], corr[2]);
tmp += MUL64(a[2], corr[3]);
tmp += MUL64(a[3], corr[4]);

/* apply_filter(): terms using the premultiplied coefficients aa[] */
tmp += MUL64(corr[5],  aa[0]);
tmp += MUL64(corr[6],  aa[1]);
tmp += MUL64(corr[7],  aa[2]);
tmp += MUL64(corr[8],  aa[3]);
tmp += MUL64(corr[9],  aa[4]);
tmp += MUL64(corr[10], aa[5]);
tmp += MUL64(corr[11], aa[6]);
tmp += MUL64(corr[12], aa[7]);
tmp += MUL64(corr[13], aa[8]);
tmp += MUL64(corr[14], aa[9]);

/* find_best_filter(): start the search from the largest representable error */
int64_t min_err = 1ll << 62;

/* calc_prediction_gain(): accumulate signal energy and prediction-error
 * energy, then return their ratio (intermediate statements elided) */
int64_t signal_energy = 0;
int64_t error_energy = 0;

for (i = 0; i < len; i++) {
    /* ... */
    error_energy += MUL64(error, error);
    /* ... */
}

return signal_energy / error_energy;

/* ff_dcaadpcm_subband_analysis(): normalize the input before filtering */
shift_bits = av_log2(max) - 11;

input_buffer[i]  = norm__(in[i], 7);
input_buffer2[i] = norm__(in[i], shift_bits);

for (i = 0; i < len; i++)
    /* ... */

/* ff_dcaadpcm_do_real(): quantize the prediction residual against the
 * reconstructed history, then carry the history to the next block
 * (intermediate statements elided) */
for (i = 0; i < len; i++) {
    /* ... */
    delta = (int64_t)in[i] - ((int64_t)work_bufer[DCA_ADPCM_COEFFS + i] << 7);
    /* ... */
    work_bufer[DCA_ADPCM_COEFFS + i] += dequant_delta;
}

memcpy(next_hist, &work_bufer[len], sizeof(int32_t) * DCA_ADPCM_COEFFS);
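The ratio returned above (signal energy divided by prediction-error energy) is the prediction-gain measure; presumably the caller compares it against a threshold to decide whether ADPCM is worth enabling for the block. Below is a minimal, self-contained sketch of that measure, using a deliberately trivial previous-sample predictor instead of the 4-tap VQ-selected filter of the real code, and plain int64_t products in place of MUL64:

/* Sketch of the prediction-gain measure: gain = sum(x[i]^2) / sum(e[i]^2),
 * where e[i] is the prediction error. The "previous sample" predictor here
 * is an illustration only, not the predictor used by the encoder. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t prediction_gain(const int32_t *x, int len)
{
    int64_t signal_energy = 0;
    int64_t error_energy  = 0;

    for (int i = 1; i < len; i++) {
        int64_t error = (int64_t)x[i] - x[i - 1];   /* toy predictor: x[i] ~= x[i-1] */

        signal_energy += (int64_t)x[i] * x[i];
        error_energy  += error * error;
    }

    if (!error_energy)          /* perfectly predictable input */
        return INT64_MAX;

    return signal_energy / error_energy;
}

int main(void)
{
    const int32_t ramp[8] = { 0, 100, 200, 300, 400, 500, 600, 700 };
    /* A smooth ramp is highly predictable, so the ratio is large. */
    printf("gain = %" PRId64 "\n", prediction_gain(ramp, 8));
    return 0;
}

A large ratio means the residual is much smaller than the signal, which is exactly when transmitting quantized prediction residuals instead of raw subband samples pays off.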
av_cold int ff_dcaadpcm_init(DCAADPCMEncContext *s)
int32_t premultiplied_coeffs[10]
Given code such as: unsigned a = -1; int b = a * 10; the reader does not expect b to be semantically negative here, and if the code is later changed, by maybe adding a cast, a division or some other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, the declaration becomes SUINT a = -1.
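To make that concrete, here is a small stand-alone sketch; SUINT is typedef'd locally purely for illustration (the real SUINT lives in libavutil and may be defined differently):

/* Illustration of the SUINT idea: unsigned arithmetic (so overflow is well
 * defined), but the name tells the reader the value is semantically signed. */
#include <stdio.h>

typedef unsigned SUINT;   /* local stand-in, only so the snippet compiles alone */

int main(void)
{
    unsigned a = -1;      /* legal, but the signed intent is invisible */
    int b = a * 10;       /* on common two's-complement targets this is -10,
                             which is not what "unsigned times 10" suggests */

    SUINT c = -1;         /* same arithmetic, but the type name documents
                             that c really carries a signed value */
    int d = c * 10;

    printf("%d %d\n", b, d);   /* -10 -10 */
    return 0;
}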
static int64_t ff_dcaadpcm_predict(int pred_vq_index, const int32_t *input)
static int64_t calc_corr(const int32_t *x, int len, int j, int k)
int ff_dcaadpcm_do_real(int pred_vq_index, softfloat quant, int32_t scale_factor, int32_t step_size, const int32_t *prev_hist, const int32_t *in, int32_t *next_hist, int32_t *out, int len, int32_t peak)
static int32_t quantize_value(int32_t value, softfloat quant)
static void ff_dca_core_dequantize(int32_t *output, const int32_t *input, int32_t step_size, int32_t scale, int residual, int len)
#define DCA_ADPCM_VQCODEBOOK_SZ
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
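A short, self-contained illustration of that note follows; FFABS is re-declared locally (assuming the usual ((a) >= 0 ? (a) : (-(a))) shape) so the snippet compiles on its own:

/* Why FFABS(INT_MIN) is undefined: negating INT_MIN overflows signed int.
 * Widening the argument to int64_t before taking the absolute value avoids it. */
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))   /* local stand-in for the libavutil macro */

int main(void)
{
    int x = INT_MIN;

    /* FFABS(x) would evaluate -(INT_MIN): signed overflow, undefined behavior. */
    int64_t safe = FFABS((int64_t)x);         /* 2147483648 fits in int64_t */

    printf("%" PRId64 "\n", safe);
    return 0;
}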
static int64_t apply_filter(const int16_t a[DCA_ADPCM_COEFFS], const int64_t corr[15], const int32_t aa[10])
static void precalc(premultiplied_coeffs *data)
const int16_t ff_dca_adpcm_vb[DCA_ADPCM_VQCODEBOOK_SZ][DCA_ADPCM_COEFFS]
int ff_dcaadpcm_subband_analysis(const DCAADPCMEncContext *s, const int32_t *in, int len, int *diff)
static int64_t calc_prediction_gain(int pred_vq, const int32_t *in, int32_t *out, int len)
static int64_t find_best_filter(const DCAADPCMEncContext *s, const int32_t *in, int len)
static int32_t norm__(int64_t a, int bits)
av_cold void ff_dcaadpcm_free(DCAADPCMEncContext *s)
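Taken together, the entries above form the encoder-side ADPCM helper API. The following is a hedged usage sketch, not the call sequence of the DCA encoder itself: it assumes ff_dcaadpcm_init(ctx) was called beforehand (and ff_dcaadpcm_free(ctx) at teardown), that the input block carries DCA_ADPCM_COEFFS history samples followed by len current samples, and that the quantizer parameters are already prepared by the caller.

/* Hedged sketch only: buffer sizes and the history/input layout below are
 * assumptions for illustration, not the conventions of dcaenc.c. */
#include <stdint.h>
#include "dcaadpcm.h"   /* assumed to provide DCAADPCMEncContext, softfloat,
                           DCA_ADPCM_COEFFS and the prototypes listed above */

static int adpcm_subband_sketch(DCAADPCMEncContext *ctx,
                                const int32_t *in, int len,
                                softfloat quant, int32_t scale_factor,
                                int32_t step_size, int32_t peak,
                                int32_t *prev_hist, int32_t *next_hist,
                                int32_t *out)
{
    int diff[16];   /* assumed upper bound on the subband block length;
                       the residual written here is left unused in this sketch */
    int pred_vq;

    /* Pick the best predictor from the VQ codebook; a negative return is
     * taken here to mean the prediction gain is too small to use ADPCM. */
    pred_vq = ff_dcaadpcm_subband_analysis(ctx, in, len, diff);
    if (pred_vq < 0)
        return -1;   /* caller encodes this subband without prediction */

    /* Quantize the residual against the reconstructed history and carry the
     * history forward for the next block. */
    return ff_dcaadpcm_do_real(pred_vq, quant, scale_factor, step_size,
                               prev_hist, in + DCA_ADPCM_COEFFS,
                               next_hist, out, len, peak);
}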
Frame and format, as used by the filter framework: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure; "format" means, for each input and each output, the list of supported formats (for video, the pixel format; for audio, the channel layout, sample format and sample rate). These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.
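As a schematic of that deferral mechanism (this is not code from any real filter; the static flag is a hypothetical placeholder for whatever per-filter state would actually drive the decision), a query_formats callback of the classic int (*)(AVFilterContext *) shape could look like:

/* Schematic only: my_formats_known stands in for real filter state and no
 * format lists are built here; the one concrete piece taken from the text
 * above is returning AVERROR(EAGAIN) so negotiation retries the callback. */
#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

static int my_formats_known;    /* hypothetical: set once the other link is decided */

static int query_formats(AVFilterContext *ctx)
{
    if (!my_formats_known)
        return AVERROR(EAGAIN);   /* formats not decidable yet; try again later */

    /* a real filter would now set the supported formats on its links */
    return 0;
}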