84 #include <speex/speex.h> 85 #include <speex/speex_header.h> 86 #include <speex/speex_stereo.h> 114 const char *mode_str =
"unknown";
118 case SPEEX_MODEID_NB: mode_str =
"narrowband";
break;
119 case SPEEX_MODEID_WB: mode_str =
"wideband";
break;
120 case SPEEX_MODEID_UWB: mode_str =
"ultra-wideband";
break;
148 const SpeexMode *
mode;
156 "mono are supported\n", avctx->
channels);
162 case 8000: mode = speex_lib_get_mode(SPEEX_MODEID_NB);
break;
163 case 16000: mode = speex_lib_get_mode(SPEEX_MODEID_WB);
break;
164 case 32000: mode = speex_lib_get_mode(SPEEX_MODEID_UWB);
break;
167 "Resample to 8, 16, or 32 kHz.\n", avctx->
sample_rate);
193 speex_encoder_ctl(s->
enc_state, SPEEX_SET_ABR,
195 speex_encoder_ctl(s->
enc_state, SPEEX_GET_ABR,
198 speex_encoder_ctl(s->
enc_state, SPEEX_SET_BITRATE,
200 speex_encoder_ctl(s->
enc_state, SPEEX_GET_BITRATE,
205 speex_encoder_ctl(s->
enc_state, SPEEX_SET_QUALITY,
207 speex_encoder_ctl(s->
enc_state, SPEEX_GET_BITRATE,
217 speex_encoder_ctl(s->
enc_state, SPEEX_SET_VAD, &s->
vad);
221 speex_encoder_ctl(s->
enc_state, SPEEX_SET_DTX, &s->
dtx);
229 speex_encoder_ctl(s->
enc_state, SPEEX_SET_COMPLEXITY, &complexity);
231 speex_encoder_ctl(s->
enc_state, SPEEX_GET_COMPLEXITY, &complexity);
245 header_data = speex_header_to_packet(&s->
header, &header_size);
250 speex_header_free(header_data);
257 memcpy(avctx->
extradata, header_data, header_size);
259 speex_header_free(header_data);
262 speex_bits_init(&s->
bits);
278 speex_encode_stereo_int(samples, s->
header.frame_size, &s->
bits);
289 speex_bits_pack(&s->
bits, 15, 5);
300 speex_bits_reset(&s->
bits);
317 speex_bits_destroy(&s->
bits);
326 #define OFFSET(x) offsetof(LibSpeexEncContext, x) 327 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM 346 {
"compression_level",
"3" },
365 .supported_samplerates = (
const int[]){ 8000, 16000, 32000, 0 },
368 .wrapper_name =
"libspeex",
int pkt_frame_count
frame count for the current packet
void ff_af_queue_remove(AudioFrameQueue *afq, int nb_samples, int64_t *pts, int64_t *duration)
Remove frame(s) from the queue.
#define FF_COMPRESSION_DEFAULT
static av_cold int encode_init(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
#define AV_LOG_WARNING
Something somehow does not look correct.
int64_t bit_rate
the average bitrate
#define LIBAVUTIL_VERSION_INT
static av_cold int init(AVCodecContext *avctx)
const char * av_default_item_name(void *ptr)
Return the context name.
int cbr_quality
CBR quality 0 to 10.
int vad
flag to enable VAD
SpeexBits bits
libspeex bitwriter context
static av_cold int encode_close(AVCodecContext *avctx)
#define AV_CH_LAYOUT_STEREO
int dtx
flag to enable DTX
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass field is associated.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
AudioFrameQueue afq
frame queue
av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
Initialize AudioFrameQueue.
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
AVCodec ff_libspeex_encoder
int frames_per_packet
number of frames to encode in each packet
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const AVCodecDefault defaults[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int initial_padding
Audio only.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int flags
AV_CODEC_FLAG_*.
const char * name
Name of the codec implementation.
int ff_af_queue_add(AudioFrameQueue *afq, const AVFrame *f)
Add a frame to the queue.
audio channel layout utility functions
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
static const AVClass speex_class
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return 0 if it could produce a frame, or at least make progress towards producing one.
int frame_size
Number of samples per channel in an audio frame.
void * enc_state
libspeex encoder state
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
int sample_rate
samples per second
main external API structure.
Describe the class of an AVClass context structure.
int global_quality
Global quality for codecs which cannot change it per frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
common internal api header.
common internal and external API header
static av_cold void print_enc_params(AVCodecContext *avctx, LibSpeexEncContext *s)
SpeexHeader header
libspeex header struct
static const AVOption options[]
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding. This is mainly needed because some optimized bitstream readers read 32 or 64 bits at once and could read over the end.
int channels
number of audio channels
int abr
flag to enable ABR
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
void ff_af_queue_close(AudioFrameQueue *afq)
Close AudioFrameQueue.
static enum AVSampleFormat sample_fmts[]
Filter documentation: the word “frame” indicates either a video frame or a group of audio samples.
The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter lists the supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions:
float vbr_quality
VBR quality 0.0 to 10.0.
#define AV_CH_LAYOUT_MONO
This structure stores compressed data.
mode
Use these values in ebur128_init (or'ed).
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.