43 #include <theora/theoraenc.h> 64 if (packet->bytes < 0) {
65 message =
"ogg_packet has negative size";
66 }
else if (packet->bytes > 0xffff) {
67 message =
"ogg_packet is larger than 65535 bytes";
68 }
else if (newsize < avc_context->extradata_size) {
69 message =
"extradata_size would overflow";
73 message =
"av_realloc failed";
84 memcpy(avc_context->
extradata + (*offset), packet->packet, packet->bytes);
85 (*offset) += packet->bytes;
91 #ifdef TH_ENCCTL_2PASS_OUT 96 bytes = th_encode_ctl(h->
t_state, TH_ENCCTL_2PASS_OUT, &buf,
sizeof(buf));
112 memcpy(h->
stats, buf, bytes);
129 #ifdef TH_ENCCTL_2PASS_IN 146 bytes = th_encode_ctl(h->
t_state, TH_ENCCTL_2PASS_IN,
167 th_comment t_comment;
175 th_info_init(&t_info);
178 t_info.pic_width = avc_context->
width;
179 t_info.pic_height = avc_context->
height;
190 t_info.aspect_numerator = 1;
191 t_info.aspect_denominator = 1;
195 t_info.colorspace = TH_CS_ITU_REC_470M;
197 t_info.colorspace = TH_CS_ITU_REC_470BG;
199 t_info.colorspace = TH_CS_UNSPECIFIED;
202 t_info.pixel_fmt = TH_PF_420;
204 t_info.pixel_fmt = TH_PF_422;
206 t_info.pixel_fmt = TH_PF_444;
222 t_info.target_bitrate = 0;
224 t_info.target_bitrate = avc_context->
bit_rate;
229 h->
t_state = th_encode_alloc(&t_info);
237 th_info_clear(&t_info);
239 if (th_encode_ctl(h->
t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
240 &gop_size,
sizeof(gop_size))) {
247 if ((ret =
get_stats(avc_context, 0)) < 0)
264 th_comment_init(&t_comment);
266 while (th_encode_flushheader(h->
t_state, &t_comment, &o_packet))
270 th_comment_clear(&t_comment);
278 th_ycbcr_buffer t_yuv_buffer;
285 th_encode_packetout(h->
t_state, 1, &o_packet);
287 if ((ret =
get_stats(avc_context, 1)) < 0)
293 for (i = 0; i < 3; i++) {
297 t_yuv_buffer[
i].data = frame->
data[
i];
305 result = th_encode_ycbcr_in(h->
t_state, t_yuv_buffer);
310 message =
"differing frame sizes";
313 message =
"encoder is not ready or is finished";
316 message =
"unknown reason";
319 av_log(avc_context,
AV_LOG_ERROR,
"theora_encode_YUVin failed (%s) [%d]\n", message, result);
324 if ((ret =
get_stats(avc_context, 0)) < 0)
328 result = th_encode_packetout(h->
t_state, 0, &o_packet);
344 memcpy(pkt->
data, o_packet.packet, o_packet.bytes);
349 #if FF_API_CODED_FRAME 388 .wrapper_name =
"libtheora",
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
This structure describes decoded (raw) audio or video data.
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int64_t bit_rate
the average bitrate
static av_cold int init(AVCodecContext *avctx)
static int concatenate_packet(unsigned int *offset, AVCodecContext *avc_context, const ogg_packet *packet)
Concatenate an ogg_packet into the extradata.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
static int submit_stats(AVCodecContext *avctx)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it work. vf offset
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
char * stats_out
pass1 encoding statistics output buffer
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int flags
AV_CODEC_FLAG_*.
const char * name
Name of the codec implementation.
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
int flags
A combination of AV_PKT_FLAG values.
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static int get_stats(AVCodecContext *avctx, int eos)
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
int width
picture width / height.
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
static av_cold int encode_init(AVCodecContext *avc_context)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
static int FUNC() message(CodedBitstreamContext *ctx, RWContext *rw, SEIRawMessage *current)
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize, int64_t *fpos)
find the next Ogg packet
main external API structure.
static av_cold int encode_close(AVCodecContext *avc_context)
static enum AVPixelFormat pix_fmts[]
static int encode_frame(AVCodecContext *avc_context, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int global_quality
Global quality for codecs which cannot change it per frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FF_DISABLE_DEPRECATION_WARNINGS
common internal api header.
common internal and external API header
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define FF_ENABLE_DEPRECATION_WARNINGS
AVCodec ff_libtheora_encoder
AVCodec struct exposed to libavcodec.
int key_frame
1 -> keyframe, 0 -> not
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
and forward the result (frame or status change) to the corresponding input. If nothing is possible
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
#define AVERROR_EXTERNAL
Generic error in an external library.
AVPixelFormat
Pixel format.
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...