38 frame->flags &= ~flags;
178 int min_poc = INT_MAX;
196 if (frame->poc < min_poc || nb_output == 1) {
197 min_poc = frame->poc;
220 "Output frame with POC %d.\n", frame->poc);
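The fragments around lines 178-220 come from ff_hevc_output_frame(): among the DPB entries still flagged for output, the frame with the lowest POC is chosen (or the only remaining one when flushing). A minimal standalone sketch of that selection follows; the ToyFrame struct, its needed flag and pick_output_frame() are invented stand-ins for the real HEVCFrame/DPB fields, not FFmpeg API.

    #include <limits.h>
    #include <stddef.h>

    /* Invented, simplified stand-in for an HEVCFrame DPB entry. */
    typedef struct ToyFrame {
        int poc;      /* picture order count */
        int needed;   /* nonzero while the frame still awaits output */
    } ToyFrame;

    /* Return the index of the waiting frame with the lowest POC, or -1. */
    static int pick_output_frame(const ToyFrame *dpb, size_t n)
    {
        int min_poc = INT_MAX;
        int best = -1;

        for (size_t i = 0; i < n; i++) {
            if (dpb[i].needed && dpb[i].poc < min_poc) {
                min_poc = dpb[i].poc;
                best = (int)i;
            }
        }
        return best;
    }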
236 int min_poc = INT_MAX;
241 if ((frame->flags) &&
251 if ((frame->flags) &&
255 min_poc = frame->poc;
264 frame->poc <= min_poc) {
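Lines 236-264 belong to ff_hevc_bump_frame(): when the DPB fills up, the smallest POC among frames still waiting for output is found, and every waiting frame at or below that POC is marked for bumping so it is output early. The sketch below reproduces only those two passes; the flag values, struct and function names are illustrative, and the sequence-counter checks of the real code are omitted.

    #include <limits.h>
    #include <stddef.h>

    enum {                        /* invented flag bits, for illustration only */
        TOY_FLAG_OUTPUT  = 1 << 0,
        TOY_FLAG_BUMPING = 1 << 1,
    };

    typedef struct ToyDPBFrame {
        int      poc;
        unsigned flags;
    } ToyDPBFrame;

    static void bump_frames(ToyDPBFrame *dpb, size_t n)
    {
        int min_poc = INT_MAX;

        /* Pass 1: lowest POC among frames still waiting to be output. */
        for (size_t i = 0; i < n; i++)
            if ((dpb[i].flags & TOY_FLAG_OUTPUT) && dpb[i].poc < min_poc)
                min_poc = dpb[i].poc;

        /* Pass 2: everything at or below that POC is marked for bumping. */
        for (size_t i = 0; i < n; i++)
            if ((dpb[i].flags & TOY_FLAG_OUTPUT) && dpb[i].poc <= min_poc)
                dpb[i].flags |= TOY_FLAG_BUMPING;
    }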
283 for (i = ctb_addr_ts; i < ctb_count; i++)
309 for (list_idx = 0; list_idx < nb_list; list_idx++) {
335 for (i = 0; i < sh->nb_refs[list_idx]; i++) {
344 rpl->ref[i] = rpl_tmp.ref[idx];
349 memcpy(rpl, &rpl_tmp, sizeof(*rpl));
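Lines 309-349 build the per-slice reference picture lists (ff_hevc_slice_rpl()): a temporary list is first filled from the frame-level candidate sets, and if the slice header signals ref_pic_list_modification the entries are copied out in the signalled order (rpl->ref[i] = rpl_tmp.ref[idx]); otherwise the temporary list is taken over verbatim with memcpy(). A toy version of that final step, with POCs standing in for HEVCFrame pointers and all names invented:

    #include <string.h>

    #define TOY_MAX_REFS 16

    typedef struct ToyRefPicList {
        int ref[TOY_MAX_REFS];   /* POC values stand in for frame pointers */
        int nb_refs;
    } ToyRefPicList;

    /* Apply an explicit reordering if signalled, otherwise copy as-is. */
    static void finish_slice_rpl(ToyRefPicList *rpl, const ToyRefPicList *tmp,
                                 const int *reorder_idx, int use_reorder)
    {
        if (use_reorder) {
            for (int i = 0; i < tmp->nb_refs; i++)
                rpl->ref[i] = tmp->ref[reorder_idx[i]];
            rpl->nb_refs = tmp->nb_refs;
        } else {
            memcpy(rpl, tmp, sizeof(*rpl));
        }
    }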
369 if ((ref->poc & mask) == poc)
376 "Could not find ref with POC %d\n", poc);
422 int poc, int ref_flag, uint8_t use_msb)
473 if (!short_rps->used[i])
475 else if (i < short_rps->num_negative_pics)
486 for (i = 0; i < long_rps->nb_refs; i++) {
487 int poc = long_rps->poc[i];
512 ret += !!rps->used[i];
514 ret += !!rps->used[i];
518 for (i = 0; i < long_rps->nb_refs; i++)
519 ret += !!long_rps->used[i];
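Lines 512-519 are ff_hevc_frame_nb_refs() counting how many candidate references the current frame actually uses: the "used" entries of the negative and positive halves of the short-term RPS plus those of the long-term RPS. The same counting written against minimal placeholder structs (field and type names are simplified, not the real ShortTermRPS/LongTermRPS layouts):

    /* Simplified stand-ins for the short-term and long-term RPS structures. */
    typedef struct ToyShortTermRPS {
        unsigned char used[32];
        int num_negative_pics;
        int num_delta_pocs;        /* negative + positive pictures */
    } ToyShortTermRPS;

    typedef struct ToyLongTermRPS {
        unsigned char used[32];
        int nb_refs;
    } ToyLongTermRPS;

    static int count_frame_refs(const ToyShortTermRPS *rps,
                                const ToyLongTermRPS *long_rps)
    {
        int ret = 0;

        if (rps) {
            for (int i = 0; i < rps->num_negative_pics; i++)
                ret += !!rps->used[i];                       /* cf. line 512 */
            for (int i = rps->num_negative_pics; i < rps->num_delta_pocs; i++)
                ret += !!rps->used[i];                       /* cf. line 514 */
        }
        if (long_rps)
            for (int i = 0; i < long_rps->nb_refs; i++)
                ret += !!long_rps->used[i];                  /* cf. line 519 */

        return ret;
    }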
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
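av_buffer_unref() is how a reference is dropped throughout this file: it frees the AVBufferRef, frees the underlying buffer if that was the last reference, and sets the pointer to NULL. A minimal usage sketch (demo_buffer_refcount() is just an example function, not FFmpeg code):

    #include <libavutil/buffer.h>

    int demo_buffer_refcount(void)
    {
        AVBufferRef *a = av_buffer_alloc(1024);  /* refcount == 1 */
        if (!a)
            return -1;

        AVBufferRef *b = av_buffer_ref(a);       /* refcount == 2 */
        if (!b) {
            av_buffer_unref(&a);
            return -1;
        }

        av_buffer_unref(&a);  /* a is now NULL, data still owned through b */
        av_buffer_unref(&b);  /* last reference gone, buffer is freed      */
        return 0;
    }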
This structure describes decoded (raw) audio or video data.
static void flush(AVCodecContext *avctx)
int max_dec_pic_buffering
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
void * hwaccel_picture_private
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
uint16_t seq_decode
Sequence counters for decoded and output frames, so that old frames are output first after a POC reset.
static HEVCFrame * alloc_frame(HEVCContext *s)
AVBufferPool * rpl_tab_pool
candidate references for the current frame
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
unsigned int num_negative_pics
enum HEVCNALUnitType nal_unit_type
struct HEVCFrame * ref[HEVC_MAX_REFS]
Multithreading support functions.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
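av_frame_ref() is the standard way to hold on to decoded data without copying it: dst gets the frame properties of src plus new references to the same underlying buffers. A short sketch of the usual pattern (keep_frame() is a hypothetical caller):

    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Take a shared reference to an already-decoded frame. */
    int keep_frame(AVFrame **out, const AVFrame *decoded)
    {
        AVFrame *copy = av_frame_alloc();
        if (!copy)
            return AVERROR(ENOMEM);

        int ret = av_frame_ref(copy, decoded);  /* shares buffers, copies metadata */
        if (ret < 0) {
            av_frame_free(&copy);
            return ret;
        }

        *out = copy;  /* release later with av_frame_free() */
        return 0;
    }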
struct HEVCSPS::@69 temporal_layer[HEVC_MAX_SUB_LAYERS]
uint8_t poc_msb_present[32]
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
int interlaced_frame
The content of the picture is interlaced.
int slice_idx
number of the slice being currently decoded
static void mark_ref(HEVCFrame *frame, int flag)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
unsigned int log2_max_poc_lsb
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
#define HEVC_FRAME_FLAG_LONG_REF
static const uint16_t mask[17]
AVBufferRef * rpl_tab_buf
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
unsigned int log2_ctb_size
simple assert() macros that are a bit more flexible than ISO C assert().
#define HEVC_FRAME_FLAG_SHORT_REF
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
#define HEVC_FRAME_FLAG_OUTPUT
#define HEVC_FRAME_FLAG_BUMPING
AVBufferRef * tab_mvf_buf
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
#define FF_THREAD_FRAME
Decode more than one frame at once.
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
struct HEVCFrame * collocated_ref
static HEVCFrame * find_ref_idx(HEVCContext *s, int poc, uint8_t use_msb)
#define FF_ARRAY_ELEMS(a)
static int init_slice_rpl(HEVCContext *s)
static HEVCFrame * generate_missing_ref(HEVCContext *s, int poc)
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
int * ctb_addr_rs_to_ts
CtbAddrRSToTS.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVBufferRef * hwaccel_priv_buf
uint8_t * data
The data buffer.
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
void ff_hevc_bump_frame(HEVCContext *s)
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
int size
Size of data in bytes.
RefPicList * ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *ref, int x0, int y0)
#define flags(name, subs,...)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
HEVCSEIPictureTiming picture_timing
common internal api header.
static int ref[MAX_W * MAX_W]
AVBufferPool * tab_mvf_pool
int top_field_first
If the content is interlaced, is top field displayed first.
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
unsigned int right_offset
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
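rpl_tab_pool and tab_mvf_pool above are AVBufferPools: per-frame tables are not malloc'd and freed each time but recycled through av_buffer_pool_get(), which hands back a pooled buffer when one is available. A hedged sketch of the pool lifecycle (the 4096-byte size and demo_buffer_pool() name are illustrative only):

    #include <libavutil/buffer.h>

    int demo_buffer_pool(void)
    {
        /* One pool of fixed-size buffers, reused across "frames". */
        AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
        if (!pool)
            return -1;

        for (int i = 0; i < 3; i++) {
            AVBufferRef *buf = av_buffer_pool_get(pool);  /* reuse or allocate */
            if (!buf) {
                av_buffer_pool_uninit(&pool);
                return -1;
            }
            /* ... fill buf->data, attach it to a frame, ... */
            av_buffer_unref(&buf);  /* returns the buffer to the pool */
        }

        av_buffer_pool_uninit(&pool);  /* freed once all buffers are back */
        return 0;
    }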
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
unsigned int bottom_offset
int isLongTerm[HEVC_MAX_REFS]