#include <drm_fourcc.h>
#include <rockchip/mpp_buffer.h>
#include <rockchip/rk_mpi.h>

#define RECEIVE_FRAME_TIMEOUT   100
#define FRAMEGROUP_MAX_FRAMES   16
#define INPUT_MAX_PACKETS       4

/* rkmpp_get_codingtype(): map the AVCodecID to an MPP coding type */
    default:                        return MPP_VIDEO_CodingUnused;
/* rkmpp_get_frameformat(): map the MPP frame format to a DRM fourcc */
    case MPP_FMT_YUV420SP:          return DRM_FORMAT_NV12;
#ifdef DRM_FORMAT_NV12_10
    case MPP_FMT_YUV420SP_10BIT:    return DRM_FORMAT_NV12_10;
/* rkmpp_write_data(): wrap the input buffer in an MppPacket and submit it */
    ret = mpp_packet_init(&packet, buffer, size);

    mpp_packet_set_pts(packet, pts);

    mpp_packet_set_eos(packet);

    ret = decoder->mpi->decode_put_packet(decoder->ctx, packet);

    if (ret == MPP_ERR_BUFFER_FULL) {

    mpp_packet_deinit(&packet);
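A hedged sketch of the submission path shown above: it feeds one compressed buffer to an already-initialised MPP decoder and retries while the input queue is full. The helper name, the ctx/mpi handles and the 10 ms retry delay are assumptions for illustration, not taken from the file.

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <rockchip/rk_mpi.h>

/* Sketch only: "ctx" and "mpi" are assumed to come from mpp_create()/mpp_init(). */
static int put_packet_sketch(MppCtx ctx, MppApi *mpi,
                             void *data, size_t size, int64_t pts)
{
    MppPacket packet = NULL;
    MPP_RET ret;

    if (mpp_packet_init(&packet, data, size) != MPP_OK)
        return -1;
    mpp_packet_set_pts(packet, pts);

    /* decode_put_packet() reports MPP_ERR_BUFFER_FULL while the decoder's
     * input queue is saturated; this example simply waits and retries. */
    do {
        ret = mpi->decode_put_packet(ctx, packet);
        if (ret == MPP_ERR_BUFFER_FULL)
            usleep(10000);
    } while (ret == MPP_ERR_BUFFER_FULL);

    mpp_packet_deinit(&packet);
    return ret == MPP_OK ? 0 : -1;
}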
/* decoder teardown: reset the MPI instance, then destroy the context */
    decoder->mpi->reset(decoder->ctx);

    mpp_destroy(decoder->ctx);
/* rkmpp_init_decoder(): create and configure the MPP decoder instance */
    MppCodingType codectype = MPP_VIDEO_CodingUnused;

    if (codectype == MPP_VIDEO_CodingUnused) {

    ret = mpp_check_support_format(MPP_CTX_DEC, codectype);

    ret = mpp_create(&decoder->ctx, &decoder->mpi);

    ret = mpp_init(decoder->ctx, MPP_CTX_DEC, codectype);

    paramS32 = MPP_POLL_BLOCK;
    ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK, &paramS32);
        av_log(avctx, AV_LOG_ERROR, "Failed to set blocking mode on MPI (code = %d).\n", ret);

    ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK_TIMEOUT, &paramS64);
        av_log(avctx, AV_LOG_ERROR, "Failed to set block timeout on MPI (code = %d).\n", ret);

    ret = mpp_buffer_group_get_internal(&decoder->frame_group, MPP_BUFFER_TYPE_ION);

    ret = decoder->mpi->control(decoder->ctx, MPP_DEC_SET_EXT_BUF_GROUP, decoder->frame_group);
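Condensed into one place, the bring-up order used above is: check codec support, create the context, initialise it for decoding, switch the output path to blocking mode, then hand the decoder an ION buffer group. Below is a minimal sketch of that sequence; MPP_VIDEO_CodingAVC and the collapsed error handling are assumptions for the example.

#include <rockchip/rk_mpi.h>
#include <rockchip/mpp_buffer.h>

/* Sketch only: same call order as the excerpt above, with H.264 assumed. */
static int init_decoder_sketch(MppCtx *ctx, MppApi **mpi, MppBufferGroup *grp)
{
    RK_S32 block = MPP_POLL_BLOCK;

    if (mpp_check_support_format(MPP_CTX_DEC, MPP_VIDEO_CodingAVC) != MPP_OK)
        return -1;                      /* codec not supported on this SoC */
    if (mpp_create(ctx, mpi) != MPP_OK)
        return -1;
    if (mpp_init(*ctx, MPP_CTX_DEC, MPP_VIDEO_CodingAVC) != MPP_OK)
        return -1;

    /* block in decode_get_frame() instead of busy-polling for output */
    if ((*mpi)->control(*ctx, MPP_SET_OUTPUT_BLOCK, &block) != MPP_OK)
        return -1;

    /* let the decoder draw its frame buffers from an internal ION group */
    if (mpp_buffer_group_get_internal(grp, MPP_BUFFER_TYPE_ION) != MPP_OK)
        return -1;
    return (*mpi)->control(*ctx, MPP_DEC_SET_EXT_BUF_GROUP, *grp) == MPP_OK ? 0 : -1;
}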
/* rkmpp_release_frame(): release the MppFrame that backs a returned AVFrame */
    mpp_frame_deinit(&framecontext->frame);
/* rkmpp_retrieve_frame(): poll MPP for a decoded frame and export it as DRM PRIME */
    MppFrame mppframe = NULL;
    MppFrameFormat mppformat;

    ret = decoder->mpi->decode_get_frame(decoder->ctx, &mppframe);
    if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) {

    if (mpp_frame_get_info_change(mppframe)) {
        av_log(avctx, AV_LOG_INFO, "Decoder noticed an info change (%dx%d), format=%d\n",
               (int)mpp_frame_get_width(mppframe), (int)mpp_frame_get_height(mppframe),
               (int)mpp_frame_get_fmt(mppframe));

        avctx->width  = mpp_frame_get_width(mppframe);
        avctx->height = mpp_frame_get_height(mppframe);

        decoder->mpi->control(decoder->ctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL);

        mppformat = mpp_frame_get_fmt(mppframe);
    } else if (mpp_frame_get_eos(mppframe)) {
    } else if (mpp_frame_get_discard(mppframe)) {
    } else if (mpp_frame_get_errinfo(mppframe)) {

    frame->width       = mpp_frame_get_width(mppframe);
    frame->height      = mpp_frame_get_height(mppframe);
    frame->pts         = mpp_frame_get_pts(mppframe);
    frame->color_range = mpp_frame_get_color_range(mppframe);
    frame->color_trc   = mpp_frame_get_color_trc(mppframe);
    frame->colorspace  = mpp_frame_get_colorspace(mppframe);

    mode = mpp_frame_get_mode(mppframe);
    frame->interlaced_frame = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED);
    frame->top_field_first  = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST);

    mppformat = mpp_frame_get_fmt(mppframe);

    buffer = mpp_frame_get_buffer(mppframe);

    /* export the decoded buffer as a single-object, two-plane DRM PRIME descriptor */
    desc->nb_objects      = 1;
    desc->objects[0].fd   = mpp_buffer_get_fd(buffer);
    desc->objects[0].size = mpp_buffer_get_size(buffer);

    layer = &desc->layers[0];
    layer->format    = drmformat;
    layer->nb_planes = 2;

    layer->planes[0].object_index = 0;
    layer->planes[0].offset       = 0;
    layer->planes[0].pitch        = mpp_frame_get_hor_stride(mppframe);

    layer->planes[1].object_index = 0;
    layer->planes[1].offset       = layer->planes[0].pitch * mpp_frame_get_ver_stride(mppframe);
    layer->planes[1].pitch        = layer->planes[0].pitch;

    if (!framecontextref) {

    framecontext->frame = mppframe;

    if (!frame->buf[0]) {

        av_log(avctx, AV_LOG_ERROR, "Failed to retrieve the frame buffer, frame is dropped (code = %d)\n", ret);
        mpp_frame_deinit(&mppframe);

    } else if (ret == MPP_ERR_TIMEOUT) {

    mpp_frame_deinit(&mppframe);
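The plane arithmetic above assumes a semi-planar (NV12-style) surface in a single dma-buf: the luma plane starts at offset 0 with a pitch equal to the horizontal stride, and the interleaved chroma plane follows the padded luma plane at hor_stride * ver_stride. A hedged sketch of the same descriptor fill, with hypothetical helper and parameter names:

#include <stddef.h>
#include <drm_fourcc.h>
#include <libavutil/hwcontext_drm.h>

/* Sketch only: describe one NV12 dma-buf (fd, size bytes) whose rows are
 * hor_stride bytes apart and whose luma plane spans ver_stride rows. */
static void fill_nv12_desc_sketch(AVDRMFrameDescriptor *desc, int fd,
                                  size_t size, int hor_stride, int ver_stride)
{
    AVDRMLayerDescriptor *layer;

    desc->nb_objects      = 1;
    desc->objects[0].fd   = fd;
    desc->objects[0].size = size;

    desc->nb_layers  = 1;
    layer            = &desc->layers[0];
    layer->format    = DRM_FORMAT_NV12;
    layer->nb_planes = 2;

    /* plane 0: luma, starting at the beginning of the buffer */
    layer->planes[0].object_index = 0;
    layer->planes[0].offset       = 0;
    layer->planes[0].pitch        = hor_stride;

    /* plane 1: interleaved CbCr, placed right after the padded luma plane */
    layer->planes[1].object_index = 0;
    layer->planes[1].offset       = (ptrdiff_t)hor_stride * ver_stride;
    layer->planes[1].pitch        = hor_stride;
}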
/* rkmpp_receive_frame(): input-queue accounting against INPUT_MAX_PACKETS */
    RK_S32 usedslots, freeslots;
#define RKMPP_DEC_CLASS(NAME) \
    static const AVClass rkmpp_##NAME##_dec_class = { \
        .class_name = "rkmpp_" #NAME "_dec", \
        .version    = LIBAVUTIL_VERSION_INT, \

#define RKMPP_DEC(NAME, ID, BSFS) \
    RKMPP_DEC_CLASS(NAME) \
    AVCodec ff_##NAME##_rkmpp_decoder = { \
        .name           = #NAME "_rkmpp", \
        .long_name      = NULL_IF_CONFIG_SMALL(#NAME " (rkmpp)"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .priv_data_size = sizeof(RKMPPDecodeContext), \
        .init           = rkmpp_init_decoder, \
        .close          = rkmpp_close_decoder, \
        .receive_frame  = rkmpp_receive_frame, \
        .flush          = rkmpp_flush, \
        .priv_class     = &rkmpp_##NAME##_dec_class, \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
        .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \
        .hw_configs     = rkmpp_hw_configs, \
        .wrapper_name   = "rkmpp", \
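In the full source this macro is expanded once per codec the MPP wrapper supports; the instantiations below are a hedged example of that usage (the codec list and bitstream filter names are assumptions, not taken from this excerpt).

/* Sketch only: example instantiations of the registration macro above. */
RKMPP_DEC(h264, AV_CODEC_ID_H264, "h264_mp4toannexb")
RKMPP_DEC(hevc, AV_CODEC_ID_HEVC, "hevc_mp4toannexb")
RKMPP_DEC(vp8,  AV_CODEC_ID_VP8,  NULL)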