28 #if HAVE_LINUX_DMA_BUF_H 29 #include <linux/dma-buf.h> 30 #include <sys/ioctl.h> 56 hwctx->
fd = open(device, O_RDWR);
60 version = drmGetVersion(hwctx->
fd);
63 "from %s: probably not a DRM device?\n", device);
69 "version %d.%d.%d.\n", device, version->name,
70 version->version_major, version->version_minor,
71 version->version_patchlevel);
73 drmFreeVersion(version);
110 #if HAVE_LINUX_DMA_BUF_H 111 struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_END | map->
sync_flags };
112 ioctl(map->
object[
i], DMA_BUF_IOCTL_SYNC, &sync);
124 #
if HAVE_LINUX_DMA_BUF_H
125 struct dma_buf_sync sync_start = { 0 };
128 int err,
i, p, plane;
138 mmap_prot |= PROT_READ;
140 mmap_prot |= PROT_WRITE;
142 #if HAVE_LINUX_DMA_BUF_H 143 if (
flags & AV_HWFRAME_MAP_READ)
145 if (
flags & AV_HWFRAME_MAP_WRITE)
147 sync_start.flags = DMA_BUF_SYNC_START | map->
sync_flags;
151 for (
i = 0;
i <
desc->nb_objects;
i++) {
152 addr = mmap(
NULL,
desc->objects[
i].size, mmap_prot, MAP_SHARED,
153 desc->objects[
i].fd, 0);
154 if (addr == MAP_FAILED) {
157 "memory: %d.\n",
desc->objects[
i].fd, errno);
165 #if HAVE_LINUX_DMA_BUF_H 168 ioctl(
desc->objects[
i].fd, DMA_BUF_IOCTL_SYNC, &sync_start);
174 for (
i = 0;
i <
desc->nb_layers;
i++) {
186 dst->width =
src->width;
187 dst->height =
src->height;
197 for (
i = 0;
i <
desc->nb_objects;
i++) {
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
The mapped frame will be overwritten completely in subsequent operations, so the current frame data need not be loaded.
This structure describes decoded (raw) audio or video data.
static int drm_map_from(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src, int flags)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int width
The allocated dimensions of the frames in this pool.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
int object[AV_DRM_MAX_PLANES]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
The maximum number of layers/planes in a DRM frame.
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, AVDictionary *opts, int flags)
#define AV_LOG_VERBOSE
Detailed information.
static void drm_unmap_frame(AVHWFramesContext *hwfc, HWMapDescriptor *hwmap)
int object_index
Index of the object containing this plane in the objects array of the enclosing frame descriptor.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
ptrdiff_t pitch
Pitch (linesize) of this plane.
static int drm_transfer_get_formats(AVHWFramesContext *ctx, enum AVHWFrameTransferDirection dir, enum AVPixelFormat **formats)
The mapping must be writeable.
simple assert() macros that are a bit more flexible than ISO C assert().
int nb_planes
Number of planes in the layer.
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES]
Array of planes in this layer.
These buffered frames must be flushed immediately if a new input produces new output. When filter_frame is called, the filter must not call request_frame to get more input: it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing, a frame.
The mapping must be readable.
int format
Format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
const HWContextType ff_hwcontext_type_drm
void * priv
Hardware-specific private data associated with the mapping.
uint8_t * data
The data buffer.
static int drm_transfer_data_from(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src)
int ff_hwframe_map_create(AVBufferRef *hwframe_ref, AVFrame *dst, const AVFrame *src, void(*unmap)(AVHWFramesContext *ctx, HWMapDescriptor *hwmap), void *priv)
DRM-managed buffers exposed through PRIME buffer sharing.
This struct describes a set or pool of "hardware" frames (i.e.
static int drm_transfer_data_to(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src)
const VDPAUPixFmtMap * map
API-specific header for AV_HWDEVICE_TYPE_DRM.
static enum AVPixelFormat pix_fmts[]
#define flags(name, subs,...)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int fd
File descriptor of DRM device.
void * address[AV_DRM_MAX_PLANES]
static void drm_device_free(AVHWDeviceContext *hwdev)
AVHWFrameTransferDirection
AVBufferPool * pool
A pool from which the frames are allocated by av_hwframe_get_buffer().
size_t length[AV_DRM_MAX_PLANES]
ptrdiff_t offset
Offset within that object of this plane.
static int drm_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
void(* free)(struct AVHWDeviceContext *ctx)
This field may be set by the caller before calling av_hwdevice_ctx_init().
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
#define av_malloc_array(a, b)
Filter documentation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
AVPixelFormat
Pixel format.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static int drm_map_frame(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src, int flags)