Go to the documentation of this file.
28 #if HAVE_LINUX_DMA_BUF_H
29 #include <linux/dma-buf.h>
30 #include <sys/ioctl.h>
57 hwctx->
fd = open(device, O_RDWR);
64 "from %s: probably not a DRM device?\n", device);
70 "version %d.%d.%d.\n", device,
version->name,
87 frame->data[0] = (uint8_t*)
frame->buf[0]->data;
110 for (
int i = 0;
i <
map->nb_regions;
i++) {
111 #if HAVE_LINUX_DMA_BUF_H
112 struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_END |
map->sync_flags };
113 ioctl(
map->object[
i], DMA_BUF_IOCTL_SYNC, &sync);
115 munmap(
map->address[
i],
map->length[
i]);
125 #
if HAVE_LINUX_DMA_BUF_H
126 struct dma_buf_sync sync_start = { 0 };
129 int err,
i, p, plane;
139 mmap_prot |= PROT_READ;
141 mmap_prot |= PROT_WRITE;
143 #if HAVE_LINUX_DMA_BUF_H
145 map->sync_flags |= DMA_BUF_SYNC_READ;
147 map->sync_flags |= DMA_BUF_SYNC_WRITE;
148 sync_start.flags = DMA_BUF_SYNC_START |
map->sync_flags;
152 for (
i = 0;
i <
desc->nb_objects;
i++) {
153 addr = mmap(
NULL,
desc->objects[
i].size, mmap_prot, MAP_SHARED,
154 desc->objects[
i].fd, 0);
155 if (addr == MAP_FAILED) {
158 "memory: %d.\n",
desc->objects[
i].fd, errno);
162 map->address[
i] = addr;
166 #if HAVE_LINUX_DMA_BUF_H
169 ioctl(
desc->objects[
i].fd, DMA_BUF_IOCTL_SYNC, &sync_start);
175 for (
i = 0;
i <
desc->nb_layers;
i++) {
188 dst->height =
src->height;
198 for (
i = 0;
i <
desc->nb_objects;
i++) {
200 munmap(
map->address[
i],
map->length[
i]);
235 map->format =
dst->format;
242 map->height =
dst->height;
266 map->format =
src->format;
274 map->height =
src->height;
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
AVPixelFormat
Pixel format.
In filters, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format and sample rate. The lists are not just lists — they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
@ AV_HWFRAME_MAP_OVERWRITE
The mapped frame will be overwritten completely in subsequent operations, so the current frame data need not be loaded.
static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, AVDictionary *opts, int flags)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
@ AV_PIX_FMT_DRM_PRIME
DRM-managed buffers exposed through PRIME buffer sharing.
const HWContextType ff_hwcontext_type_drm
#define AV_LOG_VERBOSE
Detailed information.
int ff_hwframe_map_create(AVBufferRef *hwframe_ref, AVFrame *dst, const AVFrame *src, void(*unmap)(AVHWFramesContext *ctx, HWMapDescriptor *hwmap), void *priv)
void * priv
Hardware-specific private data associated with the mapping.
int width
The allocated dimensions of the frames in this pool.
static int drm_transfer_get_formats(AVHWFramesContext *ctx, enum AVHWFrameTransferDirection dir, enum AVPixelFormat **formats)
static int drm_transfer_data_to(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src)
int fd
File descriptor of DRM device.
void(* free)(struct AVHWDeviceContext *ctx)
This field may be set by the caller before calling av_hwdevice_ctx_init().
int nb_planes
Number of planes in the layer.
AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES]
Array of planes in this layer.
static void drm_device_free(AVHWDeviceContext *hwdev)
ptrdiff_t offset
Offset within that object of this plane.
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e. state that is not tied to a concrete processing configuration.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
@ AV_HWFRAME_MAP_READ
The mapping must be readable.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
AVBufferPool * pool
A pool from which the frames are allocated by av_hwframe_get_buffer().
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
static int drm_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
@ AV_DRM_MAX_PLANES
The maximum number of layers/planes in a DRM frame.
@ AV_HWFRAME_MAP_WRITE
The mapping must be writeable.
static int drm_map_from(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src, int flags)
static int drm_map_frame(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src, int flags)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
#define i(width, name, range_min, range_max)
#define av_malloc_array(a, b)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors, if available on the CPU), and zero all the bytes of the block.
int object_index
Index of the object containing this plane in the objects array of the enclosing frame descriptor.
AVHWFrameTransferDirection
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
These buffered frames must be flushed immediately if a new input produces new output; if the input frame is not enough to produce output, the filter must not call request_frame to get more — it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
static void drm_unmap_frame(AVHWFramesContext *hwfc, HWMapDescriptor *hwmap)
size_t length[AV_DRM_MAX_PLANES]
void * address[AV_DRM_MAX_PLANES]
static int drm_transfer_data_from(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src)
const VDPAUPixFmtMap * map
#define flags(name, subs,...)
ptrdiff_t pitch
Pitch (linesize) of this plane.