24 #include <linux/videodev2.h> 25 #include <sys/ioctl.h> 42 struct v4l2_selection selection = { 0 };
59 ret = ioctl(s->
fd, VIDIOC_G_FMT, &capture->
format);
70 selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
73 ret = ioctl(s->
fd, VIDIOC_S_SELECTION, &selection);
75 ret = ioctl(s->
fd, VIDIOC_G_SELECTION, &selection);
79 av_log(avctx,
AV_LOG_DEBUG,
"crop output %dx%d\n", selection.r.width, selection.r.height);
81 capture->
height = selection.r.height;
82 capture->
width = selection.r.width;
107 struct v4l2_event_subscription sub;
114 memset(&sub, 0,
sizeof(sub));
115 sub.type = V4L2_EVENT_SOURCE_CHANGE;
116 ret = ioctl(s->
fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
118 if (output->height == 0 || output->width == 0) {
120 "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n" 121 "you must provide codec_height and codec_width on input\n");
126 memset(&sub, 0,
sizeof(sub));
127 sub.type = V4L2_EVENT_EOS;
128 ret = ioctl(s->
fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
131 "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");
153 if (ret < 0 && ret !=
AVERROR(EAGAIN))
219 #define OFFSET(x) offsetof(V4L2m2mPriv, x) 220 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM 224 {
"num_capture_buffers",
"Number of buffers in the capture context",
229 #define M2MDEC_CLASS(NAME) \ 230 static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \ 231 .class_name = #NAME "_v4l2m2m_decoder", \ 232 .item_name = av_default_item_name, \ 234 .version = LIBAVUTIL_VERSION_INT, \ 237 #define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \ 239 AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \ 240 .name = #NAME "_v4l2m2m" , \ 241 .long_name = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"), \ 242 .type = AVMEDIA_TYPE_VIDEO, \ 244 .priv_data_size = sizeof(V4L2m2mPriv), \ 245 .priv_class = &v4l2_m2m_ ## NAME ## _dec_class, \ 246 .init = v4l2_decode_init, \ 247 .receive_frame = v4l2_receive_frame, \ 248 .close = v4l2_decode_close, \ 250 .capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \ 251 .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP, \ 252 .wrapper_name = "v4l2m2m", \
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
static const AVOption options[]
This structure describes decoded (raw) audio or video data.
int coded_width
Bitstream width / height, may be different from width/height e.g.
#define AV_LOG_WARNING
Something somehow does not look correct.
int ff_v4l2_context_init(V4L2Context *ctx)
Initializes a V4L2Context.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int width
Width and height of the frames it produces (in case of a capture context, e.g.
int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv)
Releases all the codec resources if all AVBufferRefs have been returned to the ctx.
int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
Probes the video nodes looking for the required codec capabilities.
enum AVCodecID av_codec_id
AVCodecID corresponding to this buffer context.
int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
Allocate a new context and references for a V4L2 M2M instance.
static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
filter_frame: For filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time, except in a reentrant way. If the input frame is enough to produce output, the filter should push the output frames on the output link immediately.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
#define AVERROR_EOF
End of file.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
enum AVPixelFormat av_pix_fmt
AVPixelFormat corresponding to this buffer context.
V4L2Buffer * buffers
Indexed array of V4L2Buffers.
int streamon
Whether the stream has been started (VIDIOC_STREAMON has been sent).
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
Enqueues a buffer to a V4L2Context from an AVPacket.
these buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: For filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return 0 if it could produce a frame, or at least make progress towards producing a frame.
preferred ID for MPEG-1/2 video decoding
struct v4l2_format format
Format returned by the driver after initializing the buffer context.
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
static int v4l2_try_start(AVCodecContext *avctx)
Libavcodec external API header.
main external API structure.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
Dequeues a buffer from a V4L2Context to an AVFrame.
#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name)
static int v4l2_prepare_decoder(V4L2m2mContext *s)
static av_cold int v4l2_decode_init(AVCodecContext *avctx)
static av_cold int v4l2_decode_close(AVCodecContext *avctx)
common internal api header.
#define V4L_M2M_DEFAULT_OPTS
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, filters declare the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
enum v4l2_buf_type type
Type of this buffer context.