FFmpeg
v4l2_m2m_enc.c
/*
 * V4L2 mem2mem encoders
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <search.h>
#include "libavcodec/avcodec.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/opt.h"
#include "v4l2_context.h"
#include "v4l2_m2m.h"
#include "v4l2_fmt.h"

#define MPEG_CID(x) V4L2_CID_MPEG_VIDEO_##x
#define MPEG_VIDEO(x) V4L2_MPEG_VIDEO_##x

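/*
 * Program the frame interval on the V4L2 output (raw frame) queue via
 * VIDIOC_S_PARM. V4L2 expresses this as time per frame, i.e. the inverse
 * of the frame rate, so the caller passes the AVRational swapped.
 */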
static inline void v4l2_set_timeperframe(V4L2m2mContext *s, unsigned int num, unsigned int den)
{
    struct v4l2_streamparm parm = { 0 };

    parm.type = V4L2_TYPE_IS_MULTIPLANAR(s->output.type) ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT;
    parm.parm.output.timeperframe.denominator = den;
    parm.parm.output.timeperframe.numerator = num;

    if (ioctl(s->fd, VIDIOC_S_PARM, &parm) < 0)
        av_log(s->avctx, AV_LOG_WARNING, "Failed to set timeperframe\n");
}

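/*
 * Small wrappers around VIDIOC_S_EXT_CTRLS / VIDIOC_G_EXT_CTRLS for a
 * single control of the codec ("MPEG") control class. Failures are only
 * logged as warnings, since the set of supported controls is driver
 * dependent.
 */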
static inline void v4l2_set_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int value, const char *name)
{
    struct v4l2_ext_controls ctrls = { { 0 } };
    struct v4l2_ext_control ctrl = { 0 };

    /* set ctrls */
    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
    ctrls.controls = &ctrl;
    ctrls.count = 1;

    /* set ctrl */
    ctrl.value = value;
    ctrl.id = id;

    if (ioctl(s->fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
        av_log(s->avctx, AV_LOG_WARNING, "Failed to set %s: %s\n", name, strerror(errno));
    else
        av_log(s->avctx, AV_LOG_DEBUG, "Encoder: %s = %d\n", name, value);
}

static inline int v4l2_get_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int *value, const char *name)
{
    struct v4l2_ext_controls ctrls = { { 0 } };
    struct v4l2_ext_control ctrl = { 0 };
    int ret;

    /* set ctrls */
    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
    ctrls.controls = &ctrl;
    ctrls.count = 1;

    /* set ctrl */
    ctrl.id = id;

    ret = ioctl(s->fd, VIDIOC_G_EXT_CTRLS, &ctrls);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_WARNING, "Failed to get %s\n", name);
        return ret;
    }

    *value = ctrl.value;

    return 0;
}

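/*
 * Translate libavcodec FF_PROFILE_* values into the corresponding
 * V4L2_MPEG_VIDEO_*_PROFILE_* control values. Both lookups return
 * AVERROR(ENOENT) when the profile has no V4L2 equivalent.
 */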
static inline int v4l2_h264_profile_from_ff(int p)
{
    static const struct h264_profile {
        unsigned int ffmpeg_val;
        unsigned int v4l2_val;
    } profile[] = {
        { FF_PROFILE_H264_CONSTRAINED_BASELINE, MPEG_VIDEO(H264_PROFILE_CONSTRAINED_BASELINE) },
        { FF_PROFILE_H264_HIGH_444_PREDICTIVE, MPEG_VIDEO(H264_PROFILE_HIGH_444_PREDICTIVE) },
        { FF_PROFILE_H264_HIGH_422_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_422_INTRA) },
        { FF_PROFILE_H264_HIGH_444_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_444_INTRA) },
        { FF_PROFILE_H264_HIGH_10_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_10_INTRA) },
        { FF_PROFILE_H264_HIGH_422, MPEG_VIDEO(H264_PROFILE_HIGH_422) },
        { FF_PROFILE_H264_BASELINE, MPEG_VIDEO(H264_PROFILE_BASELINE) },
        { FF_PROFILE_H264_EXTENDED, MPEG_VIDEO(H264_PROFILE_EXTENDED) },
        { FF_PROFILE_H264_HIGH_10, MPEG_VIDEO(H264_PROFILE_HIGH_10) },
        { FF_PROFILE_H264_MAIN, MPEG_VIDEO(H264_PROFILE_MAIN) },
        { FF_PROFILE_H264_HIGH, MPEG_VIDEO(H264_PROFILE_HIGH) },
    };
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(profile); i++) {
        if (profile[i].ffmpeg_val == p)
            return profile[i].v4l2_val;
    }
    return AVERROR(ENOENT);
}

static inline int v4l2_mpeg4_profile_from_ff(int p)
{
    static const struct mpeg4_profile {
        unsigned int ffmpeg_val;
        unsigned int v4l2_val;
    } profile[] = {
        { FF_PROFILE_MPEG4_ADVANCED_CODING, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY) },
        { FF_PROFILE_MPEG4_ADVANCED_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_SIMPLE) },
        { FF_PROFILE_MPEG4_SIMPLE_SCALABLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE_SCALABLE) },
        { FF_PROFILE_MPEG4_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE) },
        { FF_PROFILE_MPEG4_CORE, MPEG_VIDEO(MPEG4_PROFILE_CORE) },
    };
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(profile); i++) {
        if (profile[i].ffmpeg_val == p)
            return profile[i].v4l2_val;
    }
    return AVERROR(ENOENT);
}

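/*
 * B-frames are not supported by this wrapper yet: request zero B-frames
 * from the driver and read the control back to confirm it was accepted.
 */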
static int v4l2_check_b_frame_support(V4L2m2mContext *s)
{
    if (s->avctx->max_b_frames)
        av_log(s->avctx, AV_LOG_WARNING, "Encoder does not support b-frames yet\n");

    v4l2_set_ext_ctrl(s, MPEG_CID(B_FRAMES), 0, "number of B-frames");
    v4l2_get_ext_ctrl(s, MPEG_CID(B_FRAMES), &s->avctx->max_b_frames, "number of B-frames");
    if (s->avctx->max_b_frames == 0)
        return 0;

    avpriv_report_missing_feature(s->avctx, "DTS/PTS calculation for V4L2 encoding");

    return AVERROR_PATCHWELCOME;
}

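/*
 * Push the relevant AVCodecContext parameters (frame rate, header mode,
 * bit rate, GOP size, profile and quantizer range) to the driver. The
 * quantizer range is codec dependent; a warning is emitted if it differs
 * from the qmin/qmax requested by the user.
 */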
static int v4l2_prepare_encoder(V4L2m2mContext *s)
{
    AVCodecContext *avctx = s->avctx;
    int qmin_cid, qmax_cid, qmin, qmax;
    int ret, val;

    /**
     * requirements
     */
    ret = v4l2_check_b_frame_support(s);
    if (ret)
        return ret;

    /**
     * settings
     */
    /* V4L2 expects the frame period, i.e. the inverse of the frame rate */
    if (avctx->framerate.num || avctx->framerate.den)
        v4l2_set_timeperframe(s, avctx->framerate.den, avctx->framerate.num);

    /* set ext ctrls */
    v4l2_set_ext_ctrl(s, MPEG_CID(HEADER_MODE), MPEG_VIDEO(HEADER_MODE_SEPARATE), "header mode");
    v4l2_set_ext_ctrl(s, MPEG_CID(BITRATE), avctx->bit_rate, "bit rate");
    v4l2_set_ext_ctrl(s, MPEG_CID(GOP_SIZE), avctx->gop_size, "gop size");

    av_log(avctx, AV_LOG_DEBUG,
           "Encoder Context: id (%d), profile (%d), frame rate(%d/%d), number b-frames (%d), "
           "gop size (%d), bit rate (%"PRId64"), qmin (%d), qmax (%d)\n",
           avctx->codec_id, avctx->profile, avctx->framerate.num, avctx->framerate.den,
           avctx->max_b_frames, avctx->gop_size, avctx->bit_rate, avctx->qmin, avctx->qmax);

    switch (avctx->codec_id) {
    case AV_CODEC_ID_H264:
        val = v4l2_h264_profile_from_ff(avctx->profile);
        if (val < 0)
            av_log(avctx, AV_LOG_WARNING, "h264 profile not found\n");
        else
            v4l2_set_ext_ctrl(s, MPEG_CID(H264_PROFILE), val, "h264 profile");
        qmin_cid = MPEG_CID(H264_MIN_QP);
        qmax_cid = MPEG_CID(H264_MAX_QP);
        qmin = 0;
        qmax = 51;
        break;
    case AV_CODEC_ID_MPEG4:
        val = v4l2_mpeg4_profile_from_ff(avctx->profile);
        if (val < 0)
            av_log(avctx, AV_LOG_WARNING, "mpeg4 profile not found\n");
        else
            v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_PROFILE), val, "mpeg4 profile");
        qmin_cid = MPEG_CID(MPEG4_MIN_QP);
        qmax_cid = MPEG_CID(MPEG4_MAX_QP);
        if (avctx->flags & AV_CODEC_FLAG_QPEL)
            v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_QPEL), 1, "qpel");
        qmin = 1;
        qmax = 31;
        break;
    case AV_CODEC_ID_H263:
        qmin_cid = MPEG_CID(H263_MIN_QP);
        qmax_cid = MPEG_CID(H263_MAX_QP);
        qmin = 1;
        qmax = 31;
        break;
    case AV_CODEC_ID_VP8:
        qmin_cid = MPEG_CID(VPX_MIN_QP);
        qmax_cid = MPEG_CID(VPX_MAX_QP);
        qmin = 0;
        qmax = 127;
        break;
    case AV_CODEC_ID_VP9:
        qmin_cid = MPEG_CID(VPX_MIN_QP);
        qmax_cid = MPEG_CID(VPX_MAX_QP);
        qmin = 0;
        qmax = 255;
        break;
    default:
        return 0;
    }

    if (qmin != avctx->qmin || qmax != avctx->qmax)
        av_log(avctx, AV_LOG_WARNING, "Encoder adjusted: qmin (%d), qmax (%d)\n", qmin, qmax);

    v4l2_set_ext_ctrl(s, qmin_cid, qmin, "minimum video quantizer scale");
    v4l2_set_ext_ctrl(s, qmax_cid, qmax, "maximum video quantizer scale");

    return 0;
}

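/*
 * send_frame half of the encode API: enqueue one raw AVFrame on the
 * output queue (a NULL frame signals end of stream). If the incoming
 * frame is an I picture, ask the driver to force a key frame for it.
 */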
static int v4l2_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
    V4L2Context *const output = &s->output;

#ifdef V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME
    if (frame && frame->pict_type == AV_PICTURE_TYPE_I)
        v4l2_set_ext_ctrl(s, MPEG_CID(FORCE_KEY_FRAME), 0, "force key frame");
#endif

    return ff_v4l2_context_enqueue_frame(output, frame);
}

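/*
 * receive_packet half of the encode API: start streaming on both queues
 * if needed, then dequeue one encoded packet from the capture queue.
 * Once draining has begun, only the dequeue step is performed.
 */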
static int v4l2_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
    V4L2Context *const capture = &s->capture;
    V4L2Context *const output = &s->output;
    int ret;

    if (s->draining)
        goto dequeue;

    if (!output->streamon) {
        ret = ff_v4l2_context_set_status(output, VIDIOC_STREAMON);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMON failed on output context\n");
            return ret;
        }
    }

    if (!capture->streamon) {
        ret = ff_v4l2_context_set_status(capture, VIDIOC_STREAMON);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMON failed on capture context\n");
            return ret;
        }
    }

dequeue:
    return ff_v4l2_context_dequeue_packet(capture, avpkt);
}

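/*
 * Encoder init: create the m2m context, describe both queues (output
 * carries the raw frames, capture the encoded bitstream), probe the
 * device, and verify that the pixel format negotiated by the driver
 * matches the one configured on the AVCodecContext.
 */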
static av_cold int v4l2_encode_init(AVCodecContext *avctx)
{
    V4L2Context *capture, *output;
    V4L2m2mContext *s;
    V4L2m2mPriv *priv = avctx->priv_data;
    enum AVPixelFormat pix_fmt_output;
    uint32_t v4l2_fmt_output;
    int ret;

    ret = ff_v4l2_m2m_create_context(priv, &s);
    if (ret < 0)
        return ret;

    capture = &s->capture;
    output = &s->output;

    /* common settings output/capture */
    output->height = capture->height = avctx->height;
    output->width = capture->width = avctx->width;

    /* output context */
    output->av_codec_id = AV_CODEC_ID_RAWVIDEO;
    output->av_pix_fmt = avctx->pix_fmt;

    /* capture context */
    capture->av_codec_id = avctx->codec_id;
    capture->av_pix_fmt = AV_PIX_FMT_NONE;

    s->avctx = avctx;
    ret = ff_v4l2_m2m_codec_init(priv);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "can't configure encoder\n");
        return ret;
    }

    if (V4L2_TYPE_IS_MULTIPLANAR(output->type))
        v4l2_fmt_output = output->format.fmt.pix_mp.pixelformat;
    else
        v4l2_fmt_output = output->format.fmt.pix.pixelformat;

    pix_fmt_output = ff_v4l2_format_v4l2_to_avfmt(v4l2_fmt_output, AV_CODEC_ID_RAWVIDEO);
    if (pix_fmt_output != avctx->pix_fmt) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt_output);
        av_log(avctx, AV_LOG_ERROR, "Encoder requires %s pixel format.\n", desc->name);
        return AVERROR(EINVAL);
    }

    return v4l2_prepare_encoder(s);
}

static av_cold int v4l2_encode_close(AVCodecContext *avctx)
{
    return ff_v4l2_m2m_codec_end(avctx->priv_data);
}

#define OFFSET(x) offsetof(V4L2m2mPriv, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    V4L_M2M_DEFAULT_OPTS,
    { "num_capture_buffers", "Number of buffers in the capture context",
        OFFSET(num_capture_buffers), AV_OPT_TYPE_INT, {.i64 = 4 }, 4, INT_MAX, FLAGS },
    { NULL },
};

#define M2MENC_CLASS(NAME) \
    static const AVClass v4l2_m2m_ ## NAME ## _enc_class = { \
        .class_name = #NAME "_v4l2m2m_encoder", \
        .item_name  = av_default_item_name, \
        .option     = options, \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define M2MENC(NAME, LONGNAME, CODEC) \
    M2MENC_CLASS(NAME) \
    AVCodec ff_ ## NAME ## _v4l2m2m_encoder = { \
        .name           = #NAME "_v4l2m2m", \
        .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " encoder wrapper"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = CODEC, \
        .priv_data_size = sizeof(V4L2m2mPriv), \
        .priv_class     = &v4l2_m2m_ ## NAME ## _enc_class, \
        .init           = v4l2_encode_init, \
        .send_frame     = v4l2_send_frame, \
        .receive_packet = v4l2_receive_packet, \
        .close          = v4l2_encode_close, \
        .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY, \
        .wrapper_name   = "v4l2m2m", \
    };

M2MENC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4);
M2MENC(h263,  "H.263", AV_CODEC_ID_H263);
M2MENC(h264,  "H.264", AV_CODEC_ID_H264);
M2MENC(hevc,  "HEVC",  AV_CODEC_ID_HEVC);
M2MENC(vp8,   "VP8",   AV_CODEC_ID_VP8);
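/*
 * Illustrative usage (file names and parameter values are only examples):
 *
 *   ffmpeg -i input.mp4 -c:v h264_v4l2m2m -b:v 2M -g 60 \
 *          -num_capture_buffers 8 output.mp4
 *
 * -c:v selects the h264_v4l2m2m wrapper declared above, -b:v and -g map
 * to the BITRATE and GOP_SIZE controls set in v4l2_prepare_encoder(),
 * and num_capture_buffers is the private option defined in options[].
 */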