FFmpeg
qsvenc_av1.c
Go to the documentation of this file.
/*
 * Intel MediaSDK QSV based AV1 encoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 
21 
#include <stdint.h>
#include <sys/types.h>

#include <mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "bsf.h"
#include "qsv.h"
#include "qsvenc.h"
37 
38 typedef struct QSVAV1EncContext {
39  AVClass *class;
43 
45  const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
46 {
47  QSVAV1EncContext *q = avctx->priv_data;
48  AVFrameSideData *sd;
49 
50  if (!frame || !QSV_RUNTIME_VERSION_ATLEAST(q->qsv.ver, 2, 11))
51  return 0;
52 
54  if (sd) {
56  if (mdm->has_primaries && mdm->has_luminance) {
57  const int chroma_den = 1 << 16;
58  const int max_luma_den = 1 << 8;
59  const int min_luma_den = 1 << 14;
60  mfxExtMasteringDisplayColourVolume *mdcv = av_mallocz(sizeof(*mdcv));
61  if (!mdcv)
62  return AVERROR(ENOMEM);
63 
64  mdcv->Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
65  mdcv->Header.BufferSz = sizeof(*mdcv);
66 
67  for (int i = 0; i < 3; i++) {
68  mdcv->DisplayPrimariesX[i] =
69  av_rescale(mdm->display_primaries[i][0].num, chroma_den,
70  mdm->display_primaries[i][0].den);
71  mdcv->DisplayPrimariesY[i] =
72  av_rescale(mdm->display_primaries[i][1].num, chroma_den,
73  mdm->display_primaries[i][1].den);
74  }
75 
76  mdcv->WhitePointX =
77  av_rescale(mdm->white_point[0].num, chroma_den,
78  mdm->white_point[0].den);
79  mdcv->WhitePointY =
80  av_rescale(mdm->white_point[1].num, chroma_den,
81  mdm->white_point[1].den);
82 
83  mdcv->MaxDisplayMasteringLuminance =
84  av_rescale(mdm->max_luminance.num, max_luma_den,
85  mdm->max_luminance.den);
86  mdcv->MinDisplayMasteringLuminance =
87  av_rescale(mdm->min_luminance.num, min_luma_den,
88  mdm->min_luminance.den);
89 
90  enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)mdcv;
91  }
92  }
93 
95  if (sd) {
97  mfxExtContentLightLevelInfo *clli = av_mallocz(sizeof(*clli));
98  if (!clli)
99  return AVERROR(ENOMEM);
100 
101  clli->Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
102  clli->Header.BufferSz = sizeof(*clli);
103 
104  clli->MaxContentLightLevel = clm->MaxCLL;
105  clli->MaxPicAverageLightLevel = clm->MaxFALL;
106 
107  enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)clli;
108  }
109 
110  return 0;
111 }
112 
114 {
115  QSVAV1EncContext *q = avctx->priv_data;
116  int ret;
117 
118  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
119  const AVBitStreamFilter *filter = av_bsf_get_by_name("extract_extradata");
120  if (!filter) {
121  av_log(avctx, AV_LOG_ERROR, "Cannot get extract_extradata bitstream filter\n");
122  return AVERROR_BUG;
123  }
125  if (ret < 0)
126  return ret;
128  if (ret < 0)
129  return ret;
131  if (ret < 0)
132  return ret;
133  }
134 
136 
137  return ff_qsv_enc_init(avctx, &q->qsv);
138 }
139 
141  const AVFrame *frame, int *got_packet)
142 {
143  QSVAV1EncContext *q = avctx->priv_data;
144  int ret;
145 
146  ret = ff_qsv_encode(avctx, &q->qsv, pkt, frame, got_packet);
147  if (ret < 0)
148  return ret;
149 
150  if (*got_packet && avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
152  if (ret < 0) {
153  av_log(avctx, AV_LOG_ERROR, "extract_extradata filter "
154  "failed to send input packet\n");
155  return ret;
156  }
157 
159  if (ret < 0) {
160  av_log(avctx, AV_LOG_ERROR, "extract_extradata filter "
161  "failed to receive output packet\n");
162  return ret;
163  }
164  }
165 
166  return ret;
167 }
168 
170 {
171  QSVAV1EncContext *q = avctx->priv_data;
172 
174 
175  return ff_qsv_enc_close(avctx, &q->qsv);
176 }
177 
178 #define OFFSET(x) offsetof(QSVAV1EncContext, x)
179 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
180 static const AVOption options[] = {
188  { "profile", NULL, OFFSET(qsv.profile), AV_OPT_TYPE_INT, { .i64 = MFX_PROFILE_UNKNOWN }, 0, INT_MAX, VE, .unit = "profile" },
189  { "unknown" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
190  { "main" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_AV1_MAIN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
191  { "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
192  { "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
193  { "look_ahead_depth", "Depth of look ahead in number frames, available when extbrc option is enabled", OFFSET(qsv.look_ahead_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, VE },
194  { NULL },
195 };
196 
197 static const AVClass class = {
198  .class_name = "av1_qsv encoder",
199  .item_name = av_default_item_name,
200  .option = options,
202 };
203 
205  { "b", "0" },
206  { "g", "-1" },
207  { "bf", "-1" },
208  { "refs", "0" },
209  { NULL },
210 };
211 
213  .p.name = "av1_qsv",
214  .p.long_name = NULL_IF_CONFIG_SMALL("AV1 (Intel Quick Sync Video acceleration)"),
215  .priv_data_size = sizeof(QSVAV1EncContext),
216  .p.type = AVMEDIA_TYPE_VIDEO,
217  .p.id = AV_CODEC_ID_AV1,
218  .init = qsv_enc_init,
220  .close = qsv_enc_close,
221  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
222  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
225  AV_PIX_FMT_NONE },
226  .p.priv_class = &class,
227  .defaults = qsv_enc_defaults,
228  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
229  .p.wrapper_name = "qsv",
230  .hw_configs = ff_qsv_enc_hw_configs,
231 };
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
AVBSFContext::par_in
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:90
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:947
QSV_OPTION_ADAPTIVE_B
#define QSV_OPTION_ADAPTIVE_B
Definition: qsvenc.h:101
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVOption
AVOption.
Definition: opt.h:357
FFCodec
Definition: codec_internal.h:126
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
qsv_enc_defaults
static const FFCodecDefault qsv_enc_defaults[]
Definition: qsvenc_av1.c:204
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:52
AVBSFContext
The bitstream filter state.
Definition: bsf.h:68
QSVAV1EncContext::extra_data_bsf
AVBSFContext * extra_data_bsf
Definition: qsvenc_av1.c:40
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:338
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:107
FFCodecDefault
Definition: codec_internal.h:96
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
bsf.h
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:295
AVRational::num
int num
Numerator.
Definition: rational.h:59
qsv_enc_close
static av_cold int qsv_enc_close(AVCodecContext *avctx)
Definition: qsvenc_av1.c:169
ff_qsv_enc_hw_configs
const AVCodecHWConfigInternal *const ff_qsv_enc_hw_configs[]
Definition: qsvenc.c:2760
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
OFFSET
#define OFFSET(x)
Definition: qsvenc_av1.c:178
QSVEncContext
Definition: qsvenc.h:157
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
qsvenc.h
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:63
QSVAV1EncContext
Definition: qsvenc_av1.c:38
av_bsf_alloc
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:104
if
if(ret)
Definition: filter_design.txt:179
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:149
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
QSV_OPTION_LOW_DELAY_BRC
#define QSV_OPTION_LOW_DELAY_BRC
Definition: qsvenc.h:113
NULL
#define NULL
Definition: coverity.c:32
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:280
qsv.h
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:230
options
static const AVOption options[]
Definition: qsvenc_av1.c:180
QSV_COMMON_OPTS
#define QSV_COMMON_OPTS
Definition: qsvenc.h:56
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
Definition: frame.h:120
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
QSV_OPTION_ADAPTIVE_I
#define QSV_OPTION_ADAPTIVE_I
Definition: qsvenc.h:98
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
codec_internal.h
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:202
qsv_enc_frame
static int qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: qsvenc_av1.c:140
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
qsv_av1_set_encode_ctrl
static int qsv_av1_set_encode_ctrl(AVCodecContext *avctx, const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
Definition: qsvenc_av1.c:44
qsv_enc_init
static av_cold int qsv_enc_init(AVCodecContext *avctx)
Definition: qsvenc_av1.c:113
QSV_OPTION_EXTBRC
#define QSV_OPTION_EXTBRC
Definition: qsvenc.h:95
AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
Definition: frame.h:137
ff_qsv_enc_close
int ff_qsv_enc_close(AVCodecContext *avctx, QSVEncContext *q)
Definition: qsvenc.c:2708
QSVEncContext::set_encode_ctrl_cb
SetEncodeCtrlCB * set_encode_ctrl_cb
Definition: qsvenc.h:268
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
common.h
ff_av1_qsv_encoder
FFCodec ff_av1_qsv_encoder
Definition: qsvenc_av1.c:212
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
avcodec.h
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
QSV_OPTION_MAX_FRAME_SIZE
#define QSV_OPTION_MAX_FRAME_SIZE
Definition: qsvenc.h:81
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVBitStreamFilter
Definition: bsf.h:111
QSVAV1EncContext::qsv
QSVEncContext qsv
Definition: qsvenc_av1.c:41
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
QSVEncContext::ver
mfxVersion ver
Definition: qsvenc.h:209
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:528
VE
#define VE
Definition: qsvenc_av1.c:179
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
ff_qsv_enc_init
int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
Definition: qsvenc.c:1635
mastering_display_metadata.h
AV_CODEC_CAP_HYBRID
#define AV_CODEC_CAP_HYBRID
Codec is potentially backed by a hardware implementation, but not necessarily.
Definition: codec.h:152
avcodec_parameters_from_context
int avcodec_parameters_from_context(struct AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:137
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
AVPacket
This structure stores compressed data.
Definition: packet.h:501
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:116
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
ff_qsv_encode
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: qsvenc.c:2640
av_bsf_get_by_name
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
Definition: bitstream_filters.c:86
QSV_OPTION_B_STRATEGY
#define QSV_OPTION_B_STRATEGY
Definition: qsvenc.h:107