/* mfenc.c — FFmpeg Media Foundation (MF) audio/video encoder wrapper. */
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #define COBJMACROS
20 #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
21 #undef _WIN32_WINNT
22 #define _WIN32_WINNT 0x0602
23 #endif
24 
25 #include "encode.h"
26 #include "mf_utils.h"
27 #include "libavutil/imgutils.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/time.h"
30 #include "internal.h"
31 
typedef struct MFContext {
    IMFTransform *mft;                    // the wrapped Media Foundation transform (the encoder)
    IMFMediaEventGenerator *async_events; // non-NULL only in async (hardware) mode; see mf_unlock_async()
    MFT_INPUT_STREAM_INFO in_info;        // cached input stream info (buffer size/alignment)
    MFT_OUTPUT_STREAM_INFO out_info;      // cached output stream info (buffer size/alignment)
    int64_t reorder_delay;                // pts-dts shift of the first packet; AV_NOPTS_VALUE until known
    ICodecAPI *codec_api;                 // optional ICodecAPI tuning interface (may be NULL)
    // set by AVOption
    // NOTE(review): this declaration appears truncated by extraction — other
    // members referenced in this file (frame, is_audio/is_video, main_subtype,
    // in/out_stream_id, draining flags, sample_sent, out_stream_provides_samples,
    // opt_enc_* options) are missing here; verify against the upstream source.
} MFContext;
54 
55 static int mf_choose_output_type(AVCodecContext *avctx);
56 static int mf_setup_context(AVCodecContext *avctx);
57 
58 #define MF_TIMEBASE (AVRational){1, 10000000}
59 // Sentinel value only used by us.
60 #define MF_INVALID_TIME AV_NOPTS_VALUE
61 
62 static int mf_wait_events(AVCodecContext *avctx)
63 {
64  MFContext *c = avctx->priv_data;
65 
66  if (!c->async_events)
67  return 0;
68 
69  while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
70  IMFMediaEvent *ev = NULL;
71  MediaEventType ev_id = 0;
72  HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
73  if (FAILED(hr)) {
74  av_log(avctx, AV_LOG_ERROR, "IMFMediaEventGenerator_GetEvent() failed: %s\n",
75  ff_hr_str(hr));
76  return AVERROR_EXTERNAL;
77  }
78  IMFMediaEvent_GetType(ev, &ev_id);
79  switch (ev_id) {
81  if (!c->draining)
82  c->async_need_input = 1;
83  break;
85  c->async_have_output = 1;
86  break;
88  c->draining_done = 1;
89  break;
91  c->async_marker = 1;
92  break;
93  default: ;
94  }
95  IMFMediaEvent_Release(ev);
96  }
97 
98  return 0;
99 }
100 
102 {
103  if (avctx->time_base.num > 0 && avctx->time_base.den > 0)
104  return avctx->time_base;
105  return MF_TIMEBASE;
106 }
107 
108 static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
109 {
110  if (av_pts == AV_NOPTS_VALUE)
111  return MF_INVALID_TIME;
112  return av_rescale_q(av_pts, mf_get_tb(avctx), MF_TIMEBASE);
113 }
114 
115 static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
116 {
117  LONGLONG stime = mf_to_mf_time(avctx, av_pts);
118  if (stime != MF_INVALID_TIME)
119  IMFSample_SetSampleTime(sample, stime);
120 }
121 
122 static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
123 {
124  return av_rescale_q(stime, MF_TIMEBASE, mf_get_tb(avctx));
125 }
126 
127 static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
128 {
129  LONGLONG pts;
130  HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
131  if (FAILED(hr))
132  return AV_NOPTS_VALUE;
133  return mf_from_mf_time(avctx, pts);
134 }
135 
136 static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
137 {
138  MFContext *c = avctx->priv_data;
139  HRESULT hr;
140  UINT32 sz;
141 
142  if (avctx->codec_id != AV_CODEC_ID_MP3 && avctx->codec_id != AV_CODEC_ID_AC3) {
143  hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
144  if (!FAILED(hr) && sz > 0) {
146  if (!avctx->extradata)
147  return AVERROR(ENOMEM);
148  avctx->extradata_size = sz;
149  hr = IMFAttributes_GetBlob(type, &MF_MT_USER_DATA, avctx->extradata, sz, NULL);
150  if (FAILED(hr))
151  return AVERROR_EXTERNAL;
152 
153  if (avctx->codec_id == AV_CODEC_ID_AAC && avctx->extradata_size >= 12) {
154  // Get rid of HEAACWAVEINFO (after wfx field, 12 bytes).
155  avctx->extradata_size = avctx->extradata_size - 12;
156  memmove(avctx->extradata, avctx->extradata + 12, avctx->extradata_size);
157  }
158  }
159  }
160 
161  // I don't know where it's documented that we need this. It happens with the
162  // MS mp3 encoder MFT. The idea for the workaround is taken from NAudio.
163  // (Certainly any lossy codec will have frames much smaller than 1 second.)
164  if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
165  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
166  if (!FAILED(hr)) {
167  av_log(avctx, AV_LOG_VERBOSE, "MFT_OUTPUT_STREAM_INFO.cbSize set to 0, "
168  "assuming %d bytes instead.\n", (int)sz);
169  c->out_info.cbSize = sz;
170  }
171  }
172 
173  return 0;
174 }
175 
176 static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
177 {
178  HRESULT hr;
179  UINT32 sz;
180 
181  hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
182  if (!FAILED(hr) && sz > 0) {
183  uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
184  if (!extradata)
185  return AVERROR(ENOMEM);
186  hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
187  if (FAILED(hr)) {
188  av_free(extradata);
189  return AVERROR_EXTERNAL;
190  }
191  av_freep(&avctx->extradata);
192  avctx->extradata = extradata;
193  avctx->extradata_size = sz;
194  }
195 
196  return 0;
197 }
198 
200 {
201  MFContext *c = avctx->priv_data;
202  HRESULT hr;
203  IMFMediaType *type;
204  int ret;
205 
206  hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
207  if (FAILED(hr)) {
208  av_log(avctx, AV_LOG_ERROR, "could not get output type\n");
209  return AVERROR_EXTERNAL;
210  }
211 
212  av_log(avctx, AV_LOG_VERBOSE, "final output type:\n");
213  ff_media_type_dump(avctx, type);
214 
215  ret = 0;
216  if (c->is_video) {
217  ret = mf_encv_output_type_get(avctx, type);
218  } else if (c->is_audio) {
219  ret = mf_enca_output_type_get(avctx, type);
220  }
221 
222  if (ret < 0)
223  av_log(avctx, AV_LOG_ERROR, "output type not supported\n");
224 
225  IMFMediaType_Release(type);
226  return ret;
227 }
228 
229 static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
230 {
231  MFContext *c = avctx->priv_data;
232  HRESULT hr;
233  int ret;
234  DWORD len;
235  IMFMediaBuffer *buffer;
236  BYTE *data;
237  UINT64 t;
238  UINT32 t32;
239 
240  hr = IMFSample_GetTotalLength(sample, &len);
241  if (FAILED(hr))
242  return AVERROR_EXTERNAL;
243 
244  if ((ret = ff_get_encode_buffer(avctx, avpkt, len, 0)) < 0)
245  return ret;
246 
247  IMFSample_ConvertToContiguousBuffer(sample, &buffer);
248  if (FAILED(hr))
249  return AVERROR_EXTERNAL;
250 
251  hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
252  if (FAILED(hr)) {
253  IMFMediaBuffer_Release(buffer);
254  return AVERROR_EXTERNAL;
255  }
256 
257  memcpy(avpkt->data, data, len);
258 
259  IMFMediaBuffer_Unlock(buffer);
260  IMFMediaBuffer_Release(buffer);
261 
262  avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);
263 
264  hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
265  if (c->is_audio || (!FAILED(hr) && t32 != 0))
266  avpkt->flags |= AV_PKT_FLAG_KEY;
267 
268  hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
269  if (!FAILED(hr)) {
270  avpkt->dts = mf_from_mf_time(avctx, t);
271  // At least on Qualcomm's HEVC encoder on SD 835, the output dts
272  // starts from the input pts of the first frame, while the output pts
273  // is shifted forward. Therefore, shift the output values back so that
274  // the output pts matches the input.
275  if (c->reorder_delay == AV_NOPTS_VALUE)
276  c->reorder_delay = avpkt->pts - avpkt->dts;
277  avpkt->dts -= c->reorder_delay;
278  avpkt->pts -= c->reorder_delay;
279  }
280 
281  return 0;
282 }
283 
284 static IMFSample *mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
285 {
286  MFContext *c = avctx->priv_data;
287  size_t len;
288  size_t bps;
289  IMFSample *sample;
290 
291  bps = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels;
292  len = frame->nb_samples * bps;
293 
294  sample = ff_create_memory_sample(frame->data[0], len, c->in_info.cbAlignment);
295  if (sample)
296  IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->nb_samples));
297  return sample;
298 }
299 
// Copy a raw video frame into a newly created IMFSample.
// Returns NULL on any allocation or COM failure; the caller owns the sample.
static IMFSample *mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample;
    IMFMediaBuffer *buffer;
    BYTE *data;
    HRESULT hr;
    int ret;
    int size;

    // Tightly packed size (alignment 1) of one frame in the configured format.
    size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
    if (size < 0)
        return NULL;

    sample = ff_create_memory_sample(NULL, size, c->in_info.cbAlignment);
    if (!sample)
        return NULL;

    hr = IMFSample_GetBufferByIndex(sample, 0, &buffer);
    if (FAILED(hr)) {
        IMFSample_Release(sample);
        return NULL;
    }

    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
    if (FAILED(hr)) {
        IMFMediaBuffer_Release(buffer);
        IMFSample_Release(sample);
        return NULL;
    }

    // Pack the (possibly padded) AVFrame planes into the contiguous MF buffer.
    // Unlock/Release must happen regardless of the copy result.
    ret = av_image_copy_to_buffer((uint8_t *)data, size, (void *)frame->data, frame->linesize,
                                  avctx->pix_fmt, avctx->width, avctx->height, 1);
    IMFMediaBuffer_SetCurrentLength(buffer, size);
    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);
    if (ret < 0) {
        IMFSample_Release(sample);
        return NULL;
    }

    IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->pkt_duration));

    return sample;
}
345 
346 static IMFSample *mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
347 {
348  MFContext *c = avctx->priv_data;
349  IMFSample *sample;
350 
351  if (c->is_audio) {
353  } else {
355  }
356 
357  if (sample)
358  mf_sample_set_pts(avctx, sample, frame->pts);
359 
360  return sample;
361 }
362 
// Feed one sample into the MFT, or (sample == NULL) start/continue draining.
// Returns 0, AVERROR(EAGAIN) when the MFT can't accept input right now,
// AVERROR_EOF once draining was already requested, or AVERROR_EXTERNAL.
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;

    if (sample) {
        // Async MFTs only accept input after a METransformNeedInput event.
        if (c->async_events) {
            if ((ret = mf_wait_events(avctx)) < 0)
                return ret;
            if (!c->async_need_input)
                return AVERROR(EAGAIN);
        }
        // Mark the very first sample as a discontinuity (stream start).
        if (!c->sample_sent)
            IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
        c->sample_sent = 1;
        hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
        if (hr == MF_E_NOTACCEPTING) {
            // Output must be fetched before more input is accepted.
            return AVERROR(EAGAIN);
        } else if (FAILED(hr)) {
            av_log(avctx, AV_LOG_ERROR, "failed processing input: %s\n", ff_hr_str(hr));
            return AVERROR_EXTERNAL;
        }
        c->async_need_input = 0;
    } else if (!c->draining) {
        hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
        if (FAILED(hr))
            av_log(avctx, AV_LOG_ERROR, "failed draining: %s\n", ff_hr_str(hr));
        // Some MFTs (AC3) will send a frame after each drain command (???), so
        // this is required to make draining actually terminate.
        c->draining = 1;
        c->async_need_input = 0;
    } else {
        return AVERROR_EOF;
    }
    return 0;
}
400 
401 static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
402 {
403  MFContext *c = avctx->priv_data;
404  HRESULT hr;
405  DWORD st;
406  MFT_OUTPUT_DATA_BUFFER out_buffers;
407  IMFSample *sample;
408  int ret = 0;
409 
410  while (1) {
411  *out_sample = NULL;
412  sample = NULL;
413 
414  if (c->async_events) {
415  if ((ret = mf_wait_events(avctx)) < 0)
416  return ret;
417  if (!c->async_have_output || c->draining_done) {
418  ret = 0;
419  break;
420  }
421  }
422 
423  if (!c->out_stream_provides_samples) {
424  sample = ff_create_memory_sample(NULL, c->out_info.cbSize, c->out_info.cbAlignment);
425  if (!sample)
426  return AVERROR(ENOMEM);
427  }
428 
429  out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
430  .dwStreamID = c->out_stream_id,
431  .pSample = sample,
432  };
433 
434  st = 0;
435  hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
436 
437  if (out_buffers.pEvents)
438  IMFCollection_Release(out_buffers.pEvents);
439 
440  if (!FAILED(hr)) {
441  *out_sample = out_buffers.pSample;
442  ret = 0;
443  break;
444  }
445 
446  if (out_buffers.pSample)
447  IMFSample_Release(out_buffers.pSample);
448 
449  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
450  if (c->draining)
451  c->draining_done = 1;
452  ret = 0;
453  } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
454  av_log(avctx, AV_LOG_WARNING, "stream format change\n");
455  ret = mf_choose_output_type(avctx);
456  if (ret == 0) // we don't expect renegotiating the input type
458  if (ret > 0) {
459  ret = mf_setup_context(avctx);
460  if (ret >= 0) {
461  c->async_have_output = 0;
462  continue;
463  }
464  }
465  } else {
466  av_log(avctx, AV_LOG_ERROR, "failed processing output: %s\n", ff_hr_str(hr));
468  }
469 
470  break;
471  }
472 
473  c->async_have_output = 0;
474 
475  if (ret >= 0 && !*out_sample)
476  ret = c->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
477 
478  return ret;
479 }
480 
// Standard FFmpeg receive_packet callback: pull a frame from the encode
// queue, feed it to the MFT, then try to fetch one encoded packet.
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample = NULL;
    int ret;

    // Fetch a pending input frame unless we still hold one from a prior
    // EAGAIN.
    if (!c->frame->buf[0]) {
        ret = ff_encode_get_frame(avctx, c->frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (c->frame->buf[0]) {
        sample = mf_avframe_to_sample(avctx, c->frame);
        if (!sample) {
            av_frame_unref(c->frame);
            return AVERROR(ENOMEM);
        }
        if (c->is_video && c->codec_api) {
            // Force a key frame for explicit I-frame requests and for the
            // very first frame sent to the encoder.
            if (c->frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
                ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
        }
    }

    // A NULL sample (EOF from ff_encode_get_frame) triggers draining.
    ret = mf_send_sample(avctx, sample);
    if (sample)
        IMFSample_Release(sample);
    // On EAGAIN the frame is kept and retried on the next call.
    if (ret != AVERROR(EAGAIN))
        av_frame_unref(c->frame);
    if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        return ret;

    ret = mf_receive_sample(avctx, &sample);
    if (ret < 0)
        return ret;

    ret = mf_sample_to_avpacket(avctx, sample, avpkt);
    IMFSample_Release(sample);

    return ret;
}
522 
523 // Most encoders seem to enumerate supported audio formats on the output types,
524 // at least as far as channel configuration and sample rate is concerned. Pick
525 // the one which seems to match best.
526 static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
527 {
528  MFContext *c = avctx->priv_data;
529  HRESULT hr;
530  UINT32 t;
531  GUID tg;
532  int64_t score = 0;
533 
534  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
535  if (!FAILED(hr) && t == avctx->sample_rate)
536  score |= 1LL << 32;
537 
538  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
539  if (!FAILED(hr) && t == avctx->channels)
540  score |= 2LL << 32;
541 
542  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
543  if (!FAILED(hr)) {
544  if (IsEqualGUID(&c->main_subtype, &tg))
545  score |= 4LL << 32;
546  }
547 
548  // Select the bitrate (lowest priority).
549  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
550  if (!FAILED(hr)) {
551  int diff = (int)t - avctx->bit_rate / 8;
552  if (diff >= 0) {
553  score |= (1LL << 31) - diff; // prefer lower bitrate
554  } else {
555  score |= (1LL << 30) + diff; // prefer higher bitrate
556  }
557  }
558 
559  hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
560  if (!FAILED(hr) && t != 0)
561  return -1;
562 
563  return score;
564 }
565 
566 static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
567 {
568  // (some decoders allow adjusting this freely, but it can also cause failure
569  // to set the output type - so it's commented for being too fragile)
570  //IMFAttributes_SetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, avctx->bit_rate / 8);
571  //IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
572 
573  return 0;
574 }
575 
576 static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
577 {
578  HRESULT hr;
579  UINT32 t;
580  int64_t score = 0;
581 
582  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
583  if (sformat == AV_SAMPLE_FMT_NONE)
584  return -1; // can not use
585 
586  if (sformat == avctx->sample_fmt)
587  score |= 1;
588 
589  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
590  if (!FAILED(hr) && t == avctx->sample_rate)
591  score |= 2;
592 
593  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
594  if (!FAILED(hr) && t == avctx->channels)
595  score |= 4;
596 
597  return score;
598 }
599 
600 static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
601 {
602  HRESULT hr;
603  UINT32 t;
604 
605  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
606  if (sformat != avctx->sample_fmt) {
607  av_log(avctx, AV_LOG_ERROR, "unsupported input sample format set\n");
608  return AVERROR(EINVAL);
609  }
610 
611  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
612  if (FAILED(hr) || t != avctx->sample_rate) {
613  av_log(avctx, AV_LOG_ERROR, "unsupported input sample rate set\n");
614  return AVERROR(EINVAL);
615  }
616 
617  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
618  if (FAILED(hr) || t != avctx->channels) {
619  av_log(avctx, AV_LOG_ERROR, "unsupported input channel number set\n");
620  return AVERROR(EINVAL);
621  }
622 
623  return 0;
624 }
625 
626 static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
627 {
628  MFContext *c = avctx->priv_data;
629  GUID tg;
630  HRESULT hr;
631  int score = -1;
632 
633  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
634  if (!FAILED(hr)) {
635  if (IsEqualGUID(&c->main_subtype, &tg))
636  score = 1;
637  }
638 
639  return score;
640 }
641 
642 static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
643 {
644  MFContext *c = avctx->priv_data;
646 
647  ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
648  IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
649 
650  if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
651  framerate = avctx->framerate;
652  } else {
653  framerate = av_inv_q(avctx->time_base);
654  framerate.den *= avctx->ticks_per_frame;
655  }
656 
657  ff_MFSetAttributeRatio((IMFAttributes *)type, &MF_MT_FRAME_RATE, framerate.num, framerate.den);
658 
659  // (MS HEVC supports eAVEncH265VProfile_Main_420_8 only.)
660  if (avctx->codec_id == AV_CODEC_ID_H264) {
662  switch (avctx->profile) {
665  break;
668  break;
669  }
670  IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
671  }
672 
673  IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
674 
675  // Note that some of the ICodecAPI options must be set before SetOutputType.
676  if (c->codec_api) {
677  if (avctx->bit_rate)
678  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMeanBitRate, FF_VAL_VT_UI4(avctx->bit_rate));
679 
680  if (c->opt_enc_rc >= 0)
681  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
682 
683  if (c->opt_enc_quality >= 0)
684  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
685 
686  // Always set the number of b-frames. Qualcomm's HEVC encoder on SD835
687  // defaults this to 1, and that setting is buggy with many of the
688  // rate control modes. (0 or 2 b-frames works fine with most rate
689  // control modes, but 2 seems buggy with the u_vbr mode.) Setting
690  // "scenario" to "camera_record" sets it in CFR mode (where the default
691  // is VFR), which makes the encoder avoid dropping frames.
692  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVDefaultBPictureCount, FF_VAL_VT_UI4(avctx->max_b_frames));
693  avctx->has_b_frames = avctx->max_b_frames > 0;
694 
695  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
696 
697  if (c->opt_enc_scenario >= 0)
698  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo, FF_VAL_VT_UI4(c->opt_enc_scenario));
699  }
700 
701  return 0;
702 }
703 
704 static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
705 {
706  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
707  if (pix_fmt != avctx->pix_fmt)
708  return -1; // can not use
709 
710  return 0;
711 }
712 
713 static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
714 {
715  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
716  if (pix_fmt != avctx->pix_fmt) {
717  av_log(avctx, AV_LOG_ERROR, "unsupported input pixel format set\n");
718  return AVERROR(EINVAL);
719  }
720 
721  //ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
722 
723  return 0;
724 }
725 
727 {
728  MFContext *c = avctx->priv_data;
729  HRESULT hr;
730  int ret;
731  IMFMediaType *out_type = NULL;
732  int64_t out_type_score = -1;
733  int out_type_index = -1;
734  int n;
735 
736  av_log(avctx, AV_LOG_VERBOSE, "output types:\n");
737  for (n = 0; ; n++) {
738  IMFMediaType *type;
739  int64_t score = -1;
740 
741  hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
742  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
743  break;
744  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
745  av_log(avctx, AV_LOG_VERBOSE, "(need to set input type)\n");
746  ret = 0;
747  goto done;
748  }
749  if (FAILED(hr)) {
750  av_log(avctx, AV_LOG_ERROR, "error getting output type: %s\n", ff_hr_str(hr));
752  goto done;
753  }
754 
755  av_log(avctx, AV_LOG_VERBOSE, "output type %d:\n", n);
756  ff_media_type_dump(avctx, type);
757 
758  if (c->is_video) {
759  score = mf_encv_output_score(avctx, type);
760  } else if (c->is_audio) {
761  score = mf_enca_output_score(avctx, type);
762  }
763 
764  if (score > out_type_score) {
765  if (out_type)
766  IMFMediaType_Release(out_type);
767  out_type = type;
768  out_type_score = score;
769  out_type_index = n;
770  IMFMediaType_AddRef(out_type);
771  }
772 
773  IMFMediaType_Release(type);
774  }
775 
776  if (out_type) {
777  av_log(avctx, AV_LOG_VERBOSE, "picking output type %d.\n", out_type_index);
778  } else {
779  hr = MFCreateMediaType(&out_type);
780  if (FAILED(hr)) {
781  ret = AVERROR(ENOMEM);
782  goto done;
783  }
784  }
785 
786  ret = 0;
787  if (c->is_video) {
788  ret = mf_encv_output_adjust(avctx, out_type);
789  } else if (c->is_audio) {
790  ret = mf_enca_output_adjust(avctx, out_type);
791  }
792 
793  if (ret >= 0) {
794  av_log(avctx, AV_LOG_VERBOSE, "setting output type:\n");
795  ff_media_type_dump(avctx, out_type);
796 
797  hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
798  if (!FAILED(hr)) {
799  ret = 1;
800  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
801  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set input type\n");
802  ret = 0;
803  } else {
804  av_log(avctx, AV_LOG_ERROR, "could not set output type (%s)\n", ff_hr_str(hr));
806  }
807  }
808 
809 done:
810  if (out_type)
811  IMFMediaType_Release(out_type);
812  return ret;
813 }
814 
816 {
817  MFContext *c = avctx->priv_data;
818  HRESULT hr;
819  int ret;
820  IMFMediaType *in_type = NULL;
821  int64_t in_type_score = -1;
822  int in_type_index = -1;
823  int n;
824 
825  av_log(avctx, AV_LOG_VERBOSE, "input types:\n");
826  for (n = 0; ; n++) {
827  IMFMediaType *type = NULL;
828  int64_t score = -1;
829 
830  hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
831  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
832  break;
833  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
834  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 1)\n");
835  ret = 0;
836  goto done;
837  }
838  if (FAILED(hr)) {
839  av_log(avctx, AV_LOG_ERROR, "error getting input type: %s\n", ff_hr_str(hr));
841  goto done;
842  }
843 
844  av_log(avctx, AV_LOG_VERBOSE, "input type %d:\n", n);
845  ff_media_type_dump(avctx, type);
846 
847  if (c->is_video) {
848  score = mf_encv_input_score(avctx, type);
849  } else if (c->is_audio) {
850  score = mf_enca_input_score(avctx, type);
851  }
852 
853  if (score > in_type_score) {
854  if (in_type)
855  IMFMediaType_Release(in_type);
856  in_type = type;
857  in_type_score = score;
858  in_type_index = n;
859  IMFMediaType_AddRef(in_type);
860  }
861 
862  IMFMediaType_Release(type);
863  }
864 
865  if (in_type) {
866  av_log(avctx, AV_LOG_VERBOSE, "picking input type %d.\n", in_type_index);
867  } else {
868  // Some buggy MFTs (WMA encoder) fail to return MF_E_TRANSFORM_TYPE_NOT_SET.
869  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 2)\n");
870  ret = 0;
871  goto done;
872  }
873 
874  ret = 0;
875  if (c->is_video) {
876  ret = mf_encv_input_adjust(avctx, in_type);
877  } else if (c->is_audio) {
878  ret = mf_enca_input_adjust(avctx, in_type);
879  }
880 
881  if (ret >= 0) {
882  av_log(avctx, AV_LOG_VERBOSE, "setting input type:\n");
883  ff_media_type_dump(avctx, in_type);
884 
885  hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
886  if (!FAILED(hr)) {
887  ret = 1;
888  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
889  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set output type\n");
890  ret = 0;
891  } else {
892  av_log(avctx, AV_LOG_ERROR, "could not set input type (%s)\n", ff_hr_str(hr));
894  }
895  }
896 
897 done:
898  if (in_type)
899  IMFMediaType_Release(in_type);
900  return ret;
901 }
902 
904 {
905  // This follows steps 1-5 on:
906  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa965264(v=vs.85).aspx
907  // If every MFT implementer does this correctly, this loop should at worst
908  // be repeated once.
909  int need_input = 1, need_output = 1;
910  int n;
911  for (n = 0; n < 2 && (need_input || need_output); n++) {
912  int ret;
913  ret = mf_choose_input_type(avctx);
914  if (ret < 0)
915  return ret;
916  need_input = ret < 1;
917  ret = mf_choose_output_type(avctx);
918  if (ret < 0)
919  return ret;
920  need_output = ret < 1;
921  }
922  if (need_input || need_output) {
923  av_log(avctx, AV_LOG_ERROR, "format negotiation failed (%d/%d)\n",
924  need_input, need_output);
925  return AVERROR_EXTERNAL;
926  }
927  return 0;
928 }
929 
931 {
932  MFContext *c = avctx->priv_data;
933  HRESULT hr;
934  int ret;
935 
936  hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
937  if (FAILED(hr))
938  return AVERROR_EXTERNAL;
939  av_log(avctx, AV_LOG_VERBOSE, "in_info: size=%d, align=%d\n",
940  (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
941 
942  hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
943  if (FAILED(hr))
944  return AVERROR_EXTERNAL;
945  c->out_stream_provides_samples =
946  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
947  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
948  av_log(avctx, AV_LOG_VERBOSE, "out_info: size=%d, align=%d%s\n",
949  (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
950  c->out_stream_provides_samples ? " (provides samples)" : "");
951 
952  if ((ret = mf_output_type_get(avctx)) < 0)
953  return ret;
954 
955  return 0;
956 }
957 
958 static int mf_unlock_async(AVCodecContext *avctx)
959 {
960  MFContext *c = avctx->priv_data;
961  HRESULT hr;
962  IMFAttributes *attrs;
963  UINT32 v;
964  int res = AVERROR_EXTERNAL;
965 
966  // For hw encoding we unfortunately need to use async mode, otherwise
967  // play it safe and avoid it.
968  if (!(c->is_video && c->opt_enc_hw))
969  return 0;
970 
971  hr = IMFTransform_GetAttributes(c->mft, &attrs);
972  if (FAILED(hr)) {
973  av_log(avctx, AV_LOG_ERROR, "error retrieving MFT attributes: %s\n", ff_hr_str(hr));
974  goto err;
975  }
976 
977  hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
978  if (FAILED(hr)) {
979  av_log(avctx, AV_LOG_ERROR, "error querying async: %s\n", ff_hr_str(hr));
980  goto err;
981  }
982 
983  if (!v) {
984  av_log(avctx, AV_LOG_ERROR, "hardware MFT is not async\n");
985  goto err;
986  }
987 
988  hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
989  if (FAILED(hr)) {
990  av_log(avctx, AV_LOG_ERROR, "could not set async unlock: %s\n", ff_hr_str(hr));
991  goto err;
992  }
993 
994  hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
995  if (FAILED(hr)) {
996  av_log(avctx, AV_LOG_ERROR, "could not get async interface\n");
997  goto err;
998  }
999 
1000  res = 0;
1001 
1002 err:
1003  IMFAttributes_Release(attrs);
1004  return res;
1005 }
1006 
1007 static int mf_create(void *log, IMFTransform **mft, const AVCodec *codec, int use_hw)
1008 {
1009  int is_audio = codec->type == AVMEDIA_TYPE_AUDIO;
1010  const CLSID *subtype = ff_codec_to_mf_subtype(codec->id);
1011  MFT_REGISTER_TYPE_INFO reg = {0};
1012  GUID category;
1013  int ret;
1014 
1015  *mft = NULL;
1016 
1017  if (!subtype)
1018  return AVERROR(ENOSYS);
1019 
1020  reg.guidSubtype = *subtype;
1021 
1022  if (is_audio) {
1023  reg.guidMajorType = MFMediaType_Audio;
1024  category = MFT_CATEGORY_AUDIO_ENCODER;
1025  } else {
1026  reg.guidMajorType = MFMediaType_Video;
1027  category = MFT_CATEGORY_VIDEO_ENCODER;
1028  }
1029 
1030  if ((ret = ff_instantiate_mf(log, category, NULL, &reg, use_hw, mft)) < 0)
1031  return ret;
1032 
1033  return 0;
1034 }
1035 
// Encoder init callback: create the MFT, negotiate types, start streaming,
// and (for async video with global headers) poll briefly for extradata.
static int mf_init(AVCodecContext *avctx)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;
    const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
    int use_hw = 0;

    c->frame = av_frame_alloc();
    if (!c->frame)
        return AVERROR(ENOMEM);

    c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
    c->is_video = !c->is_audio;
    c->reorder_delay = AV_NOPTS_VALUE;

    // Hardware MFTs are only used for video, and only when requested.
    if (c->is_video && c->opt_enc_hw)
        use_hw = 1;

    if (!subtype)
        return AVERROR(ENOSYS);

    c->main_subtype = *subtype;

    if ((ret = mf_create(avctx, &c->mft, avctx->codec, use_hw)) < 0)
        return ret;

    if ((ret = mf_unlock_async(avctx)) < 0)
        return ret;

    // ICodecAPI is optional; tuning options are skipped when unsupported.
    hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
    if (!FAILED(hr))
        av_log(avctx, AV_LOG_VERBOSE, "MFT supports ICodecAPI.\n");


    hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
    if (hr == E_NOTIMPL) {
        // E_NOTIMPL means the MFT uses the default fixed stream IDs (0/0).
        c->in_stream_id = c->out_stream_id = 0;
    } else if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not get stream IDs (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    if ((ret = mf_negotiate_types(avctx)) < 0)
        return ret;

    if ((ret = mf_setup_context(avctx)) < 0)
        return ret;

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start streaming (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start stream (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && c->async_events &&
        c->is_video && !avctx->extradata) {
        int sleep = 10000, total = 0;
        av_log(avctx, AV_LOG_VERBOSE, "Awaiting extradata\n");
        while (total < 70*1000) {
            // The Qualcomm H264 encoder on SD835 doesn't provide extradata
            // immediately, but it becomes available soon after init (without
            // any waitable event). In practice, it's available after less
            // than 10 ms, but wait for up to 70 ms before giving up.
            // Some encoders (Qualcomm's HEVC encoder on SD835, some versions
            // of the QSV H264 encoder at least) don't provide extradata this
            // way at all, not even after encoding a frame - it's only
            // available prepended to frames.
            av_usleep(sleep);
            total += sleep;
            mf_output_type_get(avctx);
            if (avctx->extradata)
                break;
            sleep *= 2;  // exponential backoff: 10, 20, 40 ms
        }
        av_log(avctx, AV_LOG_VERBOSE, "%s extradata in %d ms\n",
               avctx->extradata ? "Got" : "Didn't get", total / 1000);
    }

    return 0;
}
1123 
1124 static int mf_close(AVCodecContext *avctx)
1125 {
1126  MFContext *c = avctx->priv_data;
1127 
1128  if (c->codec_api)
1129  ICodecAPI_Release(c->codec_api);
1130 
1131  if (c->async_events)
1132  IMFMediaEventGenerator_Release(c->async_events);
1133 
1134  ff_free_mf(&c->mft);
1135 
1136  av_frame_free(&c->frame);
1137 
1138  av_freep(&avctx->extradata);
1139  avctx->extradata_size = 0;
1140 
1141  return 0;
1142 }
1143 
// Offset of an MFContext field, for use in the AVOption tables below.
#define OFFSET(x) offsetof(MFContext, x)

/*
 * Declare one MediaFoundation encoder: its AVClass (for AVOptions) and its
 * AVCodec entry. MEDIATYPE is AUDIO or VIDEO, NAME the lowercase codec name,
 * ID the AV_CODEC_ID_* suffix, OPTS the AVOption table (or NULL), and EXTRA
 * a trailing initializer fragment (sample/pixel format list macro).
 * All MF encoders share the same init/close/receive_packet entry points.
 */
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, EXTRA) \
    static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
        .class_name = #NAME "_mf", \
        .item_name  = av_default_item_name, \
        .option     = OPTS, \
        .version    = LIBAVUTIL_VERSION_INT, \
    }; \
    const AVCodec ff_ ## NAME ## _mf_encoder = { \
        .priv_class     = &ff_ ## NAME ## _mf_encoder_class, \
        .name           = #NAME "_mf", \
        .long_name      = NULL_IF_CONFIG_SMALL(#ID " via MediaFoundation"), \
        .type           = AVMEDIA_TYPE_ ## MEDIATYPE, \
        .id             = AV_CODEC_ID_ ## ID, \
        .priv_data_size = sizeof(MFContext), \
        .init           = mf_init, \
        .close          = mf_close, \
        .receive_packet = mf_receive_packet, \
        EXTRA \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1, \
        .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | \
                          FF_CODEC_CAP_INIT_CLEANUP, \
    };
1169 
// Sample formats accepted by the audio encoders: interleaved s16 only.
#define AFMTS \
        .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, \
                                                         AV_SAMPLE_FMT_NONE },

MF_ENCODER(AUDIO, aac,         AAC, NULL, AFMTS);
MF_ENCODER(AUDIO, ac3,         AC3, NULL, AFMTS);
MF_ENCODER(AUDIO, mp3,         MP3, NULL, AFMTS);
1177 
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
// Video encoder options. The defaults (-1) mean "leave the MFT's own
// setting untouched"; non-default values are applied through ICodecAPI.
static const AVOption venc_opts[] = {
    {"rate_control",  "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, "rate_control"},
    { "default",      "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "rate_control"},
    { "cbr",          "CBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_CBR}, 0, 0, VE, "rate_control"},
    { "pc_vbr",       "Peak constrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_PeakConstrainedVBR}, 0, 0, VE, "rate_control"},
    { "u_vbr",        "Unconstrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_UnconstrainedVBR}, 0, 0, VE, "rate_control"},
    { "quality",      "Quality mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_Quality}, 0, 0, VE, "rate_control" },
    // The following rate_control modes require Windows 8.
    { "ld_vbr",       "Low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_LowDelayVBR}, 0, 0, VE, "rate_control"},
    { "g_vbr",        "Global VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalVBR}, 0, 0, VE, "rate_control" },
    { "gld_vbr",      "Global low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR}, 0, 0, VE, "rate_control"},

    {"scenario",          "Select usage scenario", OFFSET(opt_enc_scenario), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, "scenario"},
    { "default",          "Default scenario", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "scenario"},
    { "display_remoting", "Display remoting", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemoting}, 0, 0, VE, "scenario"},
    { "video_conference", "Video conference", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_VideoConference}, 0, 0, VE, "scenario"},
    { "archive",          "Archive", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_Archive}, 0, 0, VE, "scenario"},
    { "live_streaming",   "Live streaming", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_LiveStreaming}, 0, 0, VE, "scenario"},
    { "camera_record",    "Camera record", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_CameraRecord}, 0, 0, VE, "scenario"},
    { "display_remoting_with_feature_map", "Display remoting with feature map", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap}, 0, 0, VE, "scenario"},

    {"quality",       "Quality", OFFSET(opt_enc_quality), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 100, VE},
    {"hw_encoding",   "Force hardware encoding", OFFSET(opt_enc_hw), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VE},
    {NULL}
};
1204 
// Pixel formats accepted by the video encoders; NV12 listed first as it is
// the native MF format, YUV420P is converted on input.
#define VFMTS \
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,    \
                                                        AV_PIX_FMT_YUV420P, \
                                                        AV_PIX_FMT_NONE },

MF_ENCODER(VIDEO, h264,        H264, venc_opts, VFMTS);
MF_ENCODER(VIDEO, hevc,        HEVC, venc_opts, VFMTS);
AVCodec
AVCodec.
Definition: codec.h:202
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
need_output
static int need_output(void)
Definition: ffmpeg.c:3915
ff_hr_str
#define ff_hr_str(hr)
Definition: mf_utils.h:145
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:426
FF_VAL_VT_UI4
#define FF_VAL_VT_UI4(v)
Definition: mf_utils.h:150
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:992
venc_opts
static const AVOption venc_opts[]
Definition: mfenc.c:1179
mf_v_avframe_to_sample
static IMFSample * mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:300
mf_choose_input_type
static int mf_choose_input_type(AVCodecContext *avctx)
Definition: mfenc.c:815
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_METransformNeedInput
@ ff_METransformNeedInput
Definition: mf_utils.h:128
ff_codec_to_mf_subtype
const CLSID * ff_codec_to_mf_subtype(enum AVCodecID codec)
Definition: mf_utils.c:539
mf_enca_output_score
static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:526
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
profile
mfxU16 profile
Definition: qsvenc.c:45
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
MFContext::opt_enc_hw
int opt_enc_hw
Definition: mfenc.c:52
MFContext::av_class
AVClass * av_class
Definition: mfenc.c:33
mf_receive_sample
static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
Definition: mfenc.c:401
mf_enca_output_type_get
static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:136
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
MFContext::sample_sent
int sample_sent
Definition: mfenc.c:44
ff_eAVEncCommonRateControlMode_Quality
@ ff_eAVEncCommonRateControlMode_Quality
Definition: mf_utils.h:107
MF_ENCODER
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, EXTRA)
Definition: mfenc.c:1146
ff_eAVEncCommonRateControlMode_CBR
@ ff_eAVEncCommonRateControlMode_CBR
Definition: mf_utils.h:104
AVOption
AVOption.
Definition: opt.h:247
encode.h
data
const char data[16]
Definition: mxf.c:143
mf_encv_output_score
static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:626
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
category
category
Definition: openal-dec.c:248
MFContext::draining_done
int draining_done
Definition: mfenc.c:43
ff_MFSetAttributeSize
HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid, UINT32 uw, UINT32 uh)
Definition: mf_utils.c:40
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:425
ff_eAVEncH264VProfile_High
@ ff_eAVEncH264VProfile_High
Definition: mf_utils.h:141
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:268
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1710
framerate
int framerate
Definition: h264_levels.c:65
MFContext::out_stream_id
DWORD out_stream_id
Definition: mfenc.c:39
MFContext::async_marker
int async_marker
Definition: mfenc.c:45
mf_avframe_to_sample
static IMFSample * mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:346
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
ff_media_type_to_sample_fmt
enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type)
Definition: mf_utils.c:146
MFContext::async_need_input
int async_need_input
Definition: mfenc.c:45
OFFSET
#define OFFSET(x)
Definition: mfenc.c:1144
mf_receive_packet
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: mfenc.c:481
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
MFContext::is_audio
int is_audio
Definition: mfenc.c:35
FF_PROFILE_H264_HIGH
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:1568
mf_utils.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
VFMTS
#define VFMTS
Definition: mfenc.c:1205
pts
static int64_t pts
Definition: transcode_aac.c:653
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:424
ff_eAVEncCommonRateControlMode_GlobalVBR
@ ff_eAVEncCommonRateControlMode_GlobalVBR
Definition: mf_utils.h:109
AVRational::num
int num
Numerator.
Definition: rational.h:59
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
mf_setup_context
static int mf_setup_context(AVCodecContext *avctx)
Definition: mfenc.c:930
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:679
MFContext::opt_enc_rc
int opt_enc_rc
Definition: mfenc.c:49
mf_create
static int mf_create(void *log, IMFTransform **mft, const AVCodec *codec, int use_hw)
Definition: mfenc.c:1007
MFContext::reorder_delay
int64_t reorder_delay
Definition: mfenc.c:46
ff_instantiate_mf
int ff_instantiate_mf(void *log, GUID category, MFT_REGISTER_TYPE_INFO *in_type, MFT_REGISTER_TYPE_INFO *out_type, int use_hw, IMFTransform **res)
Definition: mf_utils.c:582
mf_encv_output_adjust
static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:642
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:515
MFContext::opt_enc_scenario
int opt_enc_scenario
Definition: mfenc.c:51
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
MFContext::codec_api
ICodecAPI * codec_api
Definition: mfenc.c:47
MFContext::in_info
MFT_INPUT_STREAM_INFO in_info
Definition: mfenc.c:40
MFContext::out_stream_provides_samples
int out_stream_provides_samples
Definition: mfenc.c:42
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:77
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
MFContext::frame
AVFrame * frame
Definition: mfenc.c:34
if
if(ret)
Definition: filter_design.txt:179
ff_eAVScenarioInfo_LiveStreaming
@ ff_eAVScenarioInfo_LiveStreaming
Definition: mf_utils.h:118
ff_MFSetAttributeRatio
#define ff_MFSetAttributeRatio
Definition: mf_utils.c:47
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
@ ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
Definition: mf_utils.h:105
AVCodec::type
enum AVMediaType type
Definition: codec.h:215
mf_enca_input_score
static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:576
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
MF_INVALID_TIME
#define MF_INVALID_TIME
Definition: mfenc.c:60
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:433
ff_free_mf
void ff_free_mf(IMFTransform **mft)
Definition: mf_utils.c:674
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_METransformMarker
@ ff_METransformMarker
Definition: mf_utils.h:131
ff_METransformDrainComplete
@ ff_METransformDrainComplete
Definition: mf_utils.h:130
mf_enca_input_adjust
static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:600
time.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_AAC
@ AV_CODEC_ID_AAC
Definition: codec_id.h:425
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:506
ff_media_type_to_pix_fmt
enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type)
Definition: mf_utils.c:190
mf_a_avframe_to_sample
static IMFSample * mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:284
ff_eAVScenarioInfo_Archive
@ ff_eAVScenarioInfo_Archive
Definition: mf_utils.h:117
bps
unsigned bps
Definition: movenc.c:1596
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1000
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
sample
#define sample
Definition: flacdsp_template.c:44
MFContext::is_video
int is_video
Definition: mfenc.c:35
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_eAVEncH264VProfile_Base
@ ff_eAVEncH264VProfile_Base
Definition: mf_utils.h:139
ff_eAVScenarioInfo_DisplayRemoting
@ ff_eAVScenarioInfo_DisplayRemoting
Definition: mf_utils.h:115
MFContext::opt_enc_quality
int opt_enc_quality
Definition: mfenc.c:50
MFContext::async_events
IMFMediaEventGenerator * async_events
Definition: mfenc.c:38
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
ff_eAVEncCommonRateControlMode_UnconstrainedVBR
@ ff_eAVEncCommonRateControlMode_UnconstrainedVBR
Definition: mf_utils.h:106
MF_TIMEBASE
#define MF_TIMEBASE
Definition: mfenc.c:58
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
@ ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
Definition: mf_utils.h:120
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
tg
#define tg
Definition: regdef.h:74
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:993
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
mf_get_tb
static AVRational mf_get_tb(AVCodecContext *avctx)
Definition: mfenc.c:101
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
mf_send_sample
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:363
MFContext::in_stream_id
DWORD in_stream_id
Definition: mfenc.c:39
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
MFContext::async_have_output
int async_have_output
Definition: mfenc.c:45
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
VE
#define VE
Definition: mfenc.c:1178
MFContext::out_info
MFT_OUTPUT_STREAM_INFO out_info
Definition: mfenc.c:41
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:437
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
MFContext
Definition: mfenc.c:32
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
mf_negotiate_types
static int mf_negotiate_types(AVCodecContext *avctx)
Definition: mfenc.c:903
mf_enca_output_adjust
static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:566
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
mf_sample_set_pts
static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
Definition: mfenc.c:115
mf_to_mf_time
static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
Definition: mfenc.c:108
mf_from_mf_time
static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
Definition: mfenc.c:122
mf_init
static int mf_init(AVCodecContext *avctx)
Definition: mfenc.c:1036
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:383
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:78
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1525
mf_close
static int mf_close(AVCodecContext *avctx)
Definition: mfenc.c:1124
mf_encv_input_adjust
static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:713
FF_PROFILE_H264_MAIN
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:1566
MFContext::draining
int draining
Definition: mfenc.c:43
ff_eAVScenarioInfo_CameraRecord
@ ff_eAVScenarioInfo_CameraRecord
Definition: mf_utils.h:119
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:391
ff_METransformHaveOutput
@ ff_METransformHaveOutput
Definition: mf_utils.h:129
FF_VAL_VT_BOOL
#define FF_VAL_VT_BOOL(v)
Definition: mf_utils.h:151
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:655
ff_encode_get_frame
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
Definition: encode.c:157
mf_choose_output_type
static int mf_choose_output_type(AVCodecContext *avctx)
Definition: mfenc.c:726
ff_eAVScenarioInfo_VideoConference
@ ff_eAVScenarioInfo_VideoConference
Definition: mf_utils.h:116
AFMTS
#define AFMTS
Definition: mfenc.c:1170
ff_media_type_dump
void ff_media_type_dump(void *log, IMFMediaType *type)
Definition: mf_utils.c:534
mf_encv_input_score
static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:704
mf_output_type_get
static int mf_output_type_get(AVCodecContext *avctx)
Definition: mfenc.c:199
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:501
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
mf_sample_to_avpacket
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
Definition: mfenc.c:229
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
imgutils.h
MFContext::mft
IMFTransform * mft
Definition: mfenc.c:37
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
mf_unlock_async
static int mf_unlock_async(AVCodecContext *avctx)
Definition: mfenc.c:958
ff_create_memory_sample
IMFSample * ff_create_memory_sample(void *fill_data, size_t size, size_t align)
Definition: mf_utils.c:109
ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
@ ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
Definition: mf_utils.h:110
int
int
Definition: ffmpeg_filter.c:156
mf_wait_events
static int mf_wait_events(AVCodecContext *avctx)
Definition: mfenc.c:62
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
ff_eAVEncH264VProfile_Main
@ ff_eAVEncH264VProfile_Main
Definition: mf_utils.h:140
mf_sample_get_pts
static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:127
MFContext::main_subtype
GUID main_subtype
Definition: mfenc.c:36
ff_eAVEncCommonRateControlMode_LowDelayVBR
@ ff_eAVEncCommonRateControlMode_LowDelayVBR
Definition: mf_utils.h:108
mf_encv_output_type_get
static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:176