FFmpeg
mfenc.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #define COBJMACROS
20 #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
21 #undef _WIN32_WINNT
22 #define _WIN32_WINNT 0x0602
23 #endif
24 
25 #include "encode.h"
26 #include "mf_utils.h"
27 #include "libavutil/imgutils.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/time.h"
31 #include "codec_internal.h"
32 #include "internal.h"
33 #include "compat/w32dlfcn.h"
34 
// Per-instance state of the Media Foundation (MF) encoder wrapper.
// NOTE(review): this capture of the file is missing several fields that the
// functions below clearly reference (e.g. c->functions, c->frame, c->is_audio,
// c->is_video, c->main_subtype, c->in_stream_id, c->out_stream_id, the
// async_*/draining flags, c->reorder_delay, c->out_stream_provides_samples,
// c->sample_sent and the opt_enc_* AVOption fields) — confirm against the
// full upstream source before relying on this definition.
typedef struct MFContext {
    HMODULE library;                      // handle of the loaded MF library
    IMFTransform *mft;                    // the encoder MFT instance
    IMFMediaEventGenerator *async_events; // event source for async (hw) MFTs; NULL when synchronous
    MFT_INPUT_STREAM_INFO in_info;        // cached input stream requirements (size/alignment)
    MFT_OUTPUT_STREAM_INFO out_info;      // cached output stream requirements (size/alignment/flags)
    ICodecAPI *codec_api;                 // optional codec tuning interface, may be NULL
    // set by AVOption
} MFContext;
59 
60 static int mf_choose_output_type(AVCodecContext *avctx);
61 static int mf_setup_context(AVCodecContext *avctx);
62 
63 #define MF_TIMEBASE (AVRational){1, 10000000}
64 // Sentinel value only used by us.
65 #define MF_INVALID_TIME AV_NOPTS_VALUE
66 
67 static int mf_wait_events(AVCodecContext *avctx)
68 {
69  MFContext *c = avctx->priv_data;
70 
71  if (!c->async_events)
72  return 0;
73 
74  while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
75  IMFMediaEvent *ev = NULL;
76  MediaEventType ev_id = 0;
77  HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
78  if (FAILED(hr)) {
79  av_log(avctx, AV_LOG_ERROR, "IMFMediaEventGenerator_GetEvent() failed: %s\n",
80  ff_hr_str(hr));
81  return AVERROR_EXTERNAL;
82  }
83  IMFMediaEvent_GetType(ev, &ev_id);
84  switch (ev_id) {
86  if (!c->draining)
87  c->async_need_input = 1;
88  break;
90  c->async_have_output = 1;
91  break;
93  c->draining_done = 1;
94  break;
96  c->async_marker = 1;
97  break;
98  default: ;
99  }
100  IMFMediaEvent_Release(ev);
101  }
102 
103  return 0;
104 }
105 
107 {
108  if (avctx->time_base.num > 0 && avctx->time_base.den > 0)
109  return avctx->time_base;
110  return MF_TIMEBASE;
111 }
112 
113 static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
114 {
115  if (av_pts == AV_NOPTS_VALUE)
116  return MF_INVALID_TIME;
117  return av_rescale_q(av_pts, mf_get_tb(avctx), MF_TIMEBASE);
118 }
119 
120 static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
121 {
122  LONGLONG stime = mf_to_mf_time(avctx, av_pts);
123  if (stime != MF_INVALID_TIME)
124  IMFSample_SetSampleTime(sample, stime);
125 }
126 
127 static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
128 {
129  return av_rescale_q(stime, MF_TIMEBASE, mf_get_tb(avctx));
130 }
131 
132 static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
133 {
134  LONGLONG pts;
135  HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
136  if (FAILED(hr))
137  return AV_NOPTS_VALUE;
138  return mf_from_mf_time(avctx, pts);
139 }
140 
141 static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
142 {
143  MFContext *c = avctx->priv_data;
144  HRESULT hr;
145  UINT32 sz;
146 
147  if (avctx->codec_id != AV_CODEC_ID_MP3 && avctx->codec_id != AV_CODEC_ID_AC3) {
148  hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
149  if (!FAILED(hr) && sz > 0) {
151  if (!avctx->extradata)
152  return AVERROR(ENOMEM);
153  avctx->extradata_size = sz;
154  hr = IMFAttributes_GetBlob(type, &MF_MT_USER_DATA, avctx->extradata, sz, NULL);
155  if (FAILED(hr))
156  return AVERROR_EXTERNAL;
157 
158  if (avctx->codec_id == AV_CODEC_ID_AAC && avctx->extradata_size >= 12) {
159  // Get rid of HEAACWAVEINFO (after wfx field, 12 bytes).
160  avctx->extradata_size = avctx->extradata_size - 12;
161  memmove(avctx->extradata, avctx->extradata + 12, avctx->extradata_size);
162  }
163  }
164  }
165 
166  // I don't know where it's documented that we need this. It happens with the
167  // MS mp3 encoder MFT. The idea for the workaround is taken from NAudio.
168  // (Certainly any lossy codec will have frames much smaller than 1 second.)
169  if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
170  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
171  if (!FAILED(hr)) {
172  av_log(avctx, AV_LOG_VERBOSE, "MFT_OUTPUT_STREAM_INFO.cbSize set to 0, "
173  "assuming %d bytes instead.\n", (int)sz);
174  c->out_info.cbSize = sz;
175  }
176  }
177 
178  return 0;
179 }
180 
181 static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
182 {
183  HRESULT hr;
184  UINT32 sz;
185 
186  hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
187  if (!FAILED(hr) && sz > 0) {
188  uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
189  if (!extradata)
190  return AVERROR(ENOMEM);
191  hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
192  if (FAILED(hr)) {
193  av_free(extradata);
194  return AVERROR_EXTERNAL;
195  }
196  av_freep(&avctx->extradata);
197  avctx->extradata = extradata;
198  avctx->extradata_size = sz;
199  }
200 
201  return 0;
202 }
203 
205 {
206  MFContext *c = avctx->priv_data;
207  HRESULT hr;
208  IMFMediaType *type;
209  int ret;
210 
211  hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
212  if (FAILED(hr)) {
213  av_log(avctx, AV_LOG_ERROR, "could not get output type\n");
214  return AVERROR_EXTERNAL;
215  }
216 
217  av_log(avctx, AV_LOG_VERBOSE, "final output type:\n");
218  ff_media_type_dump(avctx, type);
219 
220  ret = 0;
221  if (c->is_video) {
222  ret = mf_encv_output_type_get(avctx, type);
223  } else if (c->is_audio) {
224  ret = mf_enca_output_type_get(avctx, type);
225  }
226 
227  if (ret < 0)
228  av_log(avctx, AV_LOG_ERROR, "output type not supported\n");
229 
230  IMFMediaType_Release(type);
231  return ret;
232 }
233 
/**
 * Copy an encoded MF sample into an AVPacket, including timestamps and the
 * keyframe flag.
 *
 * @return 0 on success, <0 (AVERROR_EXTERNAL or buffer allocation error) on
 *         failure.
 */
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;
    DWORD len;
    IMFMediaBuffer *buffer;
    BYTE *data;
    UINT64 t;
    UINT32 t32;

    hr = IMFSample_GetTotalLength(sample, &len);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;

    if ((ret = ff_get_encode_buffer(avctx, avpkt, len, 0)) < 0)
        return ret;

    // Merge the (possibly scattered) sample buffers into one contiguous one.
    hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;

    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
    if (FAILED(hr)) {
        IMFMediaBuffer_Release(buffer);
        return AVERROR_EXTERNAL;
    }

    memcpy(avpkt->data, data, len);

    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);

    // Default to pts == dts; corrected below if the MFT reports a dts.
    avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);

    // Audio packets are always key frames; video ones only when flagged.
    hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
    if (c->is_audio || (!FAILED(hr) && t32 != 0))
        avpkt->flags |= AV_PKT_FLAG_KEY;

    hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
    if (!FAILED(hr)) {
        avpkt->dts = mf_from_mf_time(avctx, t);
        // At least on Qualcomm's HEVC encoder on SD 835, the output dts
        // starts from the input pts of the first frame, while the output pts
        // is shifted forward. Therefore, shift the output values back so that
        // the output pts matches the input.
        if (c->reorder_delay == AV_NOPTS_VALUE)
            c->reorder_delay = avpkt->pts - avpkt->dts;
        avpkt->dts -= c->reorder_delay;
        avpkt->pts -= c->reorder_delay;
    }

    return 0;
}
288 
289 static IMFSample *mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
290 {
291  MFContext *c = avctx->priv_data;
292  size_t len;
293  size_t bps;
294  IMFSample *sample;
295 
297  len = frame->nb_samples * bps;
298 
299  sample = ff_create_memory_sample(&c->functions, frame->data[0], len,
300  c->in_info.cbAlignment);
301  if (sample)
302  IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->nb_samples));
303  return sample;
304 }
305 
/**
 * Copy a video AVFrame into a newly allocated MF sample (tightly packed,
 * 1-byte alignment) and set its duration.
 *
 * @return a new sample, or NULL on any allocation/copy failure.
 */
static IMFSample *mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample;
    IMFMediaBuffer *buffer;
    BYTE *data;
    HRESULT hr;
    int ret;
    int size;

    size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
    if (size < 0)
        return NULL;

    sample = ff_create_memory_sample(&c->functions, NULL, size,
                                     c->in_info.cbAlignment);
    if (!sample)
        return NULL;

    hr = IMFSample_GetBufferByIndex(sample, 0, &buffer);
    if (FAILED(hr)) {
        IMFSample_Release(sample);
        return NULL;
    }

    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
    if (FAILED(hr)) {
        IMFMediaBuffer_Release(buffer);
        IMFSample_Release(sample);
        return NULL;
    }

    // Repack the frame planes into the contiguous MF buffer.
    ret = av_image_copy_to_buffer((uint8_t *)data, size, (void *)frame->data, frame->linesize,
                                  avctx->pix_fmt, avctx->width, avctx->height, 1);
    IMFMediaBuffer_SetCurrentLength(buffer, size);
    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);
    if (ret < 0) {
        IMFSample_Release(sample);
        return NULL;
    }

    IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->duration));

    return sample;
}
352 
353 static IMFSample *mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
354 {
355  MFContext *c = avctx->priv_data;
356  IMFSample *sample;
357 
358  if (c->is_audio) {
360  } else {
362  }
363 
364  if (sample)
365  mf_sample_set_pts(avctx, sample, frame->pts);
366 
367  return sample;
368 }
369 
/**
 * Feed one sample to the MFT, or (sample == NULL) start/continue draining.
 *
 * @return 0 on success, AVERROR(EAGAIN) if the MFT can't accept input now,
 *         AVERROR_EOF once draining was already started, AVERROR_EXTERNAL on
 *         MFT failure.
 */
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;

    if (sample) {
        if (c->async_events) {
            // Async MFTs only accept input after a need-input event.
            if ((ret = mf_wait_events(avctx)) < 0)
                return ret;
            if (!c->async_need_input)
                return AVERROR(EAGAIN);
        }
        // Mark the very first sample as a discontinuity.
        if (!c->sample_sent)
            IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
        c->sample_sent = 1;
        hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
        if (hr == MF_E_NOTACCEPTING) {
            return AVERROR(EAGAIN);
        } else if (FAILED(hr)) {
            av_log(avctx, AV_LOG_ERROR, "failed processing input: %s\n", ff_hr_str(hr));
            return AVERROR_EXTERNAL;
        }
        c->async_need_input = 0;
    } else if (!c->draining) {
        hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
        if (FAILED(hr))
            av_log(avctx, AV_LOG_ERROR, "failed draining: %s\n", ff_hr_str(hr));
        // Some MFTs (AC3) will send a frame after each drain command (???), so
        // this is required to make draining actually terminate.
        c->draining = 1;
        c->async_need_input = 0;
    } else {
        return AVERROR_EOF;
    }
    return 0;
}
407 
408 static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
409 {
410  MFContext *c = avctx->priv_data;
411  HRESULT hr;
412  DWORD st;
413  MFT_OUTPUT_DATA_BUFFER out_buffers;
414  IMFSample *sample;
415  int ret = 0;
416 
417  while (1) {
418  *out_sample = NULL;
419  sample = NULL;
420 
421  if (c->async_events) {
422  if ((ret = mf_wait_events(avctx)) < 0)
423  return ret;
424  if (!c->async_have_output || c->draining_done) {
425  ret = 0;
426  break;
427  }
428  }
429 
430  if (!c->out_stream_provides_samples) {
431  sample = ff_create_memory_sample(&c->functions, NULL,
432  c->out_info.cbSize,
433  c->out_info.cbAlignment);
434  if (!sample)
435  return AVERROR(ENOMEM);
436  }
437 
438  out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
439  .dwStreamID = c->out_stream_id,
440  .pSample = sample,
441  };
442 
443  st = 0;
444  hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
445 
446  if (out_buffers.pEvents)
447  IMFCollection_Release(out_buffers.pEvents);
448 
449  if (!FAILED(hr)) {
450  *out_sample = out_buffers.pSample;
451  ret = 0;
452  break;
453  }
454 
455  if (out_buffers.pSample)
456  IMFSample_Release(out_buffers.pSample);
457 
458  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
459  if (c->draining)
460  c->draining_done = 1;
461  ret = 0;
462  } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
463  av_log(avctx, AV_LOG_WARNING, "stream format change\n");
464  ret = mf_choose_output_type(avctx);
465  if (ret == 0) // we don't expect renegotiating the input type
467  if (ret > 0) {
468  ret = mf_setup_context(avctx);
469  if (ret >= 0) {
470  c->async_have_output = 0;
471  continue;
472  }
473  }
474  } else {
475  av_log(avctx, AV_LOG_ERROR, "failed processing output: %s\n", ff_hr_str(hr));
477  }
478 
479  break;
480  }
481 
482  c->async_have_output = 0;
483 
484  if (ret >= 0 && !*out_sample)
485  ret = c->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
486 
487  return ret;
488 }
489 
/**
 * FFEncoder receive_packet callback: pull a frame from the encode queue, feed
 * it to the MFT, and return the next encoded packet.
 *
 * @return 0 with avpkt filled, AVERROR(EAGAIN)/AVERROR_EOF per the encode
 *         API, <0 on error.
 */
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    MFContext *c = avctx->priv_data;
    IMFSample *sample = NULL;
    int ret;

    // Fetch the next input frame unless one is still pending from a
    // previous EAGAIN.
    if (!c->frame->buf[0]) {
        ret = ff_encode_get_frame(avctx, c->frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (c->frame->buf[0]) {
        sample = mf_avframe_to_sample(avctx, c->frame);
        if (!sample) {
            av_frame_unref(c->frame);
            return AVERROR(ENOMEM);
        }
        // Force a keyframe for explicit I-frame requests and the first frame.
        if (c->is_video && c->codec_api) {
            if (c->frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
                ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
        }
    }

    // sample == NULL here means EOF: mf_send_sample() starts draining.
    ret = mf_send_sample(avctx, sample);
    if (sample)
        IMFSample_Release(sample);
    if (ret != AVERROR(EAGAIN))
        av_frame_unref(c->frame);
    if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        return ret;

    ret = mf_receive_sample(avctx, &sample);
    if (ret < 0)
        return ret;

    ret = mf_sample_to_avpacket(avctx, sample, avpkt);
    IMFSample_Release(sample);

    return ret;
}
531 
532 // Most encoders seem to enumerate supported audio formats on the output types,
533 // at least as far as channel configuration and sample rate is concerned. Pick
534 // the one which seems to match best.
535 static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
536 {
537  MFContext *c = avctx->priv_data;
538  HRESULT hr;
539  UINT32 t;
540  GUID tg;
541  int64_t score = 0;
542 
543  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
544  if (!FAILED(hr) && t == avctx->sample_rate)
545  score |= 1LL << 32;
546 
547  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
548  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
549  score |= 2LL << 32;
550 
551  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
552  if (!FAILED(hr)) {
553  if (IsEqualGUID(&c->main_subtype, &tg))
554  score |= 4LL << 32;
555  }
556 
557  // Select the bitrate (lowest priority).
558  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
559  if (!FAILED(hr)) {
560  int diff = (int)t - avctx->bit_rate / 8;
561  if (diff >= 0) {
562  score |= (1LL << 31) - diff; // prefer lower bitrate
563  } else {
564  score |= (1LL << 30) + diff; // prefer higher bitrate
565  }
566  }
567 
568  hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
569  if (!FAILED(hr) && t != 0)
570  return -1;
571 
572  return score;
573 }
574 
575 static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
576 {
577  // (some decoders allow adjusting this freely, but it can also cause failure
578  // to set the output type - so it's commented for being too fragile)
579  //IMFAttributes_SetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, avctx->bit_rate / 8);
580  //IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
581 
582  return 0;
583 }
584 
585 static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
586 {
587  HRESULT hr;
588  UINT32 t;
589  int64_t score = 0;
590 
591  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
592  if (sformat == AV_SAMPLE_FMT_NONE)
593  return -1; // can not use
594 
595  if (sformat == avctx->sample_fmt)
596  score |= 1;
597 
598  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
599  if (!FAILED(hr) && t == avctx->sample_rate)
600  score |= 2;
601 
602  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
603  if (!FAILED(hr) && t == avctx->ch_layout.nb_channels)
604  score |= 4;
605 
606  return score;
607 }
608 
609 static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
610 {
611  HRESULT hr;
612  UINT32 t;
613 
614  enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
615  if (sformat != avctx->sample_fmt) {
616  av_log(avctx, AV_LOG_ERROR, "unsupported input sample format set\n");
617  return AVERROR(EINVAL);
618  }
619 
620  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
621  if (FAILED(hr) || t != avctx->sample_rate) {
622  av_log(avctx, AV_LOG_ERROR, "unsupported input sample rate set\n");
623  return AVERROR(EINVAL);
624  }
625 
626  hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
627  if (FAILED(hr) || t != avctx->ch_layout.nb_channels) {
628  av_log(avctx, AV_LOG_ERROR, "unsupported input channel number set\n");
629  return AVERROR(EINVAL);
630  }
631 
632  return 0;
633 }
634 
635 static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
636 {
637  MFContext *c = avctx->priv_data;
638  GUID tg;
639  HRESULT hr;
640  int score = -1;
641 
642  hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
643  if (!FAILED(hr)) {
644  if (IsEqualGUID(&c->main_subtype, &tg))
645  score = 1;
646  }
647 
648  return score;
649 }
650 
651 static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
652 {
653  MFContext *c = avctx->priv_data;
655 
656  ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
657  IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
658 
659  if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
660  framerate = avctx->framerate;
661  } else {
662  framerate = av_inv_q(avctx->time_base);
663 #if FF_API_TICKS_PER_FRAME
665  framerate.den *= avctx->ticks_per_frame;
667 #endif
668  }
669 
670  ff_MFSetAttributeRatio((IMFAttributes *)type, &MF_MT_FRAME_RATE, framerate.num, framerate.den);
671 
672  // (MS HEVC supports eAVEncH265VProfile_Main_420_8 only.)
673  if (avctx->codec_id == AV_CODEC_ID_H264) {
675  switch (avctx->profile) {
678  break;
681  break;
682  }
683  IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
684  }
685 
686  IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
687 
688  // Note that some of the ICodecAPI options must be set before SetOutputType.
689  if (c->codec_api) {
690  if (avctx->bit_rate)
691  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMeanBitRate, FF_VAL_VT_UI4(avctx->bit_rate));
692 
693  if (c->opt_enc_rc >= 0)
694  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
695 
696  if (c->opt_enc_quality >= 0)
697  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
698 
699  if (avctx->rc_max_rate > 0)
700  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMaxBitRate, FF_VAL_VT_UI4(avctx->rc_max_rate));
701 
702  if (avctx->gop_size > 0)
703  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVGOPSize, FF_VAL_VT_UI4(avctx->gop_size));
704 
705  if(avctx->rc_buffer_size > 0)
706  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonBufferSize, FF_VAL_VT_UI4(avctx->rc_buffer_size));
707 
708  if(avctx->compression_level >= 0)
709  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQualityVsSpeed, FF_VAL_VT_UI4(avctx->compression_level));
710 
711  if(avctx->global_quality > 0)
712  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoEncodeQP, FF_VAL_VT_UI4(avctx->global_quality ));
713 
714  // Always set the number of b-frames. Qualcomm's HEVC encoder on SD835
715  // defaults this to 1, and that setting is buggy with many of the
716  // rate control modes. (0 or 2 b-frames works fine with most rate
717  // control modes, but 2 seems buggy with the u_vbr mode.) Setting
718  // "scenario" to "camera_record" sets it in CFR mode (where the default
719  // is VFR), which makes the encoder avoid dropping frames.
720  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVDefaultBPictureCount, FF_VAL_VT_UI4(avctx->max_b_frames));
721  avctx->has_b_frames = avctx->max_b_frames > 0;
722 
723  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
724 
725  if (c->opt_enc_scenario >= 0)
726  ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo, FF_VAL_VT_UI4(c->opt_enc_scenario));
727  }
728 
729  return 0;
730 }
731 
732 static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
733 {
734  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
735  if (pix_fmt != avctx->pix_fmt)
736  return -1; // can not use
737 
738  return 0;
739 }
740 
741 static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
742 {
743  enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
744  if (pix_fmt != avctx->pix_fmt) {
745  av_log(avctx, AV_LOG_ERROR, "unsupported input pixel format set\n");
746  return AVERROR(EINVAL);
747  }
748 
749  //ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
750 
751  return 0;
752 }
753 
755 {
756  MFContext *c = avctx->priv_data;
757  HRESULT hr;
758  int ret;
759  IMFMediaType *out_type = NULL;
760  int64_t out_type_score = -1;
761  int out_type_index = -1;
762  int n;
763 
764  av_log(avctx, AV_LOG_VERBOSE, "output types:\n");
765  for (n = 0; ; n++) {
766  IMFMediaType *type;
767  int64_t score = -1;
768 
769  hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
770  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
771  break;
772  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
773  av_log(avctx, AV_LOG_VERBOSE, "(need to set input type)\n");
774  ret = 0;
775  goto done;
776  }
777  if (FAILED(hr)) {
778  av_log(avctx, AV_LOG_ERROR, "error getting output type: %s\n", ff_hr_str(hr));
780  goto done;
781  }
782 
783  av_log(avctx, AV_LOG_VERBOSE, "output type %d:\n", n);
784  ff_media_type_dump(avctx, type);
785 
786  if (c->is_video) {
787  score = mf_encv_output_score(avctx, type);
788  } else if (c->is_audio) {
789  score = mf_enca_output_score(avctx, type);
790  }
791 
792  if (score > out_type_score) {
793  if (out_type)
794  IMFMediaType_Release(out_type);
795  out_type = type;
796  out_type_score = score;
797  out_type_index = n;
798  IMFMediaType_AddRef(out_type);
799  }
800 
801  IMFMediaType_Release(type);
802  }
803 
804  if (out_type) {
805  av_log(avctx, AV_LOG_VERBOSE, "picking output type %d.\n", out_type_index);
806  } else {
807  hr = c->functions.MFCreateMediaType(&out_type);
808  if (FAILED(hr)) {
809  ret = AVERROR(ENOMEM);
810  goto done;
811  }
812  }
813 
814  ret = 0;
815  if (c->is_video) {
816  ret = mf_encv_output_adjust(avctx, out_type);
817  } else if (c->is_audio) {
818  ret = mf_enca_output_adjust(avctx, out_type);
819  }
820 
821  if (ret >= 0) {
822  av_log(avctx, AV_LOG_VERBOSE, "setting output type:\n");
823  ff_media_type_dump(avctx, out_type);
824 
825  hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
826  if (!FAILED(hr)) {
827  ret = 1;
828  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
829  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set input type\n");
830  ret = 0;
831  } else {
832  av_log(avctx, AV_LOG_ERROR, "could not set output type (%s)\n", ff_hr_str(hr));
834  }
835  }
836 
837 done:
838  if (out_type)
839  IMFMediaType_Release(out_type);
840  return ret;
841 }
842 
844 {
845  MFContext *c = avctx->priv_data;
846  HRESULT hr;
847  int ret;
848  IMFMediaType *in_type = NULL;
849  int64_t in_type_score = -1;
850  int in_type_index = -1;
851  int n;
852 
853  av_log(avctx, AV_LOG_VERBOSE, "input types:\n");
854  for (n = 0; ; n++) {
855  IMFMediaType *type = NULL;
856  int64_t score = -1;
857 
858  hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
859  if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
860  break;
861  if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
862  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 1)\n");
863  ret = 0;
864  goto done;
865  }
866  if (FAILED(hr)) {
867  av_log(avctx, AV_LOG_ERROR, "error getting input type: %s\n", ff_hr_str(hr));
869  goto done;
870  }
871 
872  av_log(avctx, AV_LOG_VERBOSE, "input type %d:\n", n);
873  ff_media_type_dump(avctx, type);
874 
875  if (c->is_video) {
876  score = mf_encv_input_score(avctx, type);
877  } else if (c->is_audio) {
878  score = mf_enca_input_score(avctx, type);
879  }
880 
881  if (score > in_type_score) {
882  if (in_type)
883  IMFMediaType_Release(in_type);
884  in_type = type;
885  in_type_score = score;
886  in_type_index = n;
887  IMFMediaType_AddRef(in_type);
888  }
889 
890  IMFMediaType_Release(type);
891  }
892 
893  if (in_type) {
894  av_log(avctx, AV_LOG_VERBOSE, "picking input type %d.\n", in_type_index);
895  } else {
896  // Some buggy MFTs (WMA encoder) fail to return MF_E_TRANSFORM_TYPE_NOT_SET.
897  av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 2)\n");
898  ret = 0;
899  goto done;
900  }
901 
902  ret = 0;
903  if (c->is_video) {
904  ret = mf_encv_input_adjust(avctx, in_type);
905  } else if (c->is_audio) {
906  ret = mf_enca_input_adjust(avctx, in_type);
907  }
908 
909  if (ret >= 0) {
910  av_log(avctx, AV_LOG_VERBOSE, "setting input type:\n");
911  ff_media_type_dump(avctx, in_type);
912 
913  hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
914  if (!FAILED(hr)) {
915  ret = 1;
916  } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
917  av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set output type\n");
918  ret = 0;
919  } else {
920  av_log(avctx, AV_LOG_ERROR, "could not set input type (%s)\n", ff_hr_str(hr));
922  }
923  }
924 
925 done:
926  if (in_type)
927  IMFMediaType_Release(in_type);
928  return ret;
929 }
930 
932 {
933  // This follows steps 1-5 on:
934  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa965264(v=vs.85).aspx
935  // If every MFT implementer does this correctly, this loop should at worst
936  // be repeated once.
937  int need_input = 1, need_output = 1;
938  int n;
939  for (n = 0; n < 2 && (need_input || need_output); n++) {
940  int ret;
941  ret = mf_choose_input_type(avctx);
942  if (ret < 0)
943  return ret;
944  need_input = ret < 1;
945  ret = mf_choose_output_type(avctx);
946  if (ret < 0)
947  return ret;
948  need_output = ret < 1;
949  }
950  if (need_input || need_output) {
951  av_log(avctx, AV_LOG_ERROR, "format negotiation failed (%d/%d)\n",
952  need_input, need_output);
953  return AVERROR_EXTERNAL;
954  }
955  return 0;
956 }
957 
959 {
960  MFContext *c = avctx->priv_data;
961  HRESULT hr;
962  int ret;
963 
964  hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
965  if (FAILED(hr))
966  return AVERROR_EXTERNAL;
967  av_log(avctx, AV_LOG_VERBOSE, "in_info: size=%d, align=%d\n",
968  (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
969 
970  hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
971  if (FAILED(hr))
972  return AVERROR_EXTERNAL;
973  c->out_stream_provides_samples =
974  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
975  (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
976  av_log(avctx, AV_LOG_VERBOSE, "out_info: size=%d, align=%d%s\n",
977  (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
978  c->out_stream_provides_samples ? " (provides samples)" : "");
979 
980  if ((ret = mf_output_type_get(avctx)) < 0)
981  return ret;
982 
983  return 0;
984 }
985 
986 static int mf_unlock_async(AVCodecContext *avctx)
987 {
988  MFContext *c = avctx->priv_data;
989  HRESULT hr;
990  IMFAttributes *attrs;
991  UINT32 v;
992  int res = AVERROR_EXTERNAL;
993 
994  // For hw encoding we unfortunately need to use async mode, otherwise
995  // play it safe and avoid it.
996  if (!(c->is_video && c->opt_enc_hw))
997  return 0;
998 
999  hr = IMFTransform_GetAttributes(c->mft, &attrs);
1000  if (FAILED(hr)) {
1001  av_log(avctx, AV_LOG_ERROR, "error retrieving MFT attributes: %s\n", ff_hr_str(hr));
1002  goto err;
1003  }
1004 
1005  hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
1006  if (FAILED(hr)) {
1007  av_log(avctx, AV_LOG_ERROR, "error querying async: %s\n", ff_hr_str(hr));
1008  goto err;
1009  }
1010 
1011  if (!v) {
1012  av_log(avctx, AV_LOG_ERROR, "hardware MFT is not async\n");
1013  goto err;
1014  }
1015 
1016  hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
1017  if (FAILED(hr)) {
1018  av_log(avctx, AV_LOG_ERROR, "could not set async unlock: %s\n", ff_hr_str(hr));
1019  goto err;
1020  }
1021 
1022  hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
1023  if (FAILED(hr)) {
1024  av_log(avctx, AV_LOG_ERROR, "could not get async interface\n");
1025  goto err;
1026  }
1027 
1028  res = 0;
1029 
1030 err:
1031  IMFAttributes_Release(attrs);
1032  return res;
1033 }
1034 
1035 static int mf_create(void *log, MFFunctions *f, IMFTransform **mft,
1036  const AVCodec *codec, int use_hw)
1037 {
1038  int is_audio = codec->type == AVMEDIA_TYPE_AUDIO;
1039  const CLSID *subtype = ff_codec_to_mf_subtype(codec->id);
1040  MFT_REGISTER_TYPE_INFO reg = {0};
1041  GUID category;
1042  int ret;
1043 
1044  *mft = NULL;
1045 
1046  if (!subtype)
1047  return AVERROR(ENOSYS);
1048 
1049  reg.guidSubtype = *subtype;
1050 
1051  if (is_audio) {
1052  reg.guidMajorType = MFMediaType_Audio;
1053  category = MFT_CATEGORY_AUDIO_ENCODER;
1054  } else {
1055  reg.guidMajorType = MFMediaType_Video;
1056  category = MFT_CATEGORY_VIDEO_ENCODER;
1057  }
1058 
1059  if ((ret = ff_instantiate_mf(log, f, category, NULL, &reg, use_hw, mft)) < 0)
1060  return ret;
1061 
1062  return 0;
1063 }
1064 
{
    MFContext *c = avctx->priv_data;
    HRESULT hr;
    int ret;
    const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
    int use_hw = 0;

    c->frame = av_frame_alloc();
    if (!c->frame)
        return AVERROR(ENOMEM);

    c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
    c->is_video = !c->is_audio;
    c->reorder_delay = AV_NOPTS_VALUE;

    // Hardware MFTs are only attempted for video, and only when the user
    // opted in via the hw_encoding AVOption.
    if (c->is_video && c->opt_enc_hw)
        use_hw = 1;

    if (!subtype)
        return AVERROR(ENOSYS);

    c->main_subtype = *subtype;

    if ((ret = mf_create(avctx, &c->functions, &c->mft, avctx->codec, use_hw)) < 0)
        return ret;

    // Switch async (hardware) MFTs to event-driven operation before any
    // other transform calls.
    if ((ret = mf_unlock_async(avctx)) < 0)
        return ret;

    // ICodecAPI is optional; when present it is used later to set rate
    // control, scenario, quality, etc.
    hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
    if (!FAILED(hr))
        av_log(avctx, AV_LOG_VERBOSE, "MFT supports ICodecAPI.\n");


    // E_NOTIMPL means the MFT uses the default fixed stream IDs (0/0).
    hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
    if (hr == E_NOTIMPL) {
        c->in_stream_id = c->out_stream_id = 0;
    } else if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not get stream IDs (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    if ((ret = mf_negotiate_types(avctx)) < 0)
        return ret;

    if ((ret = mf_setup_context(avctx)) < 0)
        return ret;

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start streaming (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "could not start stream (%s)\n", ff_hr_str(hr));
        return AVERROR_EXTERNAL;
    }

    // Best-effort wait for extradata when global headers were requested:
    // poll the output type with exponential backoff (10/20/40 ms).
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && c->async_events &&
        c->is_video && !avctx->extradata) {
        int sleep = 10000, total = 0;
        av_log(avctx, AV_LOG_VERBOSE, "Awaiting extradata\n");
        while (total < 70*1000) {
            // The Qualcomm H264 encoder on SD835 doesn't provide extradata
            // immediately, but it becomes available soon after init (without
            // any waitable event). In practice, it's available after less
            // than 10 ms, but wait for up to 70 ms before giving up.
            // Some encoders (Qualcomm's HEVC encoder on SD835, some versions
            // of the QSV H264 encoder at least) don't provide extradata this
            // way at all, not even after encoding a frame - it's only
            // available prepended to frames.
            av_usleep(sleep);
            total += sleep;
            mf_output_type_get(avctx);
            if (avctx->extradata)
                break;
            sleep *= 2;
        }
        av_log(avctx, AV_LOG_VERBOSE, "%s extradata in %d ms\n",
               avctx->extradata ? "Got" : "Didn't get", total / 1000);
    }

    return 0;
}
1152 
#if !HAVE_UWP
// Resolve one mfplat.dll export into the MFFunctions table by name;
// bails out of the calling function with AVERROR_UNKNOWN if the symbol
// is missing from the loaded DLL.
#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = (void *)dlsym(context->library, #func_name); \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "DLL mfplat.dll failed to find function "\
               #func_name "\n"); \
        return AVERROR_UNKNOWN; \
    }
#else
// In UWP (which lacks LoadLibrary), just link directly against
// the functions - this requires building with new/complete enough
// import libraries.
#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = func_name; \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "Failed to find function " #func_name \
               "\n"); \
        return AVERROR_UNKNOWN; \
    }
#endif
1173 
// Windows N editions do not provide MediaFoundation by default.
// So to avoid a DLL loading error, MediaFoundation is dynamically loaded,
// except on UWP builds since LoadLibrary is not available there.
{
    MFContext *c = avctx->priv_data;

#if !HAVE_UWP
    c->library = dlopen("mfplat.dll", 0);

    if (!c->library) {
        av_log(c, AV_LOG_ERROR, "DLL mfplat.dll failed to open\n");
        return AVERROR_UNKNOWN;
    }
#endif

    // Each LOAD_MF_FUNCTION returns AVERROR_UNKNOWN from this function
    // if the symbol cannot be resolved.
    LOAD_MF_FUNCTION(c, MFStartup);
    LOAD_MF_FUNCTION(c, MFShutdown);
    LOAD_MF_FUNCTION(c, MFCreateAlignedMemoryBuffer);
    LOAD_MF_FUNCTION(c, MFCreateSample);
    LOAD_MF_FUNCTION(c, MFCreateMediaType);
    // MFTEnumEx is missing in Windows Vista's mfplat.dll.
    LOAD_MF_FUNCTION(c, MFTEnumEx);

    return 0;
}
1200 
1201 static int mf_close(AVCodecContext *avctx)
1202 {
1203  MFContext *c = avctx->priv_data;
1204 
1205  if (c->codec_api)
1206  ICodecAPI_Release(c->codec_api);
1207 
1208  if (c->async_events)
1209  IMFMediaEventGenerator_Release(c->async_events);
1210 
1211 #if !HAVE_UWP
1212  if (c->library)
1213  ff_free_mf(&c->functions, &c->mft);
1214 
1215  dlclose(c->library);
1216  c->library = NULL;
1217 #else
1218  ff_free_mf(&c->functions, &c->mft);
1219 #endif
1220 
1221  av_frame_free(&c->frame);
1222 
1223  av_freep(&avctx->extradata);
1224  avctx->extradata_size = 0;
1225 
1226  return 0;
1227 }
1228 
1229 static int mf_init(AVCodecContext *avctx)
1230 {
1231  int ret;
1232  if ((ret = mf_load_library(avctx)) == 0) {
1233  if ((ret = mf_init_encoder(avctx)) == 0) {
1234  return 0;
1235  }
1236  }
1237  return ret;
1238 }
1239 
#define OFFSET(x) offsetof(MFContext, x)

// Expands to the AVClass plus FFCodec definitions for one MediaFoundation
// encoder wrapper (e.g. ff_h264_mf_encoder / "h264_mf"). FMTS and CAPS are
// designated-initializer fragments supplied by the AFMTS/ACAPS or
// VFMTS/VCAPS macros below.
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS, DEFAULTS) \
    static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
        .class_name = #NAME "_mf", \
        .item_name = av_default_item_name, \
        .option = OPTS, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    const FFCodec ff_ ## NAME ## _mf_encoder = { \
        .p.priv_class = &ff_ ## NAME ## _mf_encoder_class, \
        .p.name = #NAME "_mf", \
        CODEC_LONG_NAME(#ID " via MediaFoundation"), \
        .p.type = AVMEDIA_TYPE_ ## MEDIATYPE, \
        .p.id = AV_CODEC_ID_ ## ID, \
        .priv_data_size = sizeof(MFContext), \
        .init = mf_init, \
        .close = mf_close, \
        FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
        FMTS \
        CAPS \
        .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
        .defaults = DEFAULTS, \
    };
1264 
// Audio encoders: only s16 input is offered to lavc.
#define AFMTS \
        .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, \
                                                        AV_SAMPLE_FMT_NONE },
#define ACAPS \
        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,

MF_ENCODER(AUDIO, aac, AAC, NULL, AFMTS, ACAPS, NULL);
MF_ENCODER(AUDIO, ac3, AC3, NULL, AFMTS, ACAPS, NULL);
MF_ENCODER(AUDIO, mp3, MP3, NULL, AFMTS, ACAPS, NULL);
1275 
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
// Video-only options; the named constants map to the corresponding
// eAVEncCommonRateControlMode / eAVScenarioInfo ICodecAPI values.
// -1 means "not set": leave the MFT's own default in place.
static const AVOption venc_opts[] = {
    {"rate_control", "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "rate_control"},
    { "default", "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "rate_control"},
    { "cbr", "CBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_CBR}, 0, 0, VE, .unit = "rate_control"},
    { "pc_vbr", "Peak constrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_PeakConstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
    { "u_vbr", "Unconstrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_UnconstrainedVBR}, 0, 0, VE, .unit = "rate_control"},
    { "quality", "Quality mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_Quality}, 0, 0, VE, .unit = "rate_control" },
    // The following rate_control modes require Windows 8.
    { "ld_vbr", "Low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_LowDelayVBR}, 0, 0, VE, .unit = "rate_control"},
    { "g_vbr", "Global VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalVBR}, 0, 0, VE, .unit = "rate_control" },
    { "gld_vbr", "Global low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR}, 0, 0, VE, .unit = "rate_control"},

    {"scenario", "Select usage scenario", OFFSET(opt_enc_scenario), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, .unit = "scenario"},
    { "default", "Default scenario", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "scenario"},
    { "display_remoting", "Display remoting", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemoting}, 0, 0, VE, .unit = "scenario"},
    { "video_conference", "Video conference", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_VideoConference}, 0, 0, VE, .unit = "scenario"},
    { "archive", "Archive", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_Archive}, 0, 0, VE, .unit = "scenario"},
    { "live_streaming", "Live streaming", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_LiveStreaming}, 0, 0, VE, .unit = "scenario"},
    { "camera_record", "Camera record", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_CameraRecord}, 0, 0, VE, .unit = "scenario"},
    { "display_remoting_with_feature_map", "Display remoting with feature map", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap}, 0, 0, VE, .unit = "scenario"},

    {"quality", "Quality", OFFSET(opt_enc_quality), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 100, VE},
    {"hw_encoding", "Force hardware encoding", OFFSET(opt_enc_hw), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VE},
    {NULL}
};
1302 
static const FFCodecDefault defaults[] = {
    // g=0 overrides lavc's generic gop_size default of 12.
    // NOTE(review): presumably so the MFT's own GOP choice applies when the
    // user did not set one - confirm against mf_encv_output_adjust.
    { "g", "0" },
    { NULL },
};
1307 
// Video encoders: NV12 and YUV420P input are offered to lavc.
#define VFMTS \
        .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
                                                    AV_PIX_FMT_YUV420P, \
                                                    AV_PIX_FMT_NONE },
#define VCAPS \
        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1,

MF_ENCODER(VIDEO, h264, H264, venc_opts, VFMTS, VCAPS, defaults);
MF_ENCODER(VIDEO, hevc, HEVC, venc_opts, VFMTS, VCAPS, defaults);
AVCodec
AVCodec.
Definition: codec.h:187
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
ff_hr_str
#define ff_hr_str(hr)
Definition: mf_utils.h:161
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
VCAPS
#define VCAPS
Definition: mfenc.c:1312
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:443
FF_VAL_VT_UI4
#define FF_VAL_VT_UI4(v)
Definition: mf_utils.h:166
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1056
venc_opts
static const AVOption venc_opts[]
Definition: mfenc.c:1277
mf_v_avframe_to_sample
static IMFSample * mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:306
mf_choose_input_type
static int mf_choose_input_type(AVCodecContext *avctx)
Definition: mfenc.c:843
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
LOAD_MF_FUNCTION
#define LOAD_MF_FUNCTION(context, func_name)
Definition: mfenc.c:1154
AV_PROFILE_H264_MAIN
#define AV_PROFILE_H264_MAIN
Definition: defs.h:112
int64_t
long long int64_t
Definition: coverity.c:34
normalize.log
log
Definition: normalize.py:21
ff_codec_to_mf_subtype
const CLSID * ff_codec_to_mf_subtype(enum AVCodecID codec)
Definition: mf_utils.c:507
mf_enca_output_score
static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:535
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
MFContext::opt_enc_hw
int opt_enc_hw
Definition: mfenc.c:57
MFContext::av_class
AVClass * av_class
Definition: mfenc.c:36
mf_receive_sample
static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
Definition: mfenc.c:408
MF_ENCODER
#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS, DEFAULTS)
Definition: mfenc.c:1242
mf_enca_output_type_get
static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:141
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:539
MFContext::sample_sent
int sample_sent
Definition: mfenc.c:49
ff_eAVEncCommonRateControlMode_Quality
@ ff_eAVEncCommonRateControlMode_Quality
Definition: mf_utils.h:123
ff_eAVEncCommonRateControlMode_CBR
@ ff_eAVEncCommonRateControlMode_CBR
Definition: mf_utils.h:120
AVOption
AVOption.
Definition: opt.h:429
encode.h
data
const char data[16]
Definition: mxf.c:148
mf_encv_output_score
static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:635
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
category
category
Definition: openal-dec.c:249
MFContext::draining_done
int draining_done
Definition: mfenc.c:48
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
ff_MFSetAttributeSize
HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid, UINT32 uw, UINT32 uh)
Definition: mf_utils.c:40
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:594
MFContext::functions
MFFunctions functions
Definition: mfenc.c:38
ff_eAVEncH264VProfile_High
@ ff_eAVEncH264VProfile_High
Definition: mf_utils.h:157
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:338
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:566
MFContext::out_stream_id
DWORD out_stream_id
Definition: mfenc.c:44
MFContext::async_marker
int async_marker
Definition: mfenc.c:50
FFCodecDefault
Definition: codec_internal.h:97
mf_avframe_to_sample
static IMFSample * mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:353
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1071
ff_media_type_to_sample_fmt
enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type)
Definition: mf_utils.c:114
MFContext::async_need_input
int async_need_input
Definition: mfenc.c:50
OFFSET
#define OFFSET(x)
Definition: mfenc.c:1240
mf_receive_packet
static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: mfenc.c:490
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
MFContext::is_audio
int is_audio
Definition: mfenc.c:40
mf_utils.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
VFMTS
#define VFMTS
Definition: mfenc.c:1308
pts
static int64_t pts
Definition: transcode_aac.c:644
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:441
ff_eAVEncCommonRateControlMode_GlobalVBR
@ ff_eAVEncCommonRateControlMode_GlobalVBR
Definition: mf_utils.h:125
ff_METransformMarker
@ ff_METransformMarker
Definition: mf_utils.h:147
AVRational::num
int num
Numerator.
Definition: rational.h:59
ff_instantiate_mf
int ff_instantiate_mf(void *log, MFFunctions *f, GUID category, MFT_REGISTER_TYPE_INFO *in_type, MFT_REGISTER_TYPE_INFO *out_type, int use_hw, IMFTransform **res)
Definition: mf_utils.c:550
ff_free_mf
void ff_free_mf(MFFunctions *f, IMFTransform **mft)
Definition: mf_utils.c:643
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
mf_setup_context
static int mf_setup_context(AVCodecContext *avctx)
Definition: mfenc.c:958
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:530
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:729
MFContext::opt_enc_rc
int opt_enc_rc
Definition: mfenc.c:54
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1249
MFContext::reorder_delay
int64_t reorder_delay
Definition: mfenc.c:51
mf_encv_output_adjust
static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:651
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
MFContext::opt_enc_scenario
int opt_enc_scenario
Definition: mfenc.c:56
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
MFContext::codec_api
ICodecAPI * codec_api
Definition: mfenc.c:52
MFContext::in_info
MFT_INPUT_STREAM_INFO in_info
Definition: mfenc.c:45
MFContext::out_stream_provides_samples
int out_stream_provides_samples
Definition: mfenc.c:47
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1302
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
MFContext::library
HMODULE library
Definition: mfenc.c:37
MFContext::frame
AVFrame * frame
Definition: mfenc.c:39
if
if(ret)
Definition: filter_design.txt:179
ff_eAVScenarioInfo_LiveStreaming
@ ff_eAVScenarioInfo_LiveStreaming
Definition: mf_utils.h:134
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1287
ff_MFSetAttributeRatio
#define ff_MFSetAttributeRatio
Definition: mf_utils.c:47
framerate
float framerate
Definition: av1_levels.c:29
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
NULL
#define NULL
Definition: coverity.c:32
ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
@ ff_eAVEncCommonRateControlMode_PeakConstrainedVBR
Definition: mf_utils.h:121
AVCodec::type
enum AVMediaType type
Definition: codec.h:200
mf_enca_input_score
static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:585
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
MF_INVALID_TIME
#define MF_INVALID_TIME
Definition: mfenc.c:65
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
mf_enca_input_adjust
static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:609
time.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_AAC
@ AV_CODEC_ID_AAC
Definition: codec_id.h:442
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:550
ff_media_type_to_pix_fmt
enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type)
Definition: mf_utils.c:158
MFFunctions
Definition: mf_utils.h:48
f
f
Definition: af_crystalizer.c:122
mf_a_avframe_to_sample
static IMFSample * mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
Definition: mfenc.c:289
ff_eAVScenarioInfo_Archive
@ ff_eAVScenarioInfo_Archive
Definition: mf_utils.h:133
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1037
codec_internal.h
bps
unsigned bps
Definition: movenc.c:1877
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1063
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
MFContext::is_video
int is_video
Definition: mfenc.c:40
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_eAVEncH264VProfile_Base
@ ff_eAVEncH264VProfile_Base
Definition: mf_utils.h:155
ff_eAVScenarioInfo_DisplayRemoting
@ ff_eAVScenarioInfo_DisplayRemoting
Definition: mf_utils.h:131
ACAPS
#define ACAPS
Definition: mfenc.c:1268
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
MFContext::opt_enc_quality
int opt_enc_quality
Definition: mfenc.c:55
MFContext::async_events
IMFMediaEventGenerator * async_events
Definition: mfenc.c:43
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
ff_eAVEncCommonRateControlMode_UnconstrainedVBR
@ ff_eAVEncCommonRateControlMode_UnconstrainedVBR
Definition: mf_utils.h:122
MF_TIMEBASE
#define MF_TIMEBASE
Definition: mfenc.c:63
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
@ ff_eAVScenarioInfo_DisplayRemotingWithFeatureMap
Definition: mf_utils.h:136
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:545
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
mf_get_tb
static AVRational mf_get_tb(AVCodecContext *avctx)
Definition: mfenc.c:106
ff_METransformNeedInput
@ ff_METransformNeedInput
Definition: mf_utils.h:144
mf_load_library
static int mf_load_library(AVCodecContext *avctx)
Definition: mfenc.c:1177
ff_METransformDrainComplete
@ ff_METransformDrainComplete
Definition: mf_utils.h:146
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
mf_send_sample
static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:370
MFContext::in_stream_id
DWORD in_stream_id
Definition: mfenc.c:44
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:529
MFContext::async_have_output
int async_have_output
Definition: mfenc.c:50
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
VE
#define VE
Definition: mfenc.c:1276
MFContext::out_info
MFT_OUTPUT_STREAM_INFO out_info
Definition: mfenc.c:46
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
profile
int profile
Definition: mxfenc.c:2228
MFContext
Definition: mfenc.c:35
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
mf_negotiate_types
static int mf_negotiate_types(AVCodecContext *avctx)
Definition: mfenc.c:931
mf_enca_output_adjust
static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:575
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
mf_sample_set_pts
static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
Definition: mfenc.c:120
mf_to_mf_time
static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
Definition: mfenc.c:113
mf_create
static int mf_create(void *log, MFFunctions *f, IMFTransform **mft, const AVCodec *codec, int use_hw)
Definition: mfenc.c:1035
mf_from_mf_time
static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
Definition: mfenc.c:127
mf_init
static int mf_init(AVCodecContext *avctx)
Definition: mfenc.c:1229
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AV_PROFILE_H264_HIGH
#define AV_PROFILE_H264_HIGH
Definition: defs.h:114
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:106
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1650
mf_init_encoder
static int mf_init_encoder(AVCodecContext *avctx)
Definition: mfenc.c:1065
mf_close
static int mf_close(AVCodecContext *avctx)
Definition: mfenc.c:1201
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:582
mf_encv_input_adjust
static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:741
MFContext::draining
int draining
Definition: mfenc.c:48
ff_eAVScenarioInfo_CameraRecord
@ ff_eAVScenarioInfo_CameraRecord
Definition: mf_utils.h:135
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:459
FF_VAL_VT_BOOL
#define FF_VAL_VT_BOOL(v)
Definition: mf_utils.h:167
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:801
ff_encode_get_frame
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
Definition: encode.c:205
mf_choose_output_type
static int mf_choose_output_type(AVCodecContext *avctx)
Definition: mfenc.c:754
ff_eAVScenarioInfo_VideoConference
@ ff_eAVScenarioInfo_VideoConference
Definition: mf_utils.h:132
AFMTS
#define AFMTS
Definition: mfenc.c:1265
ff_media_type_dump
void ff_media_type_dump(void *log, IMFMediaType *type)
Definition: mf_utils.c:502
defaults
static const FFCodecDefault defaults[]
Definition: mfenc.c:1303
mf_encv_input_score
static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:732
mf_output_type_get
static int mf_output_type_get(AVCodecContext *avctx)
Definition: mfenc.c:204
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:501
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
mf_sample_to_avpacket
static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
Definition: mfenc.c:234
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
ff_METransformHaveOutput
@ ff_METransformHaveOutput
Definition: mf_utils.h:145
imgutils.h
MFContext::mft
IMFTransform * mft
Definition: mfenc.c:42
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
mf_unlock_async
static int mf_unlock_async(AVCodecContext *avctx)
Definition: mfenc.c:986
ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
@ ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR
Definition: mf_utils.h:126
mf_wait_events
static int mf_wait_events(AVCodecContext *avctx)
Definition: mfenc.c:67
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_eAVEncH264VProfile_Main
@ ff_eAVEncH264VProfile_Main
Definition: mf_utils.h:156
mf_sample_get_pts
static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
Definition: mfenc.c:132
MFContext::main_subtype
GUID main_subtype
Definition: mfenc.c:41
ff_eAVEncCommonRateControlMode_LowDelayVBR
@ ff_eAVEncCommonRateControlMode_LowDelayVBR
Definition: mf_utils.h:124
w32dlfcn.h
mf_encv_output_type_get
static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
Definition: mfenc.c:181
AVCodecContext::compression_level
int compression_level
Definition: avcodec.h:1255
ff_create_memory_sample
IMFSample * ff_create_memory_sample(MFFunctions *f, void *fill_data, size_t size, size_t align)
Definition: mf_utils.c:76