#include <DeckLinkAPI.h>
#include "libklvanc/vanc.h"
#include "libklvanc/vanc-lines.h"
#include "libklvanc/pixels.h"

/* decklink_frame::GetRowBytes(): V210 rows are padded so each 48-pixel group occupies 128 bytes */
return ((GetWidth() + 47) / 48) * 128;

/* decklink_frame::GetPixelFormat(): wrapped AVFrame input is 8-bit UYVY, V210 input is 10-bit YUV */
return bmdFormat8BitYUV;
return bmdFormat10BitYUV;

virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags(void)
/* A negative linesize means the AVFrame is stored bottom-up, so ask the card to flip it */
return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
return bmdFrameFlagDefault;
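The ((width + 47) / 48) * 128 stride above follows from the V210 packing: 6 pixels occupy 16 bytes, and rows are padded so every group of 48 pixels fills exactly 128 bytes. A quick, purely illustrative check of the formula (not part of the file):

constexpr long v210_row_bytes(long width) { return ((width + 47) / 48) * 128; }
static_assert(v210_row_bytes(1920) == 5120, "1080-line modes use 5120-byte rows");
static_assert(v210_row_bytes(720)  == 1920, "720-pixel SD rows pad to 1920 bytes");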
/* decklink_frame: remaining IDeckLinkVideoFrame methods */
virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
/* decklink_output_callback: IUnknown stubs; reference counting is a no-op because the context
   owns the callback object and deletes it in ff_decklink_write_trailer() */
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
virtual ULONG   STDMETHODCALLTYPE AddRef(void)  { return 1; }
virtual ULONG   STDMETHODCALLTYPE Release(void) { return 1; }
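For context, the callback registered later with SetScheduledFrameCompletionCallback() implements IDeckLinkVideoOutputCallback. A minimal sketch follows; the class name and body are illustrative, not the file's actual decklink_output_callback, which additionally frees the scheduled AVFrame/AVPacket and signals that a buffer slot is free:

class example_output_callback : public IDeckLinkVideoOutputCallback {
public:
    virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *frame,
                                                              BMDOutputFrameCompletionResult result)
    {
        frame->Release();   /* drop the reference taken when the frame was scheduled */
        return S_OK;
    }
    virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void)       { return S_OK; }
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG   STDMETHODCALLTYPE AddRef(void)                            { return 1; }
    virtual ULONG   STDMETHODCALLTYPE Release(void)                           { return 1; }
};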
/* decklink_setup_video(): accepted inputs are a wrapped AVFrame in UYVY422 or a V210 bitstream */
av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format!"
       " Only AV_PIX_FMT_UYVY422 is supported.\n");
ctx->raw_format = bmdFormat8BitYUV;
av_log(avctx, AV_LOG_ERROR, "Unsupported codec type!"
       " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n");
ctx->raw_format = bmdFormat10BitYUV;
av_log(avctx, AV_LOG_ERROR, "Unsupported video size, framerate or field order!"
       " Check available formats with -list_formats 1.\n");
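In practice this means the decklink muxer is fed either uncompressed UYVY422 (wrapped AVFrames) or a V210 bitstream; a typical command line (device name illustrative) is ffmpeg -i INPUT -pix_fmt uyvy422 -f decklink 'DeckLink Mini Monitor', or with -c:v v210 for the 10-bit path.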
/* decklink_setup_video(): enable video output with VANC if supported, otherwise fall back */
if (ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputVANC) != S_OK) {
    ctx->supports_vanc = 0;
}
if (!ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK)
    return -1;

/* Register the completion callback that returns frames to the buffer pool */
ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);
/* decklink_setup_video(): derive the preroll and buffer sizes (in frames) from the stream time base */
ctx->frames_preroll /= 1000;

/* Buffer twice as many frames as the preroll, capped at 60 */
ctx->frames_buffer = ctx->frames_preroll * 2;
ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
ctx->frames_buffer_available_spots = ctx->frames_buffer;

av_log(avctx, AV_LOG_DEBUG, "output: %s, preroll: %d, frames buffer size: %d\n",
       avctx->url, ctx->frames_preroll, ctx->frames_buffer);
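A worked example of the sizing above, using assumed values rather than ones from the file: 29.97 fps video has st->time_base = 1001/30000 and the muxer's default preroll is 0.5 seconds, so

/* frames_preroll = 30000 * 0.5 = 15000, then /= 1000 (den > 1000)  ->  15 frames */
/* frames_buffer  = 15 * 2 = 30, FFMIN(30, 60)                      ->  30 slots  */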
/* decklink_setup_audio(): the card takes 48 kHz, 16-bit PCM with 2, 8 or 16 channels */
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate!"
       " Only 48kHz is supported.\n");
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels!"
       " Only 2, 8 or 16 channels are supported.\n");

if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
                                bmdAudioSampleType16bitInteger,
                                st->codecpar->channels,
                                bmdAudioOutputStreamTimestamped) != S_OK) {

if (ctx->dlo->BeginAudioPreroll() != S_OK) {
/* ff_decklink_write_trailer(): stop playback at the last scheduled PTS and tear everything down */
if (ctx->playback_started) {
    BMDTimeValue actual;
    ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
                                    &actual, ctx->bmd_tb_den);
    ctx->dlo->DisableVideoOutput();
    ctx->dlo->DisableAudioOutput();
}

if (ctx->output_callback)
    delete ctx->output_callback;

klvanc_context_destroy(ctx->vanc_ctx);
/* construct_cc(): convert ATSC A53 closed-caption side data into an EIA-708 CDP VANC packet */
static int construct_cc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                        AVPacket *pkt, struct klvanc_line_set_s *vanc_lines)
{
    struct klvanc_packet_eia_708b_s *cdp;

    ret = klvanc_create_eia708_cdp(&cdp);
        klvanc_destroy_eia708_cdp(cdp);

    if (cc_count > KLVANC_MAX_CC_COUNT) {
        cc_count = KLVANC_MAX_CC_COUNT;

    /* Each A53 triplet is one cc_valid/cc_type byte followed by two data bytes */
    cdp->header.ccdata_present = 1;
    cdp->header.caption_service_active = 1;
    cdp->ccdata.cc_count = cc_count;
    for (i = 0; i < cc_count; i++) {
        if (data[3*i] & 0x04)
            cdp->ccdata.cc[i].cc_valid = 1;
        cdp->ccdata.cc[i].cc_type = data[3*i] & 0x03;
        cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];
        cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];
    }

    ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);
    klvanc_destroy_eia708_cdp(cdp);

    /* Insert the CDP on VANC line 11 */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
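The loop above consumes AV_PKT_DATA_A53_CC side data, which is a flat array of 3-byte closed-caption tuples. A self-contained sketch of that layout (struct and function names are illustrative, not from decklink_enc.cpp):

#include <stdint.h>

struct cc_tuple { bool valid; int type; uint8_t d0, d1; };

static cc_tuple parse_a53_triplet(const uint8_t *data, int i)
{
    cc_tuple t;
    t.valid = (data[3*i] & 0x04) != 0;  /* marker: tuple carries usable data            */
    t.type  =  data[3*i] & 0x03;        /* 0/1 = CEA-608 field 1/2, 2/3 = CEA-708 DTVCC */
    t.d0    =  data[3*i + 1];
    t.d1    =  data[3*i + 2];
    return t;
}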
/* decklink_construct_vanc(): build the VANC buffer for a frame and attach it as ancillary data */
struct klvanc_line_set_s vanc_lines = { 0 };

construct_cc(avctx, ctx, pkt, &vanc_lines);

IDeckLinkVideoFrameAncillary *vanc;
int result = ctx->dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc);
if (result != S_OK) {

/* Render each collected VANC line into the card's vertical-blanking buffer */
for (i = 0; i < vanc_lines.num_lines; i++) {
    struct klvanc_line_s *line = vanc_lines.lines[i];

    real_line = line->line_number;

    result = vanc->GetBufferForVerticalBlankingLine(real_line, &buf);
    if (result != S_OK) {

    result = klvanc_generate_vanc_line_v210(ctx->vanc_ctx, line, (uint8_t *) buf,
                                            ctx->bmd_width);
}

result = frame->SetAncillaryData(vanc);
if (result != S_OK) {

for (i = 0; i < vanc_lines.num_lines; i++)
    klvanc_line_free(vanc_lines.lines[i]);
/* decklink_write_video_packet(): attach VANC, then schedule the frame at pts in the card's timescale */
if (decklink_construct_vanc(avctx, ctx, pkt, frame))

hr = ctx->dlo->ScheduleVideoFrame((class IDeckLinkVideoFrame *) frame,
                                  pkt->pts * ctx->bmd_tb_num,
                                  ctx->bmd_tb_num, ctx->bmd_tb_den);
    av_log(avctx, AV_LOG_ERROR, "Could not schedule video frame."
           " error %08x.\n", (uint32_t) hr);

ctx->dlo->GetBufferedVideoFrameCount(&buffered);
if (pkt->pts > 2 && buffered <= 2)
    av_log(avctx, AV_LOG_WARNING, "There are not enough buffered video frames."
           " Video may misbehave!\n");

/* Once the preroll is filled, end the audio preroll and start scheduled playback */
if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {

/* decklink_write_audio_packet() */
ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
if (pkt->pts > 1 && !buffered)
    av_log(avctx, AV_LOG_WARNING, "There's no buffered audio."
           " Audio will misbehave!\n");

if (ctx->dlo->ScheduleAudioSamples(pkt->data, sample_count, pkt->pts,
                                   bmdAudioSampleRate48kHz, NULL) != S_OK) {
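ScheduleVideoFrame() takes a start time, a duration and a timescale; here the start time is pkt->pts scaled by the mode's frame duration. A worked example with assumed mode values (1080i59.94 reports a 1001/30000 frame duration; the stream time base is one frame in this muxer):

const long long bmd_tb_num = 1001, bmd_tb_den = 30000;  /* assumed mode values                     */
const long long pts   = 100;                            /* pkt->pts counts frames                  */
const long long start = pts * bmd_tb_num;               /* 100100 ticks = 100100/30000 s ~ 3.337 s */
/* ScheduleVideoFrame(frame, start, bmd_tb_num, bmd_tb_den)  -- duration, then timescale           */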
/* ff_decklink_write_header(): create the libklvanc context and get the device's output interface */
if (klvanc_context_create(&ctx->vanc_ctx) < 0) {

if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
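The ctx->dl device object queried above is obtained during device init; a minimal sketch of how a device is typically enumerated with the POSIX flavour of the DeckLink SDK (illustrative, not the code in ff_decklink_init_device()):

IDeckLinkIterator *it = CreateDeckLinkIteratorInstance();
IDeckLink *dl = NULL;
IDeckLinkOutput *dlo = NULL;
if (it && it->Next(&dl) == S_OK &&
    dl->QueryInterface(IID_IDeckLinkOutput, (void **) &dlo) == S_OK) {
    /* dlo can now be used for EnableVideoOutput(), ScheduleVideoFrame(), ... */
}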
Referenced symbols:

AV_PIX_FMT_UYVY422: packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1.
decklink_frame(struct decklink_ctx *ctx, AVFrame *avframe, AVCodecID codec_id, int height, int width)
decklink_frame(struct decklink_ctx *ctx, AVPacket *avpacket, AVCodecID codec_id, int height, int width)
enum AVFieldOrder field_order: Video only.
AVFrame: This structure describes decoded (raw) audio or video data.
AV_PKT_DATA_A53_CC: ATSC A53 Part 4 Closed Captions.
AV_LOG_WARNING: Something somehow does not look correct.
virtual long STDMETHODCALLTYPE GetWidth(void)
static int decklink_setup_video(AVFormatContext *avctx, AVStream *st)
enum AVCodecID codec_id: Specific type of the encoded data (the codec used).
static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
int ff_decklink_init_device(AVFormatContext *avctx, const char *name)
void *av_mallocz(size_t size): Allocate a memory block with alignment suitable for all memory accesses (including vectors, if available on the CPU) and zero all the bytes of the block.
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
#define pthread_mutex_lock(a)
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
AVCodecParameters: This struct describes the properties of an encoded stream.
av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
static int decklink_write_video_packet(AVFormatContext *avctx, AVPacket *pkt)
void av_packet_free(AVPacket **pkt): Free the packet; if the packet is reference counted, it will be unreferenced first.
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv)
virtual long STDMETHODCALLTYPE GetHeight(void)
AVStream **streams: A list of all streams in the file.
void ff_decklink_list_devices_legacy(AVFormatContext *avctx, int show_inputs, int show_outputs)
int frames_buffer_available_spots
int ff_decklink_list_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list, int show_inputs, int show_outputs)
static int decklink_setup_audio(AVFormatContext *avctx, AVStream *st)
avdevice.h: Main libavdevice API header.
virtual ULONG STDMETHODCALLTYPE Release(void)
AVCodecID: Identify the syntax and semantics of the bitstream.
AV_LOG_ERROR: Something went wrong and cannot losslessly be recovered.
virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void)
AVPacket *av_packet_clone(const AVPacket *src): Create a new packet that references the same data as src.
uint8_t *av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size): Get side information from packet.
void av_frame_free(AVFrame **frame): Free the frame and any dynamically allocated objects in it, e.g. extended_data.
char *url: input or output URL.
AV_LOG_DEBUG: Stuff which is only useful for libav* developers.
int ff_decklink_list_output_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
enum AVMediaType codec_type: General type of the encoded data.
virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void)
unsigned int nb_streams: Number of elements in AVFormatContext.streams.
int ff_decklink_set_configs(AVFormatContext *avctx, decklink_direction_t direction)
struct decklink_ctx *_ctx
av_cold int ff_decklink_write_header(AVFormatContext *avctx)
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction)
virtual long STDMETHODCALLTYPE GetRowBytes(void)
#define pthread_mutex_unlock(a)
AV_CODEC_ID_WRAPPED_AVFRAME: Passthrough codec, AVFrames wrapped in AVPacket.
AVERROR_EXIT: Immediate exit was requested; the called function should not be restarted.
AVFrame *av_frame_clone(const AVFrame *src): Create a new frame that references the same data as src.
int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, decklink_direction_t direction)
virtual ULONG STDMETHODCALLTYPE AddRef(void)
int linesize[AV_NUM_DATA_POINTERS]: For video, size in bytes of each picture line.
void av_packet_unref(AVPacket *pkt): Wipe the packet.
virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags(void)
virtual HRESULT STDMETHODCALLTYPE GetBytes(void **buffer)
void av_frame_unref(AVFrame *frame): Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t *data[AV_NUM_DATA_POINTERS]: pointer to the picture/channel planes.
void ff_decklink_cleanup(AVFormatContext *avctx)
int sample_rate: Audio only.
virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *_frame, BMDOutputFrameCompletionResult result)
virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
void *priv_data: Format private data.
int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt)
virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)
IDeckLinkVideoFrameAncillary *_ancillary
AVCodecParameters *codecpar: Codec parameters associated with this stream.
AVRational time_base: This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
uint16_t cdp_sequence_num
virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode **timecode)
AVPacket: This structure stores compressed data.
int64_t pts: Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
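The frames_buffer_available_spots counter and the pthread shims listed above are how the muxer thread throttles itself against the card: it waits for a free scheduling slot before queueing a frame, and the completion callback gives the slot back. A minimal sketch of that pattern (helper names are illustrative; ctx->mutex and ctx->cond are assumed to be the context's mutex and condition variable):

static void wait_for_free_slot(struct decklink_ctx *ctx)
{
    pthread_mutex_lock(&ctx->mutex);
    while (ctx->frames_buffer_available_spots == 0)   /* every slot is queued on the card */
        pthread_cond_wait(&ctx->cond, &ctx->mutex);
    ctx->frames_buffer_available_spots--;             /* claim a slot for the next frame  */
    pthread_mutex_unlock(&ctx->mutex);
}

static void release_slot(struct decklink_ctx *ctx)    /* called from ScheduledFrameCompleted() */
{
    pthread_mutex_lock(&ctx->mutex);
    ctx->frames_buffer_available_spots++;
    pthread_cond_broadcast(&ctx->cond);
    pthread_mutex_unlock(&ctx->mutex);
}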