   31 #include <DeckLinkAPI.h> 
   43 #include "libklvanc/vanc.h" 
   44 #include "libklvanc/vanc-lines.h" 
   45 #include "libklvanc/pixels.h" 
   63           return ((GetWidth() + 47) / 48) * 128;
 
   68             return bmdFormat8BitYUV;
 
   70             return bmdFormat10BitYUV;
 
   72     virtual BMDFrameFlags  STDMETHODCALLTYPE GetFlags      (void)

   75            return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
 
   77            return bmdFrameFlagDefault;
 
   93     virtual HRESULT STDMETHODCALLTYPE GetTimecode     (BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }

   94     virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)

  104     virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)

  112     virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
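The fragments above belong to the decklink_frame class, a thin IDeckLinkVideoFrame implementation that hands the driver a pointer to the muxer's own pixel data instead of copying it. Below is a minimal sketch of the same idea, assuming an AVFrame-backed UYVY frame; the class name, constructor and the simplified reference counting are illustration-only (the real class also wraps V210 AVPackets and reference-counts the wrapper).

// Minimal sketch of an IDeckLinkVideoFrame backed by an AVFrame (UYVY only).
// Error handling and the AVPacket-backed V210 variant are omitted.
#include <DeckLinkAPI.h>
extern "C" {
#include <libavutil/frame.h>
}

class sketch_frame : public IDeckLinkVideoFrame
{
public:
    sketch_frame(AVFrame *avframe, long width, long height)
        : _avframe(avframe), _width(width), _height(height) { }

    virtual long STDMETHODCALLTYPE GetWidth(void)    { return _width; }
    virtual long STDMETHODCALLTYPE GetHeight(void)   { return _height; }
    // 8-bit UYVY is 2 bytes per pixel; V210 would use ((width + 47) / 48) * 128.
    virtual long STDMETHODCALLTYPE GetRowBytes(void) { return _width * 2; }
    virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void) { return bmdFormat8BitYUV; }
    virtual BMDFrameFlags  STDMETHODCALLTYPE GetFlags(void)
    {
        // A negative linesize means the picture is stored bottom-up.
        return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
    }
    virtual HRESULT STDMETHODCALLTYPE GetBytes(void **buffer)
    {
        // Point the card at the first visible row, accounting for bottom-up storage.
        *buffer = (void *)(_avframe->linesize[0] < 0
                           ? _avframe->data[0] + _avframe->linesize[0] * (_height - 1)
                           : _avframe->data[0]);
        return S_OK;
    }
    virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat, IDeckLinkTimecode **) { return S_FALSE; }
    virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **)    { return S_FALSE; }
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID *) { return E_NOINTERFACE; }
    virtual ULONG   STDMETHODCALLTYPE AddRef(void)  { return 1; }   // lifetime left to the caller in this sketch
    virtual ULONG   STDMETHODCALLTYPE Release(void) { return 1; }

private:
    AVFrame *_avframe;
    long _width, _height;
};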
 
  149         if (frame->_avpacket)
 
  153         ctx->frames_buffer_available_spots++;
 
  160     virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }

  161     virtual ULONG   STDMETHODCALLTYPE AddRef(void)                            { return 1; }

  162     virtual ULONG   STDMETHODCALLTYPE Release(void)                           { return 1; }
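Lines 149–162 are from the scheduled-frame completion callback: once the card is done with a frame, the backing AVFrame/AVPacket is unreferenced and frames_buffer_available_spots is bumped so a blocked writer can continue. The sketch below shows that pattern; slot_ctx is a minimal stand-in for the fields of the real decklink_ctx, not the actual struct.

// Sketch of the IDeckLinkVideoOutputCallback pattern: signal a free slot in
// the scheduling ring buffer whenever the card finishes with a frame.
#include <pthread.h>
#include <DeckLinkAPI.h>

struct slot_ctx {                       // minimal stand-in for decklink_ctx
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    int frames_buffer_available_spots;
};

class sketch_output_callback : public IDeckLinkVideoOutputCallback
{
public:
    explicit sketch_output_callback(struct slot_ctx *ctx) : _ctx(ctx) { }

    virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *frame,
                                                              BMDOutputFrameCompletionResult result)
    {
        // The real callback also unrefs the AVFrame/AVPacket wrapped by the
        // completed decklink_frame (see the "if (frame->_avpacket)" fragment).
        pthread_mutex_lock(&_ctx->mutex);
        _ctx->frames_buffer_available_spots++;   // one more frame may be scheduled
        pthread_cond_broadcast(&_ctx->cond);     // wake a writer waiting on a full buffer
        pthread_mutex_unlock(&_ctx->mutex);
        return S_OK;
    }
    virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void) { return S_OK; }
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID *)  { return E_NOINTERFACE; }
    virtual ULONG   STDMETHODCALLTYPE AddRef(void)  { return 1; }
    virtual ULONG   STDMETHODCALLTYPE Release(void) { return 1; }

private:
    struct slot_ctx *_ctx;
};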
 
  179                    " Only AV_PIX_FMT_UYVY422 is supported.\n");
 
  182         ctx->raw_format = bmdFormat8BitYUV;
 
  185                " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n");
 
  188         ctx->raw_format = bmdFormat10BitYUV;
 
  198                " Check available formats with -list_formats 1.\n");
 
  201     if (ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputVANC) != S_OK) {
 
  203         ctx->supports_vanc = 0;
 
  205     if (!ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK) {
 
  212     ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);
 
  216         ctx->frames_preroll /= 1000;
 
  219     ctx->frames_buffer = ctx->frames_preroll * 2;

  220     ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);

  223     ctx->frames_buffer_available_spots = ctx->frames_buffer;

  226            avctx->url, ctx->frames_preroll, ctx->frames_buffer);
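Lines 179–226 are the tail of the video setup: the stream's codec is mapped to a DeckLink raw format, the output is enabled with VANC support (falling back to a plain output), the completion callback is installed, and the scheduling buffer is sized from the preroll. A compressed sketch of that flow follows; "ctx" stands for the muxer's private decklink_ctx and only the fields visible in the fragments are assumed.

// Sketch of the decklink_setup_video() decision flow. FFMIN is from
// libavutil/macros.h; the error log messages are reduced to comments.
static int setup_video_sketch(struct decklink_ctx *ctx, AVStream *st)
{
    AVCodecParameters *c = st->codecpar;

    if (c->codec_id == AV_CODEC_ID_WRAPPED_AVFRAME) {
        if (c->format != AV_PIX_FMT_UYVY422)
            return -1;                        // "Only AV_PIX_FMT_UYVY422 is supported."
        ctx->raw_format = bmdFormat8BitYUV;   // raw UYVY AVFrames wrapped in packets
    } else if (c->codec_id == AV_CODEC_ID_V210) {
        ctx->raw_format = bmdFormat10BitYUV;  // pre-packed 10-bit 4:2:2
    } else {
        return -1;                            // "Only V210 and wrapped frame ... are supported."
    }

    // Prefer a VANC-capable output so captions can be embedded; if that fails,
    // clear the flag and fall back to a plain video output.
    if (ctx->supports_vanc &&
        ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputVANC) != S_OK)
        ctx->supports_vanc = 0;
    if (!ctx->supports_vanc &&
        ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK)
        return -1;                            // "Check available formats with -list_formats 1."

    ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);

    // Size the scheduling ring buffer from the preroll (already in frames here):
    // twice the preroll, capped at 60 frames, with every slot initially free.
    ctx->frames_buffer = ctx->frames_preroll * 2;
    ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
    ctx->frames_buffer_available_spots = ctx->frames_buffer;
    return 0;
}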
 
  246     if (c->sample_rate != 48000) {
 
  248                " Only 48kHz is supported.\n");
 
  251     if (c->channels != 2 && c->channels != 8 && c->channels != 16) {
 
  253                " Only 2, 8 or 16 channels are supported.\n");
 
  256     if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
 
  257                                     bmdAudioSampleType16bitInteger,
 
  259                                     bmdAudioOutputStreamTimestamped) != S_OK) {
 
  263     if (ctx->dlo->BeginAudioPreroll() != S_OK) {

  270     ctx->channels = c->channels;
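Lines 246–270 show the audio constraints: 48 kHz 16-bit PCM with 2, 8 or 16 channels, delivered as a timestamped stream with an explicit preroll phase. A self-contained sketch of those checks, assuming the stream's codec parameters are already available:

// Sketch of the decklink_setup_audio() constraints seen above.
#include <DeckLinkAPI.h>
extern "C" {
#include <libavcodec/codec_par.h>
}

static int setup_audio_sketch(IDeckLinkOutput *dlo, const AVCodecParameters *c)
{
    if (c->sample_rate != 48000)
        return -1;                                   // only 48 kHz is supported
    if (c->channels != 2 && c->channels != 8 && c->channels != 16)
        return -1;                                   // only 2, 8 or 16 channels

    if (dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
                               bmdAudioSampleType16bitInteger,
                               c->channels,
                               bmdAudioOutputStreamTimestamped) != S_OK)
        return -1;
    if (dlo->BeginAudioPreroll() != S_OK)            // samples are queued before playback starts
        return -1;
    return 0;
}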
 
  282     if (ctx->playback_started) {

  284         ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,

  285                                         &actual, ctx->bmd_tb_den);

  286         ctx->dlo->DisableVideoOutput();

  288             ctx->dlo->DisableAudioOutput();

  293     if (ctx->output_callback)

  294         delete ctx->output_callback;

  300     klvanc_context_destroy(ctx->vanc_ctx);
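Lines 282–300 belong to the cleanup/trailer path, and the order matters: stop the scheduled playback at the last presented timestamp, disable the outputs, then free the helper objects. A sketch of that order, with "ctx" fields assumed from the fragments:

// Sketch of the teardown order used by ff_decklink_write_trailer()/cleanup.
static void close_sketch(struct decklink_ctx *ctx)
{
    if (ctx->playback_started) {
        BMDTimeValue actual;
        ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
                                        &actual, ctx->bmd_tb_den);
        ctx->dlo->DisableVideoOutput();
        if (ctx->audio)
            ctx->dlo->DisableAudioOutput();
    }

    if (ctx->output_callback)
        delete ctx->output_callback;                // the completion callback object

    if (ctx->vanc_ctx)
        klvanc_context_destroy(ctx->vanc_ctx);      // created in ff_decklink_write_header()
}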
 
  310                          AVPacket *pkt, struct klvanc_line_set_s *vanc_lines)
 
  312     struct klvanc_packet_eia_708b_s *cdp;
 
  325     ret = klvanc_create_eia708_cdp(&cdp);
 
  329     ret = klvanc_set_framerate_EIA_708B(cdp, ctx->bmd_tb_num, ctx->bmd_tb_den);

  332                ctx->bmd_tb_num, ctx->bmd_tb_den);
 
  333         klvanc_destroy_eia708_cdp(cdp);
 
  337     if (cc_count > KLVANC_MAX_CC_COUNT) {
 
  339         cc_count = KLVANC_MAX_CC_COUNT;
 
  343     cdp->header.ccdata_present = 1;
 
  344     cdp->header.caption_service_active = 1;
 
  345     cdp->ccdata.cc_count = cc_count;
 
  346     for (i = 0; i < cc_count; i++) {

  347         if (data[3*i] & 0x04)

  348             cdp->ccdata.cc[i].cc_valid = 1;

  349         cdp->ccdata.cc[i].cc_type = data[3*i] & 0x03;

  350         cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];

  351         cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];

  354     klvanc_finalize_EIA_708B(cdp, ctx->cdp_sequence_num++);

  355     ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);

  356     klvanc_destroy_eia708_cdp(cdp);

  362     ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
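Lines 310–362 are construct_cc(): the AV_PKT_DATA_A53_CC side data (three bytes per caption construct) is repacked into an EIA-708 CDP with libklvanc, converted to 10-bit ANC words and queued on VANC line 11. The sketch below mirrors those calls; the parameter list (tb_num/tb_den, the sequence counter pointer) is an assumption standing in for the fields the real function reads from its context.

// Sketch of repacking A53 closed-caption bytes into a CDP and queuing it.
#include <stdint.h>
#include <stdlib.h>
#include <libklvanc/vanc.h>
#include <libklvanc/vanc-lines.h>

static int construct_cc_sketch(struct klvanc_context_s *vanc_ctx,
                               struct klvanc_line_set_s *vanc_lines,
                               const uint8_t *data, size_t size,
                               int tb_num, int tb_den, uint16_t *cdp_seq)
{
    struct klvanc_packet_eia_708b_s *cdp;
    uint16_t *cdp_words;
    uint16_t len;
    int cc_count = size / 3;
    int ret;

    if (klvanc_create_eia708_cdp(&cdp) < 0)
        return -1;
    if (klvanc_set_framerate_EIA_708B(cdp, tb_num, tb_den) < 0) {
        klvanc_destroy_eia708_cdp(cdp);              // unsupported frame rate
        return -1;
    }

    if (cc_count > KLVANC_MAX_CC_COUNT)              // clamp, as the fragment at 337 does
        cc_count = KLVANC_MAX_CC_COUNT;

    cdp->header.ccdata_present         = 1;
    cdp->header.caption_service_active = 1;
    cdp->ccdata.cc_count = cc_count;
    for (int i = 0; i < cc_count; i++) {
        if (data[3*i] & 0x04)                        // cc_valid marker bit
            cdp->ccdata.cc[i].cc_valid = 1;
        cdp->ccdata.cc[i].cc_type    = data[3*i] & 0x03;
        cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];
        cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];
    }

    klvanc_finalize_EIA_708B(cdp, (*cdp_seq)++);     // checksum + CDP sequence number
    ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);
    klvanc_destroy_eia708_cdp(cdp);
    if (ret < 0)
        return -1;

    // Queue the CDP on VANC line 11, as the fragment at 362 does.
    ret = klvanc_line_insert(vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
    free(cdp_words);
    return ret;
}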
 
  373     struct klvanc_line_set_s vanc_lines = { 0 };
 
  376     if (!ctx->supports_vanc)

  379     construct_cc(avctx, ctx, pkt, &vanc_lines);
 
  381     IDeckLinkVideoFrameAncillary *vanc;
 
  382     int result = ctx->dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc);
 
  391     for (i = 0; i < vanc_lines.num_lines; i++) {

  392         struct klvanc_line_s *line = vanc_lines.lines[i];

  401         real_line = line->line_number;
 
  403         result = vanc->GetBufferForVerticalBlankingLine(real_line, &buf);
 
  410         result = klvanc_generate_vanc_line_v210(ctx->vanc_ctx, line, (uint8_t *) buf,

  426     for (i = 0; i < vanc_lines.num_lines; i++)

  427         klvanc_line_free(vanc_lines.lines[i]);
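Lines 373–427 are decklink_construct_vanc(): one IDeckLinkVideoFrameAncillary is created per frame, and every queued klvanc line is rendered as V210 into the buffer the card exposes for that vertical-blanking line. The sketch below fills the ancillary object and returns it; attaching it to the frame wrapper (SetAncillaryData) and freeing the line set are left to the caller, as in the fragments. The final width argument of klvanc_generate_vanc_line_v210 is an assumption matching the truncated call above.

// Sketch of rendering queued VANC lines into a DeckLink ancillary buffer.
#include <DeckLinkAPI.h>
#include <libklvanc/vanc.h>
#include <libklvanc/vanc-lines.h>
#include <libklvanc/pixels.h>

static int fill_vanc_sketch(IDeckLinkOutput *dlo, struct klvanc_context_s *vanc_ctx,
                            struct klvanc_line_set_s *vanc_lines, int width,
                            IDeckLinkVideoFrameAncillary **out)
{
    IDeckLinkVideoFrameAncillary *vanc;
    if (dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc) != S_OK)
        return -1;

    for (int i = 0; i < vanc_lines->num_lines; i++) {
        struct klvanc_line_s *line = vanc_lines->lines[i];
        void *buf;

        if (!line)
            continue;
        // The card exposes one packed buffer per vertical-blanking line.
        if (vanc->GetBufferForVerticalBlankingLine(line->line_number, &buf) != S_OK)
            continue;
        // libklvanc renders the queued ANC packets into that buffer as V210.
        klvanc_generate_vanc_line_v210(vanc_ctx, line, (uint8_t *) buf, width);
    }

    *out = vanc;
    return 0;
}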
 
  446             tmp->width  != ctx->bmd_width ||

  447             tmp->height != ctx->bmd_height) {

  469         if (decklink_construct_vanc(avctx, ctx, pkt, frame))
 
  483     while (ctx->frames_buffer_available_spots == 0) {
 
  486     ctx->frames_buffer_available_spots--;
 
  490     hr = ctx->dlo->ScheduleVideoFrame((class IDeckLinkVideoFrame *) frame,

  492                                       ctx->bmd_tb_num, ctx->bmd_tb_den);
 
  497                 " error %08x.\n", (uint32_t) hr);
 
  501     ctx->dlo->GetBufferedVideoFrameCount(&buffered);
 
  503     if (pkt->pts > 2 && buffered <= 2)
 
  505                " Video may misbehave!\n");
 
  508     if (!ctx->playback_started && pkt->pts > ctx->frames_preroll) {

  510         if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {

  515         if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {
 
  519         ctx->playback_started = 1;
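Lines 446–519 are the scheduling path of decklink_write_video_packet(): the writer blocks while the card-side ring buffer is full, schedules the frame by its pts in the bmd_tb_num/bmd_tb_den timescale, and starts playback only after a preroll's worth of frames has been queued. A sketch of that path, with "ctx" fields assumed from the fragments:

// Sketch of blocking on a free slot, scheduling a frame, and starting playback.
static int schedule_video_sketch(struct decklink_ctx *ctx, IDeckLinkVideoFrame *frame,
                                 int64_t pts)
{
    pthread_mutex_lock(&ctx->mutex);
    while (ctx->frames_buffer_available_spots == 0)
        pthread_cond_wait(&ctx->cond, &ctx->mutex);  // woken by ScheduledFrameCompleted
    ctx->frames_buffer_available_spots--;
    pthread_mutex_unlock(&ctx->mutex);

    HRESULT hr = ctx->dlo->ScheduleVideoFrame(frame,
                                              pts * ctx->bmd_tb_num,  // display time
                                              ctx->bmd_tb_num,        // duration: one frame
                                              ctx->bmd_tb_den);       // timescale
    if (hr != S_OK)
        return -1;

    // Playback starts only once the preroll is buffered, so the card does not
    // starve right at the beginning of the stream.
    if (!ctx->playback_started && pts > ctx->frames_preroll) {
        if (ctx->audio)
            ctx->dlo->EndAudioPreroll();
        if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK)
            return -1;
        ctx->playback_started = 1;
    }
    return 0;
}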
 
  529     int sample_count = pkt->size / (ctx->channels << 1);
 
  532     ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
 
  533     if (pkt->pts > 1 && !buffered)
 
  535                " Audio will misbehave!\n");
 
  538                                        bmdAudioSampleRate48kHz, NULL) != S_OK) {
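Lines 529–538 are decklink_write_audio_packet(): the interleaved 16-bit samples are handed to the card timestamped in the 48 kHz timescale. A self-contained sketch, assuming pkt->pts is already expressed in audio-sample units (stream time base 1/48000):

// Sketch of scheduling a packet of interleaved S16 audio on the card.
#include <DeckLinkAPI.h>
extern "C" {
#include <libavcodec/packet.h>
}

static int schedule_audio_sketch(IDeckLinkOutput *dlo, int channels, const AVPacket *pkt)
{
    // 2 bytes per sample per channel, hence the shift by one.
    int sample_count = pkt->size / (channels << 1);

    if (dlo->ScheduleAudioSamples(pkt->data, sample_count, pkt->pts,
                                  bmdAudioSampleRate48kHz, NULL) != S_OK)
        return -1;
    return 0;
}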
 
  566     if (klvanc_context_create(&ctx->vanc_ctx) < 0) {
 
  570     ctx->supports_vanc = 1;
 
  574     if (ctx->list_devices) {
 
  584     if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
 
  592     if (ctx->list_formats) {
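Lines 566–592 show the ordering inside ff_decklink_write_header(): create the libklvanc context, honour -list_devices and -list_formats, open the device, then obtain its IDeckLinkOutput interface before the per-stream setup runs. The sketch below follows that order; the ff_decklink_* helpers live in decklink_common, and DIRECTION_OUT plus the chosen error codes are assumptions made for the sketch.

// Sketch of the write_header ordering: VANC context, option handling,
// device open, output interface lookup.
static int write_header_sketch(AVFormatContext *avctx, struct decklink_ctx *ctx)
{
    if (klvanc_context_create(&ctx->vanc_ctx) < 0)
        return AVERROR_EXTERNAL;
    ctx->supports_vanc = 1;     // optimistic; cleared later if EnableVideoOutput(VANC) fails

    if (ctx->list_devices) {
        ff_decklink_list_devices_legacy(avctx, 0, 1);   // show outputs only
        return AVERROR_EXIT;
    }

    if (ff_decklink_init_device(avctx, avctx->url) < 0)
        return AVERROR(EIO);

    // All scheduling goes through the output interface of the opened device.
    if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK)
        return AVERROR(EIO);

    if (ctx->list_formats) {
        ff_decklink_list_formats(avctx, DIRECTION_OUT);
        return AVERROR_EXIT;
    }

    // decklink_setup_video()/decklink_setup_audio() are then called per stream.
    return 0;
}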
 
  