FFmpeg
decklink_dec.cpp
Go to the documentation of this file.
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <atomic>
25 #include <vector>
26 using std::atomic;
27 
28 /* Include internal.h first to avoid conflict between winsock.h (used by
29  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
30 extern "C" {
31 #include "libavformat/internal.h"
32 }
33 
34 #include <DeckLinkAPI.h>
35 
36 extern "C" {
37 #include "config.h"
38 #include "libavformat/avformat.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/avutil.h"
41 #include "libavutil/common.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/intreadwrite.h"
44 #include "libavutil/time.h"
45 #include "libavutil/timecode.h"
46 #include "libavutil/mathematics.h"
47 #include "libavutil/reverse.h"
48 #include "avdevice.h"
49 #if CONFIG_LIBZVBI
50 #include <libzvbi.h>
51 #endif
52 }
53 
54 #include "decklink_common.h"
55 #include "decklink_dec.h"
56 
57 #define MAX_WIDTH_VANC 1920
58 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
59 
60 typedef struct VANCLineNumber {
61  BMDDisplayMode mode;
65  int vanc_end;
67 
68 /* These VANC line numbers need not be very accurate. In any case
69  * GetBufferForVerticalBlankingLine() will return an error when invalid
70  * ancillary line number was requested. We just need to make sure that the
71  * entire VANC region is covered, while making sure we don't decode VANC of
72  * another source during switching*/
74  /* SD Modes */
75 
76  {bmdModeNTSC, 11, 19, 274, 282},
77  {bmdModeNTSC2398, 11, 19, 274, 282},
78  {bmdModePAL, 7, 22, 320, 335},
79  {bmdModeNTSCp, 11, -1, -1, 39},
80  {bmdModePALp, 7, -1, -1, 45},
81 
82  /* HD 1080 Modes */
83 
84  {bmdModeHD1080p2398, 8, -1, -1, 42},
85  {bmdModeHD1080p24, 8, -1, -1, 42},
86  {bmdModeHD1080p25, 8, -1, -1, 42},
87  {bmdModeHD1080p2997, 8, -1, -1, 42},
88  {bmdModeHD1080p30, 8, -1, -1, 42},
89  {bmdModeHD1080i50, 8, 20, 570, 585},
90  {bmdModeHD1080i5994, 8, 20, 570, 585},
91  {bmdModeHD1080i6000, 8, 20, 570, 585},
92  {bmdModeHD1080p50, 8, -1, -1, 42},
93  {bmdModeHD1080p5994, 8, -1, -1, 42},
94  {bmdModeHD1080p6000, 8, -1, -1, 42},
95 
96  /* HD 720 Modes */
97 
98  {bmdModeHD720p50, 8, -1, -1, 26},
99  {bmdModeHD720p5994, 8, -1, -1, 26},
100  {bmdModeHD720p60, 8, -1, -1, 26},
101 
102  /* For all other modes, for which we don't support VANC */
103  {bmdModeUnknown, 0, -1, -1, -1}
104 };
105 
/* Custom DeckLink capture-buffer allocator.
 *
 * Backs every DeckLink video buffer with av_malloc() plus
 * AV_INPUT_BUFFER_PADDING_SIZE of trailing padding, so captured frames can
 * later be wrapped into AVPackets/AVBuffers and handed to FFmpeg parsers
 * and decoders without an extra copy.
 *
 * Implements the COM-style IDeckLinkMemoryAllocator interface with a
 * minimal, thread-safe (std::atomic) reference count. */
class decklink_allocator : public IDeckLinkMemoryAllocator
{
public:
    decklink_allocator(): _refs(1) { }
    virtual ~decklink_allocator() { }

    // IDeckLinkMemoryAllocator methods
    /* Allocate one buffer of bufferSize bytes (+ FFmpeg input padding).
     * Returns E_OUTOFMEMORY when av_malloc fails. */
    virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
    {
        void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!buf)
            return E_OUTOFMEMORY;
        *allocatedBuffer = buf;
        return S_OK;
    }
    /* Release a buffer previously handed out by AllocateBuffer. */
    virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
    {
        av_free(buffer);
        return S_OK;
    }
    /* No pooling/pre-allocation is done, so Commit/Decommit are no-ops. */
    virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
    virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }

    // IUnknown methods (minimal COM: no interface discovery supported)
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
    /* Drop a reference; the object deletes itself when the count hits 0. */
    virtual ULONG STDMETHODCALLTYPE Release(void)
    {
        int ret = --_refs;
        if (!ret)
            delete this;
        return ret;
    }

private:
    std::atomic<int> _refs; // COM-style reference count, starts at 1
};
143 
extern "C" {
/* AVBuffer free callback: drops the COM reference that was taken on a
 * DeckLink object (e.g. the video frame) when it was wrapped into an
 * AVBuffer, so the SDK can reclaim it once FFmpeg is done with the data. */
static void decklink_object_free(void *opaque, uint8_t *data)
{
    IUnknown *obj = (class IUnknown *)opaque;
    obj->Release();
}
}
151 
152 static int get_vanc_line_idx(BMDDisplayMode mode)
153 {
154  unsigned int i;
155  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
156  if (mode == vanc_line_numbers[i].mode)
157  return i;
158  }
159  /* Return the VANC idx for Unknown mode */
160  return i - 1;
161 }
162 
/* Strip the parity/marker bits from each 10-bit VANC word in place,
 * keeping only the low 8 data bits. */
static inline void clear_parity_bits(uint16_t *buf, int len) {
    uint16_t *end = buf + len;
    while (buf < end)
        *buf++ &= 0xff;
}
168 
/* Validate a raw 10-bit VANC packet.
 *
 * For every payload word (index 3 = DID, up to but excluding the final
 * checksum word) bits 8-9 must be a valid parity marker: bit 8 is the even
 * parity of the 8 data bits and bit 9 its inverse, so the top two bits may
 * only be 01 or 10. The packet checksum is the 9-bit sum of the payload
 * words with bit 9 set to the inverse of bit 8.
 *
 * Returns 0 when both parity and checksum are valid, -1 otherwise. */
static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
    int i;
    uint16_t vanc_sum = 0;
    for (i = 3; i < len - 1; i++) {
        uint16_t v = buf[i];
        int np = v >> 8;             // marker bits (must be 01 or 10)
        int p = av_parity(v & 0xff); // even parity of the data byte
        if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
            // Parity check failed
            return -1;
        }
        vanc_sum += v;
    }
    vanc_sum &= 0x1ff;                      // keep 9-bit sum
    vanc_sum |= ((~vanc_sum & 0x100) << 1); // bit 9 = inverse of bit 8
    if (checksum != vanc_sum) {
        // Checksum verification failed
        return -1;
    }
    return 0;
}
190 
191 /* The 10-bit VANC data is packed in V210, we only need the luma component. */
192 static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
193 {
194  int i;
195  for (i = 0; i < width / 3; i++) {
196  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
197  *dst++ = src[4] + ((src[5] & 3) << 8);
198  *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
199  src += 8;
200  }
201 }
202 
/* Unpack a V210 line into 10-bit words (all components, not just luma).
 * Every 4 source bytes yield 3 ten-bit values; width*2/3 groups cover one
 * line of `width` pixels. */
static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
{
    int n = width * 2 / 3;
    while (n-- > 0) {
        dst[0] =  src[0]       | ((uint16_t)(src[1] & 0x03) << 8);
        dst[1] = (src[1] >> 2) | ((uint16_t)(src[2] & 0x0f) << 6);
        dst[2] = (src[2] >> 4) | ((uint16_t)(src[3] & 0x3f) << 4);
        dst += 3;
        src += 4;
    }
}
213 
215 {
216  uint8_t ret = (line < 313) << 5;
217  if (line >= 7 && line <= 22)
218  ret += line;
219  if (line >= 320 && line <= 335)
220  ret += (line - 313);
221  return ret;
222 }
223 
/* Write the 4-byte EN 301 775 teletext data-unit header into tgt. */
static void fill_data_unit_head(int line, uint8_t *tgt)
{
    uint8_t *p = tgt;
    *p++ = 0x02;                              // data_unit_id
    *p++ = 0x2c;                              // data_unit_length (44 bytes)
    *p++ = calc_parity_and_line_offset(line); // field_parity, line_offset
    *p++ = 0xe4;                              // framing code
}
231 
232 #if CONFIG_LIBZVBI
233 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
234 {
235  vbi_bit_slicer slicer;
236 
237  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
238 
239  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
240  return tgt;
241 
243 
244  return tgt + 46;
245 }
246 
247 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
248 {
249  uint8_t y[720];
250  uint8_t *py = y;
251  uint8_t *pend = y + 720;
252  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
253  * so we extract the 8 MSBs of the luma component, that is enough for
254  * teletext bit slicing. */
255  while (py < pend) {
256  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
257  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
258  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
259  src += 8;
260  }
261  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
262 }
263 #endif
264 
266 {
267  int i;
268 
269  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
270  return tgt;
271 
272  fill_data_unit_head(line, tgt);
273 
274  py += 3;
275  tgt += 4;
276 
277  for (i = 0; i < 42; i++)
278  *tgt++ = ff_reverse[py[i] & 255];
279 
280  return tgt;
281 }
282 
/* Test whether a PAL VBI line is selected in the teletext_lines bitmask.
 * Bits 0-16 map to first-field lines 6-22, bits 17-34 to second-field
 * lines 318-335; any other line never matches. */
static int linemask_matches(int line, int64_t mask)
{
    int bit;
    if (line >= 6 && line <= 22)
        bit = line - 6;
    else if (line >= 318 && line <= 335)
        bit = line - 318 + 17;
    else
        return 0;
    return ((1ULL << bit) & mask) != 0;
}
292 
/* Parse an OP-47 Subtitle Distribution Packet payload in [py, pend).
 * After verifying the WST identifiers and format code, walk up to 5 line
 * descriptors; each descriptor encodes a VBI line (bits 0-4 = line number,
 * +313 when bit 7 is clear, i.e. second field). For every described line
 * selected by wanted_lines, the corresponding 45-word VBI packet is decoded
 * into a teletext data unit at tgt.
 * Returns the advanced output pointer (tgt unchanged if nothing matched). */
static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
{
    if (py < pend - 9) {
        if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
            uint16_t *descriptors = py + 4;
            int i;
            py += 9; // skip header up to the first VBI packet
            for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
                int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
                if (line && linemask_matches(line, wanted_lines))
                    tgt = teletext_data_unit_from_op47_vbi_packet(line, py, tgt);
            }
        }
    }
    return tgt;
}
309 
/* Decode a teletext-carrying ancillary packet starting at py (10-bit words,
 * parity still present). Handles an OP-47 subtitle distribution packet
 * (DID 0x43 / SDID 0x02) directly, and unwraps a VANC multipacket
 * (DID 0x43 / SDID 0x03) by recursing once per contained sub-packet
 * (allow_multipacket=0 in the recursion prevents nested multipackets).
 * Returns the advanced teletext output pointer. */
static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
{
    uint16_t did = py[0]; // data id
    uint16_t sdid = py[1]; // secondary data id
    uint16_t dc = py[2] & 255; // data count
    py += 3;
    pend = FFMIN(pend, py + dc); // never read past the declared payload
    if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
        tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
    } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
        py += 2; // priority, line/field
        while (py < pend - 3) {
            tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
            py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
        }
    }
    return tgt;
}
328 
/* Parse an EIA-708 Caption Distribution Packet (CDP, SMPTE 334-2) carried in
 * a VANC packet and extract its cc_data triplets.
 *
 * buf points at the parity-stripped 10-bit words of the whole VANC packet
 * (the CDP starts at buf[6]). On success returns an av_malloc()ed buffer of
 * caption bytes and sets cc_count to its size in bytes (triplets * 3); on
 * any validation failure a warning is logged and NULL is returned.
 * NOTE(review): the 'words' parameter is not referenced in this body —
 * confirm against upstream whether a bounds check was intended. */
static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
                           unsigned &cc_count)
{
    size_t i, len = (buf[5] & 0xff) + 6 + 1; // VANC header + payload + checksum
    uint8_t cdp_sum, rate;
    uint16_t hdr, ftr;
    uint8_t *cc;
    uint16_t *cdp = &buf[6]; // CDP follows
    if (cdp[0] != 0x96 || cdp[1] != 0x69) { // cdp_identifier magic
        av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
        return NULL;
    }

    len -= 7; // remove VANC header and checksum

    if (cdp[2] != len) { // cdp_length must match what the VANC DC implies
        av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
        return NULL;
    }

    /* CDP checksum byte makes the sum of all CDP bytes equal 0 (mod 256) */
    cdp_sum = 0;
    for (i = 0; i < len - 1; i++)
        cdp_sum += cdp[i];
    cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
    if (cdp[len - 1] != cdp_sum) {
        av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
        return NULL;
    }

    rate = cdp[3]; // cdp_frame_rate (high nibble) + reserved bits (low nibble)
    if (!(rate & 0x0f)) { // reserved low nibble must be non-zero
        av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
        return NULL;
    }
    rate >>= 4;
    if (rate > 8) { // frame rate codes are 1..8
        av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
        return NULL;
    }

    if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
        av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
        return NULL;
    }

    hdr = (cdp[5] << 8) | cdp[6]; // cdp_hdr_sequence_cntr
    if (cdp[7] != 0x72) /* ccdata_id */ {
        av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
        return NULL;
    }

    cc_count = cdp[8]; // marker bits (0xe0) + 5-bit triplet count
    if (!(cc_count & 0xe0)) {
        av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
        return NULL;
    }

    cc_count &= 0x1f;
    if ((len - 13) < cc_count * 3) { // enough payload left for all triplets?
        av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
        return NULL;
    }

    if (cdp[len - 4] != 0x74) /* footer id */ {
        av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
        return NULL;
    }

    /* footer sequence counter must echo the header's */
    ftr = (cdp[len - 3] << 8) | cdp[len - 2];
    if (ftr != hdr) {
        av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
        return NULL;
    }

    cc = (uint8_t *)av_malloc(cc_count * 3);
    if (cc == NULL) {
        av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
        return NULL;
    }

    /* copy the cc_data triplets (cc_valid/cc_type byte + two data bytes) */
    for (size_t i = 0; i < cc_count; i++) {
        cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
        cc[3*i + 1] = cdp[9 + 3*i+1];
        cc[3*i + 2] = cdp[9 + 3*i+2];
    }

    cc_count *= 3; // report size in bytes to the caller
    return cc;
}
418 
/* Scan one unpacked VANC line (buf = width 10-bit words) for ancillary
 * packets and dispatch them:
 *   - DID 0x43 / SDID 0x02 or 0x03: OP-47 teletext, appended to tgt
 *     (only for 1920-wide lines when teletext capture is enabled);
 *   - DID 0x61 / SDID 0x01: EIA-708 captions, attached to pkt as
 *     AV_PKT_DATA_A53_CC side data.
 * Scanning stops at the first word triplet that is not a VANC ADF
 * (0x000 0x3ff 0x3ff) or when a packet would overrun the line.
 * Returns the advanced teletext output pointer. */
static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
                             uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
{
    decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
    uint16_t *max_buf = buf + width;

    while (buf < max_buf - 6) {
        int len;
        uint16_t did = buf[3] & 0xFF; // data id
        uint16_t sdid = buf[4] & 0xFF; // secondary data id
        /* Check for VANC header */
        if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
            return tgt;
        }

        len = (buf[5] & 0xff) + 6 + 1; // ADF+DID+SDID+DC + payload + checksum
        if (len > max_buf - buf) {
            av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
                   len, max_buf - buf);
            return tgt;
        }

        if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
            width == 1920 && tgt_size >= 1920) {
            if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
                av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
                goto skip_packet;
            }
            tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
        } else if (did == 0x61 && sdid == 0x01) {
            unsigned int data_len;
            uint8_t *data;
            if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
                av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
                goto skip_packet;
            }
            clear_parity_bits(buf, len); // vanc_to_cc expects 8-bit data
            data = vanc_to_cc(avctx, buf, width, data_len);
            if (data) {
                /* side data owns the buffer on success; free it on failure */
                if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
                    av_free(data);
            }
        } else {
            av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
                   did, sdid);
        }
skip_packet:
        buf += len; // advance to the next ancillary packet on this line
    }

    return tgt;
}
471 
473 {
474  struct decklink_cctx *ctx = (struct decklink_cctx *)avctx->priv_data;
475  memset(q, 0, sizeof(AVPacketQueue));
478  q->avctx = avctx;
479  q->max_q_size = ctx->queue_size;
480 }
481 
483 {
484  AVPacketList *pkt, *pkt1;
485 
487  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
488  pkt1 = pkt->next;
489  av_packet_unref(&pkt->pkt);
490  av_freep(&pkt);
491  }
492  q->last_pkt = NULL;
493  q->first_pkt = NULL;
494  q->nb_packets = 0;
495  q->size = 0;
497 }
498 
500 {
504 }
505 
/* Return the current byte size of the packet queue (payload + node
 * overhead, as accumulated by the queue put/get functions).
 * NOTE(review): the mutex lock/unlock around this read appears to have been
 * lost in this extraction of the file — confirm against the original
 * source before assuming this read is unsynchronized. */
static unsigned long long avpacket_queue_size(AVPacketQueue *q)
{
    unsigned long long size;
    size = q->size;
    return size;
}
514 
516 {
517  AVPacketList *pkt1;
518 
519  // Drop Packet if queue size is > maximum queue size
520  if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
521  av_packet_unref(pkt);
522  av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
523  return -1;
524  }
525  /* ensure the packet is reference counted */
526  if (av_packet_make_refcounted(pkt) < 0) {
527  av_packet_unref(pkt);
528  return -1;
529  }
530 
531  pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
532  if (!pkt1) {
533  av_packet_unref(pkt);
534  return -1;
535  }
536  av_packet_move_ref(&pkt1->pkt, pkt);
537  pkt1->next = NULL;
538 
540 
541  if (!q->last_pkt) {
542  q->first_pkt = pkt1;
543  } else {
544  q->last_pkt->next = pkt1;
545  }
546 
547  q->last_pkt = pkt1;
548  q->nb_packets++;
549  q->size += pkt1->pkt.size + sizeof(*pkt1);
550 
552 
554  return 0;
555 }
556 
558 {
559  AVPacketList *pkt1;
560  int ret;
561 
563 
564  for (;; ) {
565  pkt1 = q->first_pkt;
566  if (pkt1) {
567  q->first_pkt = pkt1->next;
568  if (!q->first_pkt) {
569  q->last_pkt = NULL;
570  }
571  q->nb_packets--;
572  q->size -= pkt1->pkt.size + sizeof(*pkt1);
573  *pkt = pkt1->pkt;
574  av_free(pkt1);
575  ret = 1;
576  break;
577  } else if (!block) {
578  ret = 0;
579  break;
580  } else {
581  pthread_cond_wait(&q->cond, &q->mutex);
582  }
583  }
585  return ret;
586 }
587 
/* Collect KLV (SMPTE 336) payloads carried in VANC (DID 0x44 / SDID 0x04)
 * of one video frame, reassemble multi-packet sequences per MID using the
 * PSC sequence counter, and queue the joined payload as a single data
 * packet on ctx->klv_st with the frame's pts. */
static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
{
    const uint8_t KLV_DID = 0x44;
    const uint8_t KLV_IN_VANC_SDID = 0x04;

    /* One fragment of a (possibly multi-packet) KLV sequence. */
    struct KLVPacket
    {
        uint16_t sequence_counter;
        std::vector<uint8_t> data;
    };

    size_t total_size = 0;
    std::vector<std::vector<KLVPacket>> klv_packets(256); // one fragment list per MID

    IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
    if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
        return;

    IDeckLinkAncillaryPacketIterator *it = nullptr;
    if (packets->GetPacketIterator(&it) != S_OK) {
        packets->Release();
        return;
    }

    IDeckLinkAncillaryPacket *packet = nullptr;
    while (it->Next(&packet) == S_OK) {
        uint8_t *data = nullptr;
        uint32_t size = 0;

        if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
            av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());

            if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
                // MID and PSC
                if (size > 3) {
                    uint8_t mid = data[0];                 // message identifier
                    uint16_t psc = data[1] << 8 | data[2]; // packet sequence counter, 1-based

                    av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);

                    auto& list = klv_packets[mid];
                    uint16_t expected_psc = list.size() + 1;

                    if (psc == expected_psc) {
                        uint32_t data_len = size - 3; // payload after MID+PSC
                        total_size += data_len;

                        KLVPacket packet{ psc };
                        packet.data.resize(data_len);
                        memcpy(packet.data.data(), data + 3, data_len);

                        list.push_back(std::move(packet));
                    } else {
                        av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);

                        /* discard the partially assembled sequence for this MID */
                        if (!list.empty()) {
                            for (auto& klv : list)
                                total_size -= klv.data.size();

                            list.clear();
                        }
                    }
                }
            }
        }

        packet->Release();
    }

    it->Release();
    packets->Release();

    if (total_size > 0) {
        std::vector<uint8_t> klv;
        klv.reserve(total_size);

        /* join the fragments of every MID, in ascending MID order */
        for (size_t i = 0; i < klv_packets.size(); ++i) {
            auto& list = klv_packets[i];

            if (list.empty())
                continue;

            av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);

            for (auto& packet : list)
                klv.insert(klv.end(), packet.data.begin(), packet.data.end());
        }

        AVPacket klv_packet;
        av_init_packet(&klv_packet);
        klv_packet.pts = pts;
        klv_packet.dts = pts;
        klv_packet.flags |= AV_PKT_FLAG_KEY;
        klv_packet.stream_index = ctx->klv_st->index;
        /* points into the local vector; avpacket_queue_put makes the packet
         * refcounted (copying the data) before the vector goes out of scope */
        klv_packet.data = klv.data();
        klv_packet.size = klv.size();

        if (avpacket_queue_put(&ctx->queue, &klv_packet) < 0) {
            ++ctx->dropped;
        }
    }
}
690 
691 class decklink_input_callback : public IDeckLinkInputCallback
692 {
693 public:
696 
697  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
698  virtual ULONG STDMETHODCALLTYPE AddRef(void);
699  virtual ULONG STDMETHODCALLTYPE Release(void);
700  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
701  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
702 
703 private:
704  std::atomic<int> _refs;
707  int no_video;
710 };
711 
713 {
714  avctx = _avctx;
715  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
716  ctx = (struct decklink_ctx *)cctx->ctx;
717  no_video = 0;
719 }
720 
722 {
723 }
724 
726 {
727  return ++_refs;
728 }
729 
731 {
732  int ret = --_refs;
733  if (!ret)
734  delete this;
735  return ret;
736 }
737 
/* Compute the timestamp for a captured video/audio packet according to the
 * configured PTS source, expressed in time_base units:
 *   - PTS_SRC_AUDIO/VIDEO/REFERENCE query the DeckLink frame objects;
 *   - the wallclock sources rescale the supplied microsecond clocks.
 * Unless copyts is set, the first valid pts seen becomes the zero point
 * (stored in *initial_pts) and is subtracted from all later timestamps.
 * Returns AV_NOPTS_VALUE when no timestamp could be obtained. */
static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
                           IDeckLinkAudioInputPacket *audioFrame,
                           int64_t wallclock,
                           int64_t abs_wallclock,
                           DecklinkPtsSource pts_src,
                           AVRational time_base, int64_t *initial_pts,
                           int copyts)
{
    int64_t pts = AV_NOPTS_VALUE;
    BMDTimeValue bmd_pts;
    BMDTimeValue bmd_duration;
    HRESULT res = E_INVALIDARG;
    switch (pts_src) {
        case PTS_SRC_AUDIO:
            if (audioFrame)
                res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
            break;
        case PTS_SRC_VIDEO:
            if (videoFrame)
                res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
            break;
        case PTS_SRC_REFERENCE:
            if (videoFrame)
                res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
            break;
        case PTS_SRC_WALLCLOCK:
            /* fall through */
        /* NOTE(review): a `case PTS_SRC_ABS_WALLCLOCK:` label appears to
         * have been lost in this extraction of the file — confirm against
         * the original source. */
        {
            /* MSVC does not support compound literals like AV_TIME_BASE_Q
             * in C++ code (compiler error C4576) */
            AVRational timebase;
            timebase.num = 1;
            timebase.den = AV_TIME_BASE;
            if (pts_src == PTS_SRC_WALLCLOCK)
                pts = av_rescale_q(wallclock, timebase, time_base);
            else
                pts = av_rescale_q(abs_wallclock, timebase, time_base);
            break;
        }
    }
    if (res == S_OK)
        pts = bmd_pts / time_base.num; // BMD time is already in 1/den units

    if (!copyts) {
        if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
            *initial_pts = pts;
        if (*initial_pts != AV_NOPTS_VALUE)
            pts -= *initial_pts;
    }

    return pts;
}
791 
/* Read one timecode of the given BMD format from the frame and convert it
 * to an AVTimecode. For >30 fps content without a native high-frame-rate
 * timecode, the frame counter is doubled and the bmdTimecodeFieldMark flag
 * supplies the extra least significant bit.
 * Returns the av_timecode_init_from_components() result on success, or
 * AVERROR(ENOENT) when the frame carries no timecode of this format. */
static int get_bmd_timecode(AVFormatContext *avctx, AVTimecode *tc, AVRational frame_rate, BMDTimecodeFormat tc_format, IDeckLinkVideoInputFrame *videoFrame)
{
    IDeckLinkTimecode *timecode;
    int ret = AVERROR(ENOENT);
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
    int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);
#else
    int hfr = 0; // HFR timecode format requires SDK >= 11
#endif
    if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {
        uint8_t hh, mm, ss, ff;
        if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {
            int flags = (timecode->GetFlags() & bmdTimecodeIsDropFrame) ? AV_TIMECODE_FLAG_DROPFRAME : 0;
            if (!hfr && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1)
                ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);
            ret = av_timecode_init_from_components(tc, frame_rate, flags, hh, mm, ss, ff, avctx);
        }
        timecode->Release();
    }
    return ret;
}
813 
/* Extract the frame's timecode into *tc using the configured tc_format.
 * Returns >= 0 on success, AVERROR(ENOENT) when no timecode was found. */
static int get_frame_timecode(AVFormatContext *avctx, decklink_ctx *ctx, AVTimecode *tc, IDeckLinkVideoInputFrame *videoFrame)
{
    AVRational frame_rate = ctx->video_st->r_frame_rate;
    int ret;
    /* 50/60 fps content has alternating VITC1 and VITC2 timecode (see SMPTE ST
     * 12-2, section 7), so the native ordering of RP188Any (HFR, VITC1, LTC,
     * VITC2) would not work because LTC might not contain the field flag.
     * Therefore we query the types manually. */
    if (ctx->tc_format == bmdTimecodeRP188Any && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1) {
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
        ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
        if (ret == AVERROR(ENOENT))
#endif
        ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC1, videoFrame);
        if (ret == AVERROR(ENOENT))
            ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC2, videoFrame);
        if (ret == AVERROR(ENOENT))
            ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188LTC, videoFrame);
    } else {
        ret = get_bmd_timecode(avctx, tc, frame_rate, ctx->tc_format, videoFrame);
    }
    return ret;
}
837 
839  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
840 {
841  void *frameBytes;
842  void *audioFrameBytes;
843  BMDTimeValue frameTime;
844  BMDTimeValue frameDuration;
845  int64_t wallclock = 0, abs_wallclock = 0;
846  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
847 
848  if (ctx->autodetect) {
849  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
850  ctx->bmd_mode == bmdModeUnknown)
851  {
853  }
854  return S_OK;
855  }
856 
857  // Drop the frames till system's timestamp aligns with the configured value.
858  if (0 == ctx->frameCount && cctx->timestamp_align) {
859  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
860  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
861  if (av_cmp_q(remainder, frame_duration) > 0) {
862  ++ctx->dropped;
863  return S_OK;
864  }
865  }
866 
867  ctx->frameCount++;
869  wallclock = av_gettime_relative();
871  abs_wallclock = av_gettime();
872 
873  // Handle Video Frame
874  if (videoFrame) {
875  AVPacket pkt;
876  av_init_packet(&pkt);
877  if (ctx->frameCount % 25 == 0) {
878  unsigned long long qsize = avpacket_queue_size(&ctx->queue);
880  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
881  ctx->frameCount,
882  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
883  (double)qsize / 1024 / 1024);
884  }
885 
886  videoFrame->GetBytes(&frameBytes);
887  videoFrame->GetStreamTime(&frameTime, &frameDuration,
889 
890  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
891  if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
892  unsigned bars[8] = {
893  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
894  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
895  int width = videoFrame->GetWidth();
896  int height = videoFrame->GetHeight();
897  unsigned *p = (unsigned *)frameBytes;
898 
899  for (int y = 0; y < height; y++) {
900  for (int x = 0; x < width; x += 2)
901  *p++ = bars[(x * 8) / width];
902  }
903  }
904 
905  if (!no_video) {
906  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
907  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
908  }
909  no_video = 1;
910  } else {
911  if (no_video) {
912  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
913  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
914  }
915  no_video = 0;
916 
917  // Handle Timecode (if requested)
918  if (ctx->tc_format) {
919  AVTimecode tcr;
920  if (get_frame_timecode(avctx, ctx, &tcr, videoFrame) >= 0) {
921  char tcstr[AV_TIMECODE_STR_SIZE];
922  const char *tc = av_timecode_make_string(&tcr, tcstr, 0);
923  if (tc) {
924  AVDictionary* metadata_dict = NULL;
925  int metadata_len;
926  uint8_t* packed_metadata;
927 
928  if (av_cmp_q(ctx->video_st->r_frame_rate, av_make_q(60, 1)) < 1) {
929  uint32_t tc_data = av_timecode_get_smpte_from_framenum(&tcr, 0);
930  int size = sizeof(uint32_t) * 4;
931  uint32_t *sd = (uint32_t *)av_packet_new_side_data(&pkt, AV_PKT_DATA_S12M_TIMECODE, size);
932 
933  if (sd) {
934  *sd = 1; // one TC
935  *(sd + 1) = tc_data; // TC
936  }
937  }
938 
939  if (av_dict_set(&metadata_dict, "timecode", tc, 0) >= 0) {
940  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
941  av_dict_free(&metadata_dict);
942  if (packed_metadata) {
943  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
944  av_freep(&packed_metadata);
945  else if (!ctx->tc_seen)
947  }
948  }
949  }
950  } else {
951  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
952  }
953  }
954  }
955 
956  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
957 
958  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
959  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
960  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
961  return S_OK;
962  }
963 
964  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
965  pkt.dts = pkt.pts;
966 
967  pkt.duration = frameDuration;
968  //To be made sure it still applies
969  pkt.flags |= AV_PKT_FLAG_KEY;
970  pkt.stream_index = ctx->video_st->index;
971  pkt.data = (uint8_t *)frameBytes;
972  pkt.size = videoFrame->GetRowBytes() *
973  videoFrame->GetHeight();
974  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
975 
976  if (!no_video) {
977  IDeckLinkVideoFrameAncillary *vanc;
978  AVPacket txt_pkt;
979  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
980  uint8_t *txt_buf = txt_buf0;
981 
982  if (ctx->enable_klv) {
983  handle_klv(avctx, ctx, videoFrame, pkt.pts);
984  }
985 
986  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
987  int i;
988  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
989  txt_buf[0] = 0x10; // data_identifier - EBU_data
990  txt_buf++;
991 #if CONFIG_LIBZVBI
992  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
993  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
994  int64_t line_mask = 1;
995  av_assert0(videoFrame->GetWidth() == 720);
996  for (i = 6; i < 336; i++, line_mask <<= 1) {
997  uint8_t *buf;
998  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
999  if (vanc_format == bmdFormat8BitYUV)
1000  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
1001  else
1002  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
1003  }
1004  if (i == 22)
1005  i = 317;
1006  }
1007  }
1008 #endif
1009  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
1010  int idx = get_vanc_line_idx(ctx->bmd_mode);
1011  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
1012  uint8_t *buf;
1013  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
1014  uint16_t vanc[MAX_WIDTH_VANC];
1015  size_t vanc_size = videoFrame->GetWidth();
1016  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
1017  vanc_size = vanc_size * 2;
1018  unpack_v210(vanc, buf, videoFrame->GetWidth());
1019  } else {
1020  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
1021  }
1022  txt_buf = get_metadata(avctx, vanc, vanc_size,
1023  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
1024  }
1025  if (i == vanc_line_numbers[idx].field0_vanc_end)
1026  i = vanc_line_numbers[idx].field1_vanc_start - 1;
1027  }
1028  }
1029  vanc->Release();
1030  if (txt_buf - txt_buf0 > 1) {
1031  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
1032  while (stuffing_units--) {
1033  memset(txt_buf, 0xff, 46);
1034  txt_buf[1] = 0x2c; // data_unit_length
1035  txt_buf += 46;
1036  }
1037  av_init_packet(&txt_pkt);
1038  txt_pkt.pts = pkt.pts;
1039  txt_pkt.dts = pkt.dts;
1040  txt_pkt.stream_index = ctx->teletext_st->index;
1041  txt_pkt.data = txt_buf0;
1042  txt_pkt.size = txt_buf - txt_buf0;
1043  if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
1044  ++ctx->dropped;
1045  }
1046  }
1047  }
1048  }
1049 
1050  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
1051  if (pkt.buf)
1052  videoFrame->AddRef();
1053 
1054  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1055  ++ctx->dropped;
1056  }
1057  }
1058 
1059  // Handle Audio Frame
1060  if (audioFrame) {
1061  AVPacket pkt;
1062  BMDTimeValue audio_pts;
1063  av_init_packet(&pkt);
1064 
1065  //hack among hacks
1066  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
1067  audioFrame->GetBytes(&audioFrameBytes);
1068  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
1069  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
1070  pkt.dts = pkt.pts;
1071 
1072  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
1073  pkt.flags |= AV_PKT_FLAG_KEY;
1074  pkt.stream_index = ctx->audio_st->index;
1075  pkt.data = (uint8_t *)audioFrameBytes;
1076 
1077  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1078  ++ctx->dropped;
1079  }
1080  }
1081 
1082  return S_OK;
1083 }
1084 
1086  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
1087  BMDDetectedVideoInputFormatFlags formatFlags)
1088 {
      /* NOTE(review): the line carrying this function's name (original line
       * 1085, presumably decklink_input_callback::VideoInputFormatChanged)
       * is missing from this extraction — confirm against the repository.
       * This is the DeckLink input-format-detection callback: it records the
       * newly detected display mode and pixel format so that
       * decklink_autodetect() can pick them up. */
1089  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
      /* Remember the detected mode; decklink_autodetect() polls ctx->bmd_mode
       * to learn when detection has completed. */
1090  ctx->bmd_mode = mode->GetDisplayMode();
1091  // check the C context member to make sure we set both raw_format and bmd_mode with data from the same format change callback
      /* Only derive the pixel format when the user did not force one:
       * RGB 4:4:4 sources map to 8-bit ARGB, everything else to 8-bit YUV. */
1092  if (!cctx->raw_format)
1093  ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
1094  return S_OK;
1095 }
1096 
      /* Probe the incoming signal by enabling DeckLink input-format detection,
       * briefly running the streams, and polling ctx->bmd_mode until the
       * VideoInputFormatChanged callback fills it in (up to ~3 s).
       * On success the detected mode's FourCC is stored into
       * cctx->format_code (caller-owned, allocated here).
       * Returns 0 on success, -1 on any failure or timeout. */
1097 static int decklink_autodetect(struct decklink_cctx *cctx) {
1098  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1099  DECKLINK_BOOL autodetect_supported = false;
1100  int i;
1101 
      /* Bail out early if the hardware cannot do format detection. */
1102  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
1103  return -1;
1104  if (autodetect_supported == false)
1105  return -1;
1106 
      /* Arm detection: start from a placeholder mode (AUTODETECT_DEFAULT_MODE,
       * i.e. NTSC per the file header) and let the format-changed callback
       * report the real one. */
1107  ctx->autodetect = 1;
1108  ctx->bmd_mode = bmdModeUnknown;
1109  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
1110  bmdFormat8BitYUV,
1111  bmdVideoInputEnableFormatDetection) != S_OK) {
1112  return -1;
1113  }
1114 
1115  if (ctx->dli->StartStreams() != S_OK) {
1116  return -1;
1117  }
1118 
1119  // 3 second timeout
      /* Poll every 100 ms, 30 times. */
1120  for (i = 0; i < 30; i++) {
1121  av_usleep(100000);
1122  /* Sometimes VideoInputFrameArrived is called without the
1123  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
1124  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
      /* NOTE(review): the second half of this condition (original line 1126,
       * presumably "ctx->bmd_mode != AUTODETECT_DEFAULT_MODE)") is missing
       * from this extraction — confirm against the repository. */
1125  if (ctx->bmd_mode != bmdModeUnknown &&
1127  break;
1128  }
1129 
      /* Stop probing and hand the streams back in a clean state; the caller
       * re-enables video input with the detected mode. */
1130  ctx->dli->PauseStreams();
1131  ctx->dli->FlushStreams();
1132  ctx->autodetect = 0;
1133  if (ctx->bmd_mode != bmdModeUnknown) {
      /* 4 FourCC bytes + NUL terminator. */
1134  cctx->format_code = (char *)av_mallocz(5);
1135  if (!cctx->format_code)
1136  return -1;
      /* BMDDisplayMode values are FourCCs; store big-endian so the bytes
       * read as the textual mode code. */
1137  AV_WB32(cctx->format_code, ctx->bmd_mode);
1138  return 0;
1139  } else {
1140  return -1;
1141  }
1142 
1143 }
1144 
1145 extern "C" {
1146 
      /* NOTE(review): the signature line (original 1147, presumably
       * "av_cold int ff_decklink_read_close(AVFormatContext *avctx)") is
       * missing from this extraction — confirm against the repository.
       * Demuxer close hook: stops and disables capture, releases the
       * DeckLink device state and frees the per-instance context. */
1148 {
1149  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1150  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1151 
      /* Only touch the input interface if read_header got far enough to
       * obtain it. */
1152  if (ctx->dli) {
1153  ctx->dli->StopStreams();
1154  ctx->dli->DisableVideoInput();
1155  ctx->dli->DisableAudioInput();
1156  }
1157 
1158  ff_decklink_cleanup(avctx);
      /* NOTE(review): original line 1159 is missing here — likely the
       * packet-queue teardown (avpacket_queue_end); verify upstream. */
1160 
1161  av_freep(&cctx->ctx);
1162 
1163  return 0;
1164 }
1165 
      /* NOTE(review): the signature lines (original 1166, presumably
       * "av_cold int ff_decklink_read_header(AVFormatContext *avctx)") are
       * missing from this extraction — confirm against the repository.
       * Demuxer open hook: allocates the capture context, copies the user
       * options out of the AVOption-backed cctx, opens the DeckLink device,
       * installs the frame callback and memory allocator, autodetects or
       * applies the requested video mode, creates the audio/video (and
       * optional KLV/teletext) streams, and starts capture.
       * Returns 0 on success or a negative AVERROR; on failure after device
       * init it cleans up via the error label. */
1167 {
1168  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1169  struct decklink_ctx *ctx;
1170  class decklink_allocator *allocator;
      /* NOTE(review): original line 1171 is missing — presumably the
       * "class decklink_input_callback *input_callback;" declaration used
       * below; verify upstream. */
1172  AVStream *st;
1173  HRESULT result;
1174  int ret;
1175 
      /* Copy user-facing options from the option context into the runtime
       * context. */
1176  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1177  if (!ctx)
1178  return AVERROR(ENOMEM);
1179  ctx->list_devices = cctx->list_devices;
1180  ctx->list_formats = cctx->list_formats;
1181  ctx->enable_klv = cctx->enable_klv;
      /* NOTE(review): original line 1182 is missing here (another option
       * copy); verify upstream. */
1183  ctx->preroll = cctx->preroll;
1184  ctx->duplex_mode = cctx->duplex_mode;
      /* Each guarded range check below selects an entry from the
       * corresponding *_map table. NOTE(review): the assignment bodies
       * (original lines 1186, 1188, 1190-1192, 1196) are missing from this
       * extraction — verify upstream. */
1185  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1187  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1189  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1193  ctx->draw_bars = cctx->draw_bars;
1194  ctx->audio_depth = cctx->audio_depth;
1195  if (cctx->raw_format > 0 && (unsigned int)cctx->raw_format < FF_ARRAY_ELEMS(decklink_raw_format_map))
1197  cctx->ctx = ctx;
1198 
1199  /* Check audio channel option for valid values: 2, 8 or 16 */
1200  switch (cctx->audio_channels) {
1201  case 2:
1202  case 8:
1203  case 16:
1204  break;
1205  default:
1206  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1207  return AVERROR(EINVAL);
1208  }
1209 
1210  /* Check audio bit depth option for valid values: 16 or 32 */
1211  switch (cctx->audio_depth) {
1212  case 16:
1213  case 32:
1214  break;
1215  default:
1216  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1217  return AVERROR(EINVAL);
1218  }
1219 
1220  /* List available devices. */
1221  if (ctx->list_devices) {
1222  ff_decklink_list_devices_legacy(avctx, 1, 0);
1223  return AVERROR_EXIT;
1224  }
1225 
      /* Open the device named by the URL. */
1226  ret = ff_decklink_init_device(avctx, avctx->url);
1227  if (ret < 0)
1228  return ret;
1229 
1230  /* Get input device. */
1231  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1232  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1233  avctx->url);
1234  ret = AVERROR(EIO);
1235  goto error;
1236  }
1237 
1238  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1239  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1240  ret = AVERROR(EIO);
1241  goto error;
1242  }
1243 
1244  /* List supported formats. */
1245  if (ctx->list_formats) {
      /* NOTE(review): original line 1246 is missing here — presumably the
       * ff_decklink_list_formats(avctx, DIRECTION_IN) call; verify
       * upstream. */
1247  ret = AVERROR_EXIT;
1248  goto error;
1249  }
1250 
      /* NOTE(review): original line 1251 is missing — presumably
       * "input_callback = new decklink_input_callback(avctx);"; verify
       * upstream. SetCallback takes its own reference, so the local
       * reference is released immediately after. */
1252  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1253  input_callback->Release();
1254  if (ret < 0) {
1255  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1256  goto error;
1257  }
1258 
      /* Same ownership pattern for the custom frame allocator: the driver
       * keeps its own reference. */
1259  allocator = new decklink_allocator();
1260  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1261  allocator->Release();
1262  if (ret < 0) {
1263  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1264  goto error;
1265  }
1266 
      /* No mode forced by the user: probe the signal. On success
       * decklink_autodetect() fills cctx->format_code and ctx->bmd_mode. */
1267  if (!cctx->format_code) {
1268  if (decklink_autodetect(cctx) < 0) {
1269  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1270  ret = AVERROR(EIO);
1271  goto error;
1272  }
1273  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1274  }
1275  if (ctx->raw_format == (BMDPixelFormat)0)
1276  ctx->raw_format = bmdFormat8BitYUV;
1277  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1278  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1279  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1280  ret = AVERROR(EIO);
1281  goto error;
1282  }
1283 
1284 #if !CONFIG_LIBZVBI
      /* SD PAL teletext decoding requires libzvbi; fail loudly rather than
       * silently dropping the requested lines. */
1285  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1286  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1287  ret = AVERROR(ENOSYS);
1288  goto error;
1289  }
1290 #endif
1291 
1292  /* Setup streams. */
      /* Audio stream: PCM at 48 kHz, sample format chosen by the audio_depth
       * option (32- or 16-bit little-endian). */
1293  st = avformat_new_stream(avctx, NULL);
1294  if (!st) {
1295  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1296  ret = AVERROR(ENOMEM);
1297  goto error;
1298  }
1299  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1300  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1301  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1302  st->codecpar->channels = cctx->audio_channels;
1303  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1304  ctx->audio_st=st;
1305 
      /* Video stream: geometry and frame rate come from the negotiated
       * DeckLink mode. */
1306  st = avformat_new_stream(avctx, NULL);
1307  if (!st) {
1308  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1309  ret = AVERROR(ENOMEM);
1310  goto error;
1311  }
1312  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1313  st->codecpar->width = ctx->bmd_width;
1314  st->codecpar->height = ctx->bmd_height;
1315 
1316  st->time_base.den = ctx->bmd_tb_den;
1317  st->time_base.num = ctx->bmd_tb_num;
1318  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1319 
      /* Map the DeckLink pixel format to an FFmpeg codec/pixel format and an
       * uncompressed bit rate (width * height * bits-per-pixel * fps; v210
       * packs 6 pixels into 16 bytes, hence the 64/3 factor). */
1320  switch(ctx->raw_format) {
1321  case bmdFormat8BitYUV:
1322  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1323  st->codecpar->format = AV_PIX_FMT_UYVY422;
1324  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1325  break;
1326  case bmdFormat10BitYUV:
1327  st->codecpar->codec_id = AV_CODEC_ID_V210;
1328  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1329  break;
1330  case bmdFormat8BitARGB:
1331  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1332  st->codecpar->format = AV_PIX_FMT_0RGB;
1333  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1334  break;
1335  case bmdFormat8BitBGRA:
1336  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1337  st->codecpar->format = AV_PIX_FMT_BGR0;
1338  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1339  break;
1340  case bmdFormat10BitRGB:
1341  st->codecpar->codec_id = AV_CODEC_ID_R210;
1342  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1343  break;
1344  default:
1345  char fourcc_str[AV_FOURCC_MAX_STRING_SIZE] = {0};
1346  av_fourcc_make_string(fourcc_str, ctx->raw_format);
1347  av_log(avctx, AV_LOG_ERROR, "Raw Format %s not supported\n", fourcc_str);
1348  ret = AVERROR(EINVAL);
1349  goto error;
1350  }
1351 
      /* Translate DeckLink field dominance into AVFieldOrder; unknown values
       * fall through leaving field_order untouched. */
1352  switch (ctx->bmd_field_dominance) {
1353  case bmdUpperFieldFirst:
1354  st->codecpar->field_order = AV_FIELD_TT;
1355  break;
1356  case bmdLowerFieldFirst:
1357  st->codecpar->field_order = AV_FIELD_BB;
1358  break;
1359  case bmdProgressiveFrame:
1360  case bmdProgressiveSegmentedFrame:
1361  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1362  break;
1363  }
1364 
1365  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1366 
1367  ctx->video_st=st;
1368 
      /* Optional KLV data stream (SMPTE 336M), created only when the
       * enable_klv option is set. */
1369  if (ctx->enable_klv) {
1370  st = avformat_new_stream(avctx, NULL);
1371  if (!st) {
1372  ret = AVERROR(ENOMEM);
1373  goto error;
1374  }
1375  st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
1376  st->time_base.den = ctx->bmd_tb_den;
1377  st->time_base.num = ctx->bmd_tb_num;
1378  st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
1379  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1380  ctx->klv_st = st;
1381  }
1382 
      /* Optional DVB teletext stream, created only when teletext_lines is
       * requested. */
1383  if (ctx->teletext_lines) {
1384  st = avformat_new_stream(avctx, NULL);
1385  if (!st) {
1386  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1387  ret = AVERROR(ENOMEM);
1388  goto error;
1389  }
1390  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1391  st->time_base.den = ctx->bmd_tb_den;
1392  st->time_base.num = ctx->bmd_tb_num;
1393  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1394  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1395  ctx->teletext_st = st;
1396  }
1397 
      /* Arm the hardware with the final audio and video parameters, then
       * start streaming; from here on packets arrive via the callback and
       * are drained by ff_decklink_read_packet(). */
1398  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
1399  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1400 
1401  if (result != S_OK) {
1402  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1403  ret = AVERROR(EIO);
1404  goto error;
1405  }
1406 
1407  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1408  ctx->raw_format,
1409  bmdVideoInputFlagDefault);
1410 
1411  if (result != S_OK) {
1412  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1413  ret = AVERROR(EIO);
1414  goto error;
1415  }
1416 
1417  avpacket_queue_init (avctx, &ctx->queue);
1418 
1419  if (ctx->dli->StartStreams() != S_OK) {
1420  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1421  ret = AVERROR(EIO);
1422  goto error;
1423  }
1424 
1425  return 0;
1426 
      /* Shared failure path: release everything ff_decklink_init_device set
       * up and propagate the error code. */
1427 error:
1428  ff_decklink_cleanup(avctx);
1429  return ret;
1430 }
1431 
      /* NOTE(review): the signature line (original 1432, presumably
       * "int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)")
       * is missing from this extraction — confirm against the repository.
       * Demuxer read hook: blocks until the callback thread has queued a
       * packet, then pops it. The first packet carrying string-metadata side
       * data also seeds the video stream's "timecode" metadata entry
       * (done once — skipped as soon as the key exists). */
1433 {
1434  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1435  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1436 
      /* Blocking get (third argument non-zero). */
1437  avpacket_queue_get(&ctx->queue, pkt, 1);
1438 
1439  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
1440  int size;
1441  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
1442  if (side_metadata) {
      /* Best effort: a failed unpack is only logged, the packet is still
       * returned to the caller. */
1443  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1444  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1445  }
1446  }
1447 
1448  return 0;
1449 }
1450 
      /* NOTE(review): the signature line (original 1451, presumably
       * "int ff_decklink_list_input_devices(AVFormatContext *avctx,
       * struct AVDeviceInfoList *device_list)") is missing from this
       * extraction — confirm against the repository. Thin wrapper that lists
       * capture-capable (input=1, output=0) DeckLink devices. */
1452 {
1453  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1454 }
1455 
1456 } /* extern "C" */
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
#define NULL
Definition: coverity.c:32
static int shift(int a, int b)
Definition: sonic.c:82
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
#define pthread_mutex_lock(a)
Definition: ffprobe.c:63
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: utils.c:4913
const uint8_t ff_reverse[256]
Definition: reverse.c:23
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
int num
Numerator.
Definition: rational.h:59
int index
stream index in AVFormatContext
Definition: avformat.h:885
int size
Definition: packet.h:364
BMDDisplayMode mode
Convenience header that includes libavutil's core.
#define tc
Definition: regdef.h:69
#define FF_ARRAY_ELEMS(a)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
static AVPacket pkt
static void error(const char *err)
pthread_cond_t cond
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:144
Format I/O context.
Definition: avformat.h:1243
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
Opaque data information usually continuous.
Definition: avutil.h:203
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
pthread_mutex_t mutex
AVPacket pkt
Definition: packet.h:397
timecode is drop frame
Definition: timecode.h:36
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
Definition: mxf.h:67
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:4489
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
uint8_t * data
Definition: packet.h:363
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s it
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:664
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
ptrdiff_t size
Definition: opengl_enc.c:100
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:401
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
Main libavdevice API header.
#define src
Definition: vp8dsp.c:255
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
static const uint16_t mask[17]
Definition: lzw.c:38
AVPacketList * last_pkt
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
char * url
input or output URL.
Definition: avformat.h:1339
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
#define AV_FOURCC_MAX_STRING_SIZE
Definition: avutil.h:346
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
Definition: graph2dot.c:48
simple assert() macros that are a bit more flexible than ISO C assert().
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define FFMIN(a, b)
Definition: common.h:105
AVPacketList * first_pkt
unsigned long long size
#define width
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:672
AVFormatContext * ctx
Definition: movenc.c:48
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
AVDictionary * metadata
Definition: avformat.h:948
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:67
static volatile int checksum
Definition: adler32.c:30
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
char * av_fourcc_make_string(char *buf, uint32_t fourcc)
Fill the provided buffer with a string containing a FourCC (four-character code) representation.
Definition: utils.c:121
if(ret)
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
Stream structure.
Definition: avformat.h:884
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
A list of zero terminated key/value strings.
Definition: packet.h:172
Timecode helpers header.
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
uint8_t * av_packet_pack_dictionary(AVDictionary *dict, int *size)
Pack a dictionary for use in side_data.
Definition: avpacket.c:495
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
Definition: mmaldec.c:201
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
Rational number (pair of numerator and denominator).
Definition: rational.h:58
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
int64_t max_q_size
AVFormatContext * avctx
List of devices.
Definition: avdevice.h:465
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
#define flags(name, subs,...)
Definition: cbs_av1.c:561
char * av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum)
Load timecode string in buf.
Definition: timecode.c:102
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
#define av_parity
Definition: intmath.h:158
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Main libavformat public API header.
int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx)
Init a timecode struct from the passed timecode components.
Definition: timecode.c:229
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:33
struct AVPacketList * next
Definition: packet.h:398
common internal and external API header
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:133
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define av_free(p)
int len
void * priv_data
Format private data.
Definition: avformat.h:1271
int channels
Audio only.
Definition: codec_par.h:166
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
and forward the result(frame or status change) to the corresponding input.If nothing is possible
#define av_freep(p)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1049
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
int stream_index
Definition: packet.h:365
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:913
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:237
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1026
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
This structure stores compressed data.
Definition: packet.h:340
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
GLuint buffer
Definition: opengl_enc.c:101
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum)
Convert frame number to SMPTE 12M binary representation.
Definition: timecode.c:52