#define INPUT_CLEANSRC 1

#define OFFSET(x) offsetof(FieldMatchContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

    { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },

    { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },

    { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT,  {.i64= 9}, -1, 0xff, FLAGS },
    { "chroma",  "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS },
    { "blockx",  "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT,  {.i64=16}, 4, 1<<9, FLAGS },
    { "blocky",  "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT,  {.i64=16}, 4, 1<<9, FLAGS },
    { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
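The OFFSET() macro records the byte offset of a FieldMatchContext member, so the generic AVOption machinery can write a parsed option value directly into the right field of the filter's private context. A minimal standalone sketch of the same offsetof-based technique, using hypothetical Ctx/Opt types rather than the real AVOption structures:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct Ctx { int cthresh, blockx, blocky, combpel; } Ctx;
typedef struct Opt { const char *name; size_t offset; int def; } Opt;

static const Opt opts[] = {
    { "cthresh", offsetof(Ctx, cthresh),  9 },
    { "blockx",  offsetof(Ctx, blockx),  16 },
    { "blocky",  offsetof(Ctx, blocky),  16 },
    { "combpel", offsetof(Ctx, combpel), 80 },
};

/* write an integer option into the context by name, in the spirit of what the
 * generic option parser does with OFFSET() entries */
static int set_opt(Ctx *c, const char *name, int value)
{
    for (size_t i = 0; i < sizeof(opts)/sizeof(*opts); i++)
        if (!strcmp(opts[i].name, name)) {
            *(int *)((char *)c + opts[i].offset) = value;
            return 0;
        }
    return -1;
}

int main(void)
{
    Ctx c = {0};
    for (size_t i = 0; i < sizeof(opts)/sizeof(*opts); i++)
        *(int *)((char *)&c + opts[i].offset) = opts[i].def;   /* apply defaults */
    set_opt(&c, "cthresh", 12);
    printf("cthresh=%d blockx=%d\n", c.cthresh, c.blockx);     /* cthresh=12 blockx=16 */
    return 0;
}

A single table like this is what lets one option parser serve every filter: each entry carries the name, the default value, the valid range, and where in the context the value lives.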
/* luma_abs_diff(): sum of absolute luma differences between two frames */
    const int src1_linesize = f1->linesize[0];
    const int src2_linesize = f2->linesize[0];

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            acc += abs(srcp1[x] - srcp2[x]);
        srcp1 += src1_linesize;
        srcp2 += src2_linesize;
    }
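This is a plain sum of absolute differences (SAD) over the luma plane of two frames. A self-contained sketch of the same computation on raw buffers (the plane pointers and strides here are illustrative parameters, not the AVFrame fields used above):

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences between two 8-bit planes with independent strides. */
static int64_t plane_sad(const uint8_t *p1, int linesize1,
                         const uint8_t *p2, int linesize2,
                         int width, int height)
{
    int64_t acc = 0;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            acc += abs(p1[x] - p2[x]);
        p1 += linesize1;
        p2 += linesize2;
    }
    return acc;
}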
    for (y = 0; y < h; y++) {      /* fill_buf(): fill each of the h rows with the byte v */
        memset(data, v, w);
        data += linesize;
    }
/* calc_combed_score(): build a per-pixel combing mask for each plane */
    int x, y, plane, max_v = 0;
    const int cthresh6 = cthresh * 6;

    for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
        const int src_linesize = src->linesize[plane];
        /* ... */
        if (cthresh < 0) { /* cthresh=-1: consider every pixel combed */
            fill_buf(cmkp, width, height, cmk_linesize, 0xff);
            continue;
        }
        fill_buf(cmkp, width, height, cmk_linesize, 0);

#define FILTER(xm2, xm1, xp1, xp2) \
        abs(  4 * srcp[x] \
             -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
             +    (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6

        /* first line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && FILTER(2, 1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* second line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* all lines except the first two and the last two */
        for (y = 2; y < height - 2; y++) {
            for (x = 0; x < width; x++) {
                const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
                const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
                if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
                    cmkp[x] = 0xff;
            }
            srcp += src_linesize;
            cmkp += cmk_linesize;
        }

        /* second-to-last line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* last line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            if (s1 > cthresh && FILTER(-2, -1, -1, -2))
                cmkp[x] = 0xff;
        }
    }
/* merge combing detected in the chroma planes back into the luma mask */
        uint8_t *cmkpp  = cmkp - (cmk_linesize>>1);
        uint8_t *cmkpn  = cmkp + (cmk_linesize>>1);
        uint8_t *cmkpnn = cmkp +  cmk_linesize;
        for (y = 1; y < height - 1; y++) {
            cmkpp  += cmk_linesize;
            cmkp   += cmk_linesize;
            cmkpn  += cmk_linesize;
            cmkpnn += cmk_linesize;
            cmkpV  += cmk_linesizeUV;
            cmkpU  += cmk_linesizeUV;
            for (x = 1; x < width - 1; x++) {
#define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
                              p[(x)-1       ] == 0xff ||                          p[(x)+1       ] == 0xff || \
                              p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
                if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
                    (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
                    ((uint16_t*)cmkp)[x]  = 0xffff;
                    ((uint16_t*)cmkpn)[x] = 0xffff;
                    if (y&1) ((uint16_t*)cmkpp)[x]  = 0xffff;
                    else     ((uint16_t*)cmkpnn)[x] = 0xffff;
                }
            }
        }
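A chroma pixel flagged as combed, and having at least one flagged 8-neighbour, is propagated into the luma mask. With 4:2:0 subsampling a chroma column x covers luma columns 2x and 2x+1, which is exactly what the uint16_t store writes in one go, and each chroma row is spread over two luma rows (plus one extra row above or below depending on parity). A minimal sketch of just the coordinate mapping, with hypothetical mask buffers:

#include <stdint.h>
#include <string.h>

/* Propagate a combed chroma sample at (cx, cy) into a full-resolution luma mask,
 * assuming 4:2:0 subsampling: chroma (cx, cy) covers luma columns 2*cx..2*cx+1
 * and luma rows 2*cy..2*cy+1. */
static void mark_luma_from_chroma(uint8_t *luma_mask, int luma_linesize,
                                  int cx, int cy)
{
    uint8_t *row0 = luma_mask + (2 * cy)     * luma_linesize + 2 * cx;
    uint8_t *row1 = luma_mask + (2 * cy + 1) * luma_linesize + 2 * cx;
    memset(row0, 0xff, 2);   /* two luma columns on the upper row */
    memset(row1, 0xff, 2);   /* and on the lower row */
}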
/* accumulate combed pixels per (overlapping) blockx x blocky window */
        const int xhalf = blockx/2;
        const int yhalf = blocky/2;
        /* ... */
        const int xblocks  = ((width+xhalf)/blockx) + 1;
        const int xblocks4 = xblocks<<2;
        const int yblocks  = ((height+yhalf)/blocky) + 1;
        /* ... */
        const int arraysize = (xblocks*yblocks)<<2;
        int heighta = (height/(blocky/2))*(blocky/2);
        const int widtha = (width /(blockx/2))*(blockx/2);
        if (heighta == height)
            heighta = height - yhalf;
        memset(c_array, 0, arraysize * sizeof(*c_array));

/* every combed pixel votes into 4 counters: its block on each of the 4 grids
   shifted by half a block in x and/or y */
#define C_ARRAY_ADD(v) do {                             \
    const int box1 = (x / blockx) * 4;                  \
    const int box2 = ((x + xhalf) / blockx) * 4;        \
    c_array[temp1 + box1    ] += v;                     \
    c_array[temp1 + box2 + 1] += v;                     \
    c_array[temp2 + box1 + 2] += v;                     \
    c_array[temp2 + box2 + 3] += v;                     \
} while (0)

#define VERTICAL_HALF(y_start, y_end) do {                   \
    for (y = y_start; y < y_end; y++) {                      \
        const int temp1 = (y / blocky) * xblocks4;           \
        const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
        for (x = 0; x < width; x++)                          \
            if (cmkp[x - cmk_linesize] == 0xff &&            \
                cmkp[x               ] == 0xff &&            \
                cmkp[x + cmk_linesize] == 0xff)              \
                C_ARRAY_ADD(1);                              \
        cmkp += cmk_linesize;                                \
    }                                                        \
} while (0)

        VERTICAL_HALF(1, yhalf + 1);

        for (y = yhalf; y < heighta; y += yhalf) {
            const int temp1 = (y / blocky) * xblocks4;
            const int temp2 = ((y + yhalf) / blocky) * xblocks4;

            for (x = 0; x < widtha; x += xhalf) {
                const uint8_t *cmkp_tmp = cmkp + x;
                int u, v, count = 0;
                for (u = 0; u < yhalf; u++) {
                    for (v = 0; v < xhalf; v++)
                        if (cmkp_tmp[v - cmk_linesize] == 0xff &&
                            cmkp_tmp[v               ] == 0xff &&
                            cmkp_tmp[v + cmk_linesize] == 0xff)
                            count++;
                    cmkp_tmp += cmk_linesize;
                }
                if (count)
                    C_ARRAY_ADD(count);
            }

            for (x = widtha; x < width; x++) {
                const uint8_t *cmkp_tmp = cmkp + x;
                int u, count = 0;
                for (u = 0; u < yhalf; u++) {
                    if (cmkp_tmp[-cmk_linesize] == 0xff &&
                        cmkp_tmp[ 0           ] == 0xff &&
                        cmkp_tmp[ cmk_linesize] == 0xff)
                        count++;
                    cmkp_tmp += cmk_linesize;
                }
                if (count)
                    C_ARRAY_ADD(count);
            }

            cmkp += cmk_linesize * yhalf;
        }

        VERTICAL_HALF(heighta + 1, height - 1);

        for (x = 0; x < arraysize; x++)
            if (c_array[x] > max_v)
                max_v = c_array[x];
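c_array keeps four counters per block cell, one for each of the four grids offset by half a block in x and/or y; the maximum over all counters is the combed-pixel count of the worst-placed blockx x blocky window, which is the score later compared against combpel. A brute-force sketch of the quantity being maximized (this version simply recounts every window and, for brevity, drops the extra requirement of three vertically consecutive mask pixels used above):

#include <stdint.h>

/* Count flagged pixels in every blockx x blocky window placed on a grid shifted
 * by half a block in x and y, and return the worst window's count. */
static int worst_block_count(const uint8_t *mask, int linesize,
                             int width, int height, int blockx, int blocky)
{
    int max_v = 0;
    for (int by = 0; by < height; by += blocky / 2) {
        for (int bx = 0; bx < width; bx += blockx / 2) {
            int count = 0;
            for (int y = by; y < by + blocky && y < height; y++)
                for (int x = bx; x < bx + blockx && x < width; x++)
                    count += mask[y * linesize + x] == 0xff;
            if (count > max_v)
                max_v = count;
        }
    }
    return max_v;   /* the frame counts as "combed" if this exceeds combpel */
}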
/* build_abs_diff_mask(): per-pixel absolute difference between the previous and
   next frames, written into the temporary buffer used by build_diff_map() */
static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
                                const uint8_t *nxtp, int nxt_linesize,
                                uint8_t *tbuffer, int tbuf_linesize,
                                int width, int height)
{
    int x, y;

    prvp -= prv_linesize;
    nxtp -= nxt_linesize;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
        prvp += prv_linesize;
        nxtp += nxt_linesize;
        tbuffer += tbuf_linesize;
    }
}
/* build_diff_map(): build a map over which pixels differ a lot/a little
   between the previous and next frames (consumed by compare_fields()) */
static void build_diff_map(FieldMatchContext *fm,
                           const uint8_t *prvp, int prv_linesize,
                           const uint8_t *nxtp, int nxt_linesize,
                           uint8_t *dstp, int dst_linesize,
                           int height, int width, int plane)
{
    /* ... */
    build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
                        fm->tbuffer, tpitch, width, height>>1);

    for (y = 2; y < height - 2; y += 2) {
        for (x = 1; x < width - 1; x++) {
            /* small differences: count neighbours above/below that differ by more than 3 */
            for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
                count += dp[u-tpitch] > 3;
                count += dp[u       ] > 3;
                count += dp[u+tpitch] > 3;
            }
            /* ... */
            /* large differences: same idea with a threshold of 19, tracking whether
               they occur above and/or below the current line */
            int upper = 0, lower = 0;
            for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
                if (dp[u-tpitch] > 19) { count++; upper = 1; }
                if (dp[u       ] > 19)   count++;
                if (dp[u+tpitch] > 19) { count++; lower = 1; }
            }
            /* ... */
            if (upper && lower) {
                /* ... */
            } else {
                int upper2 = 0, lower2 = 0;
                for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
                    if (y != 2        && dp[u-2*tpitch] > 19) upper2 = 1;
                    if (                 dp[u-  tpitch] > 19) upper  = 1;
                    if (                 dp[u+  tpitch] > 19) lower  = 1;
                    if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
                }
                if ((upper && (lower || upper2)) ||
                    (lower && (upper || lower2))) {
                    /* ... */
                }
            }
        }
        /* ... */
        dstp += dst_linesize;
    }
}
static int get_field_base(int match, int field)
{
    return match < 3 ? 2 - field : 1 + field;
}

/* select_frame(): pick which of the three buffered frames a given match takes
   its second field from */
static AVFrame *select_frame(FieldMatchContext *fm, int match)
{
    if      (match == mP || match == mB) return fm->prv;
    else if (match == mN || match == mU) return fm->nxt;
    else                                 return fm->src;
}
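create_weave_frame() then combines one field of the current frame with the opposite field of the frame selected above (copy_fields() uses av_image_copy_plane() with doubled strides to touch only every second line). A minimal sketch of the weaving idea on plain buffers, using the convention that the top field sits on even lines (the names and parameters are illustrative, not the filter's own):

#include <stdint.h>
#include <string.h>

/* Weave: copy even lines from one source and odd lines from another into dst. */
static void weave_plane(uint8_t *dst, int dst_linesize,
                        const uint8_t *top_src, int top_linesize,
                        const uint8_t *bot_src, int bot_linesize,
                        int width, int height)
{
    for (int y = 0; y < height; y++) {
        const uint8_t *src = (y & 1) ? bot_src + y * bot_linesize
                                     : top_src + y * top_linesize;
        memcpy(dst + y * dst_linesize, src, width);
    }
}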
/* compare_fields(): decide between two candidate matches (match1/match2) by
   accumulating per-pixel mismatch against the previous and next frames */
    uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
    uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
    int norm1, norm2, mtn1, mtn2;

    for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
        int x, y, temp1, temp2, fbase;
        /* ... */
        const int src_linesize  = src->linesize[plane];
        const int srcf_linesize = src_linesize << 1;
        int prv_linesize,  nxt_linesize;
        int prvf_linesize, nxtf_linesize;
        /* ... */
        const int y0a = fm->y0 >> (plane ? fm->vsub : 0);
        const int y1a = fm->y1 >> (plane ? fm->vsub : 0);
        const int startx = (plane == 0 ? 8 : 8 >> fm->hsub);
        const int stopx  = width - startx;
        const uint8_t *srcpf, *srcf, *srcnf;
        const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;

        fill_buf(mapp, width, height, map_linesize, 0);
        /* ... */
        srcf  = srcp + (fbase + 1) * src_linesize;
        srcpf = srcf - srcf_linesize;
        srcnf = srcf + srcf_linesize;
        /* ... */
        prv_linesize  = prev->linesize[plane];
        prvf_linesize = prv_linesize << 1;
        prvpf = prev->data[plane] + fbase * prv_linesize;
        prvnf = prvpf + prvf_linesize;
        /* ... */
        nxt_linesize  = next->linesize[plane];
        nxtf_linesize = nxt_linesize << 1;
        nxtpf = next->data[plane] + fbase * nxt_linesize;
        nxtnf = nxtpf + nxtf_linesize;

        if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
            build_diff_map(fm, /* ... */
                           mapp, map_linesize, height, width, plane);
        else
            build_diff_map(fm, /* ... */
                           mapp + map_linesize, map_linesize, height, width, plane);

        for (y = 2; y < height - 2; y += 2) {
            if (y0a == y1a || y < y0a || y > y1a) {   /* honor the y0..y1 exclusion band */
                for (x = startx; x < stopx; x++) {
                    if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
                        temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x];

                        /* mismatch against the previous frame's fields */
                        temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            accumPc += temp2;
                        /* ... */
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            accumPm += temp2;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            accumPml += temp2;
                        /* ... */

                        /* mismatch against the next frame's fields */
                        temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            accumNc += temp2;
                        /* ... */
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            accumNm += temp2;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            accumNml += temp2;
                        /* ... */
                    }
                }
            }
            prvpf += prvf_linesize;
            prvnf += prvf_linesize;
            srcpf += srcf_linesize;
            srcf  += srcf_linesize;
            srcnf += srcf_linesize;
            nxtpf += nxtf_linesize;
            nxtnf += nxtf_linesize;
            /* ... */
        }
    }

    if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
        FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
        /* ... */
    }

    norm1 = (int)((accumPc / 6.0f) + 0.5f);
    norm2 = (int)((accumNc / 6.0f) + 0.5f);
    mtn1  = (int)((accumPm / 6.0f) + 0.5f);
    mtn2  = (int)((accumNm / 6.0f) + 0.5f);
    c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
    c2 = ((float)FFMAX(mtn1, mtn2))  / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
    mr = ((float)FFMAX(mtn1, mtn2))  / ((float)FFMAX(FFMAX(norm1,norm2),1));
    if (((mtn1 >= 500  || mtn2 >= 500)  && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
        ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
        ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
        ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else
        ret = norm1 > norm2 ? match2 : match1;
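The accumulators are reduced to two pairs of scores per candidate: norm1/norm2 come from accumPc/accumNc (the areas the diff map marks with bit 1, "differ a little") and mtn1/mtn2 from accumPm/accumNm (bit 2, "differ a lot"). The threshold cascade prefers whichever candidate shows less motion-like mismatch and only falls back to the norm comparison when the motion evidence is weak. The same final decision step lifted into a standalone sketch (FFMAX/FFMIN replaced by local macros; the early-exit special cases visible above are omitted):

#include <stdint.h>

#define MAXV(a, b) ((a) > (b) ? (a) : (b))
#define MINV(a, b) ((a) < (b) ? (a) : (b))

/* Pick match1 or match2 from the accumulated "clean" (Pc/Nc) and "motion"
 * (Pm/Nm) mismatch sums of the previous/next comparisons. */
static int pick_match(uint64_t accumPc, uint64_t accumNc,
                      uint64_t accumPm, uint64_t accumNm,
                      int match1, int match2)
{
    const int norm1 = (int)(accumPc / 6.0f + 0.5f);
    const int norm2 = (int)(accumNc / 6.0f + 0.5f);
    const int mtn1  = (int)(accumPm / 6.0f + 0.5f);
    const int mtn2  = (int)(accumNm / 6.0f + 0.5f);
    const float c1 = (float)MAXV(norm1, norm2) / MAXV(MINV(norm1, norm2), 1);
    const float c2 = (float)MAXV(mtn1,  mtn2)  / MAXV(MINV(mtn1,  mtn2),  1);
    const float mr = (float)MAXV(mtn1,  mtn2)  / MAXV(MAXV(norm1, norm2), 1);

    /* strong, consistent motion difference: trust the motion metric */
    if (((mtn1 >=  500 || mtn2 >=  500) && (mtn1*2 < mtn2   || mtn2*2 < mtn1  )) ||
        ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
        ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
        ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
        return mtn1 > mtn2 ? match2 : match1;
    /* moderate but clear motion difference */
    if (mr > 0.005 && MAXV(mtn1, mtn2) > 150 && (mtn1*2 < mtn2 || mtn2*2 < mtn1))
        return mtn1 > mtn2 ? match2 : match1;
    /* otherwise fall back to the static-area comparison */
    return norm1 > norm2 ? match2 : match1;
}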
/* copy_fields(): copy one field (every second line) of src into dst, plane by plane */
    for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
        const int plane_h = get_height(fm, src, plane);
        const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
        /* the selected field's lines are then copied with av_image_copy_plane(),
           using doubled strides so only every second line is written */
    }
/* checkmm(): lazily compute the combed score of candidate matches m1/m2 and
   switch to m2 when it is clearly less combed than m1 */
#define LOAD_COMB(mid) do {                                                  \
    if (combs[mid] < 0) {                                                    \
        if (!gen_frames[mid])                                                \
            gen_frames[mid] = create_weave_frame(ctx, mid, field,            \
                                                 fm->prv, fm->src, fm->nxt); \
        combs[mid] = calc_combed_score(fm, gen_frames[mid]);                 \
    }                                                                        \
} while (0)

    if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
        abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
        return m2;
    return m1;
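For example, with the default combpel of 80, a candidate whose weave scores 25 replaces a current match scoring 90: 25*3 = 75 < 90 satisfies the first alternative, the scores differ by 65 (at least 30), and 25 < 80, so checkmm() switches to the candidate. Two matches scoring 70 and 90 would stay put, since neither 70*3 < 90 nor (70*2 < 90 and 90 > 80) holds.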
/* filter_frame(): slide the prv/src/nxt window, compute the candidate matches in
   the order given by fxo[] and refine the choice with checkmm() */
    int combs[] = { -1, -1, -1, -1, -1 };

#define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
        /* ... */                                \
        av_frame_free(&prv);                     \
        /* ... */                                \
        av_assert0(prv && src && nxt);           \
        /* ... */                                \
} while (0)

    av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
    /* ... */
    if (!gen_frames[i]) {
        /* ... */
    }
    /* ... */
        combs[0], combs[1], combs[2], combs[3], combs[4]);
    /* ... */
    if (!gen_frames[mC]) {
        /* ... */
    }

    /* extra matching passes, selected by the configured mode */
    match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
    /* ... */
    match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
    /* ... */
    match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
    /* ... */
    match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
    match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
    match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
    /* ... */
    match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
    /* ... */
    match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
    match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);

    /* ... */
    if (!gen_frames[match]) {
        /* ... */
    } else {
        dst = gen_frames[match];
        gen_frames[match] = NULL;
    }
    /* ... */
        " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
        /* ... */
/* config_input(): per-input setup based on the negotiated frame size */
    const int w = inlink->w;
    const int h = inlink->h;
    /* ... */

/* fieldmatch_init(): optional second input pad and option sanity checks */
    pad.name = "clean_src";
    /* ... */
    av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");

/* config_output(): the output inherits the input dimensions */
    outlink->w = inlink->w;
    outlink->h = inlink->h;

AVFilter ff_vf_fieldmatch = {
    .name          = "fieldmatch",
    /* ... */
    .outputs       = fieldmatch_outputs,
    .priv_class    = &fieldmatch_class,
    /* ... */
};