vf_mp.c
1 /*
2  * Copyright (c) 2011 Michael Niedermayer
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  *
20  * Parts of this file have been stolen from mplayer
21  */
22 
23 /**
24  * @file
25  */
26 
27 #include "avfilter.h"
28 #include "video.h"
29 #include "formats.h"
30 #include "internal.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/opt.h"
36 
37 #include "libmpcodecs/vf.h"
38 #include "libmpcodecs/img_format.h"
39 #include "libmpcodecs/cpudetect.h"
41 
42 #include "libswscale/swscale.h"
43 
44 
45 //FIXME maybe link the orig in
46 //XXX: identical pix_fmts must follow each other
47 static const struct {
48  int fmt;
49  enum AVPixelFormat pix_fmt;
50 } conversion_map[] = {
95 
97 
104 
105  // YUVJ are YUV formats that use the full Y range and not just
106  // 16 - 235 (see colorspaces.txt).
107  // Currently they are all treated the same way.
112 
113 #if FF_API_XVMC
116 #endif /* FF_API_XVMC */
117 
124  {0, AV_PIX_FMT_NONE}
125 };
126 
127 static const vf_info_t* const filters[]={
128  NULL
129 };
130 
131 /*
132 Unsupported filters
133 1bpp
134 ass
135 bmovl
136 crop
137 dvbscale
138 flip
139 expand
140 format
141 halfpack
142 lavc
143 lavcdeint
144 noformat
145 pp
146 scale
147 tfields
148 vo
149 yadif
150 zrmjpeg
151 */
152 
153 CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work
154 
155 enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
156  int i;
157  for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
158  ;
159  return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
160 }
161 
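The conversion_map table above is scanned linearly until its {0, AV_PIX_FMT_NONE} sentinel, and the same scan-to-sentinel pattern recurs throughout the file. Below is a minimal, self-contained sketch of that idea; the table entries, FourCC value, and names (fmt_pair, demo_map, demo_lookup) are placeholders, not the real IMGFMT_* map.

    #include <libavutil/pixfmt.h>

    struct fmt_pair { int fmt; enum AVPixelFormat pix_fmt; };

    /* placeholder table: the real code uses IMGFMT_* values from libmpcodecs */
    static const struct fmt_pair demo_map[] = {
        { 0x32315659 /* 'YV12' FourCC, for illustration only */, AV_PIX_FMT_YUV420P },
        { 0,                                                     AV_PIX_FMT_NONE    }, /* sentinel */
    };

    static enum AVPixelFormat demo_lookup(int mp_fmt)
    {
        int i;
        /* stop at a match or at the sentinel, which maps to AV_PIX_FMT_NONE */
        for (i = 0; demo_map[i].fmt && demo_map[i].fmt != mp_fmt; i++)
            ;
        return demo_map[i].pix_fmt;
    }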
162 typedef struct {
163  const AVClass *class;
164  AVFilterContext *avfctx;
165  vf_instance_t vf;
166  vf_instance_t next_vf;
167  int frame_returned;
168  char *filter;
169  enum AVPixelFormat in_pix_fmt;
170 } MPContext;
171 
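Several wrapper callbacks below (ff_vf_get_image, ff_vf_next_put_image, ff_vf_next_control, vf_default_query_format) recover the MPContext from a pointer to one of its embedded vf_instance_t members by subtracting that member's offset. A small sketch of the container-of idiom, using stand-in struct names rather than the real ones:

    #include <stddef.h>
    #include <stdint.h>

    struct inner { int dummy; };

    struct outer {
        int          id;
        struct inner member;   /* embedded by value, not a pointer */
    };

    /* Given a pointer to 'member', step back to the enclosing 'struct outer'. */
    static struct outer *outer_from_member(struct inner *p)
    {
        return (struct outer *)((uint8_t *)p - offsetof(struct outer, member));
    }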
172 #define OFFSET(x) offsetof(MPContext, x)
173 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
174 static const AVOption mp_options[] = {
175  { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
176  { NULL }
177 };
178 
178 
179 AVFILTER_DEFINE_CLASS(mp);
180 
181 void ff_mp_msg(int mod, int lev, const char *format, ... ){
182  va_list va;
183  va_start(va, format);
184  //FIXME convert lev/mod
185  av_vlog(NULL, AV_LOG_DEBUG, format, va);
186  va_end(va);
187 }
188 
189 int ff_mp_msg_test(int mod, int lev){
190  return 123;
191 }
192 
193 //Exact copy of vf.c
194 void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){
195  dst->pict_type= src->pict_type;
196  dst->fields = src->fields;
197  dst->qscale_type= src->qscale_type;
198  if(dst->width == src->width && dst->height == src->height){
199  dst->qstride= src->qstride;
200  dst->qscale= src->qscale;
201  }
202 }
203 
204 //Exact copy of vf.c
205 void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
206  if (vf->next->draw_slice) {
207  vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
208  return;
209  }
210  if (!vf->dmpi) {
211  ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
212  return;
213  }
214  if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
215  memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
216  src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
217  return;
218  }
219  memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
220  w, h, vf->dmpi->stride[0], stride[0]);
221  memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
222  src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
223  memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
224  src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
225 }
226 
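ff_vf_next_draw_slice leans on memcpy_pic(), libmpcodecs' strided rectangle copy: it copies a fixed number of bytes per row for a given number of rows, advancing source and destination by their respective strides. A rough sketch of such a helper (copy_rect is a hypothetical name, not the actual libmpcodecs implementation):

    #include <string.h>
    #include <stdint.h>

    /* Copy 'height' rows of 'bytes_per_line' bytes each between two buffers
     * that may use different line strides. */
    static void copy_rect(uint8_t *dst, const uint8_t *src,
                          int bytes_per_line, int height,
                          int dst_stride, int src_stride)
    {
        int y;
        for (y = 0; y < height; y++) {
            memcpy(dst, src, bytes_per_line);
            dst += dst_stride;
            src += src_stride;
        }
    }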
227 //Exact copy of vf.c
228 void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
229  int y;
230  if(mpi->flags&MP_IMGFLAG_PLANAR){
231  y0&=~1;h+=h&1;
232  if(x0==0 && w==mpi->width){
233  // full width clear:
234  memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
235  memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
236  memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
237  } else
238  for(y=y0;y<y0+h;y+=2){
239  memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
240  memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
241  memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
242  memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
243  }
244  return;
245  }
246  // packed:
247  for(y=y0;y<y0+h;y++){
248  unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
249  if(mpi->flags&MP_IMGFLAG_YUV){
250  unsigned int* p=(unsigned int*) dst;
251  int size=(mpi->bpp>>3)*w/4;
252  int i;
253 #if HAVE_BIGENDIAN
254 #define CLEAR_PACKEDYUV_PATTERN 0x00800080
255 #define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
256 #else
257 #define CLEAR_PACKEDYUV_PATTERN 0x80008000
258 #define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
259 #endif
260  if(mpi->flags&MP_IMGFLAG_SWAPPED){
261  for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
262  for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
263  } else {
264  for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
265  for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
266  }
267  } else
268  memset(dst,0,(mpi->bpp>>3)*w);
269  }
270 }
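The CLEAR_PACKEDYUV_PATTERN constants above encode black for packed 4:2:2 YUV: luma bytes are 0x00 and chroma bytes are the neutral value 0x80, so when stored little-endian the word 0x80008000 lays out as Y U Y V (00 80 00 80) and 0x00800080 as U Y V Y (80 00 80 00). A small sketch building the first of these from its byte layout (yuyv_black_word is a hypothetical helper):

    #include <stdint.h>

    /* Build the 32-bit fill word whose in-memory bytes are Y U Y V = 00 80 00 80.
     * Assembled for a little-endian store, this is 0x80008000. */
    static uint32_t yuyv_black_word(void)
    {
        const uint8_t b[4] = { 0x00, 0x80, 0x00, 0x80 }; /* Y U Y V */
        return (uint32_t)b[0]
             | (uint32_t)b[1] << 8
             | (uint32_t)b[2] << 16
             | (uint32_t)b[3] << 24;
    }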
271 
272 int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
273  return 1;
274 }
275 
276 //used by delogo
277 unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
278  return preferred;
279 }
280 
281 mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
282  MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
283  mp_image_t* mpi=NULL;
284  int w2;
285  int number = mp_imgtype >> 16;
286 
287  av_assert0(vf->next == NULL); // all existing filters call this just on next
288 
289  //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
290  if(vf->w==0 && w>0) vf->w=w;
291  if(vf->h==0 && h>0) vf->h=h;
292 
293  av_assert0(w == -1 || w >= vf->w);
294  av_assert0(h == -1 || h >= vf->h);
295  av_assert0(vf->w > 0);
296  av_assert0(vf->h > 0);
297 
298  av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);
299 
300  if (w == -1) w = vf->w;
301  if (h == -1) h = vf->h;
302 
303  w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;
304 
305  // Note: we should call libvo first to check if it supports direct rendering
306  // and if not, then fallback to software buffers:
307  switch(mp_imgtype & 0xff){
308  case MP_IMGTYPE_EXPORT:
309  if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
310  mpi=vf->imgctx.export_images[0];
311  break;
312  case MP_IMGTYPE_STATIC:
313  if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
314  mpi=vf->imgctx.static_images[0];
315  break;
316  case MP_IMGTYPE_TEMP:
317  if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
318  mpi=vf->imgctx.temp_images[0];
319  break;
320  case MP_IMGTYPE_IPB:
321  if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
322  if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
323  mpi=vf->imgctx.temp_images[0];
324  break;
325  }
326  case MP_IMGTYPE_IP:
327  if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
328  mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
329  vf->imgctx.static_idx^=1;
330  break;
331  case MP_IMGTYPE_NUMBERED:
332  if (number == -1) {
333  int i;
334  for (i = 0; i < NUM_NUMBERED_MPI; i++)
335  if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
336  break;
337  number = i;
338  }
339  if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
340  if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
341  mpi = vf->imgctx.numbered_images[number];
342  mpi->number = number;
343  break;
344  }
345  if(mpi){
346  mpi->type=mp_imgtype;
347  mpi->w=vf->w; mpi->h=vf->h;
348  // keep buffer allocation status & color flags only:
349 // mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
351  // accept restrictions, draw_slice and palette flags only:
353  if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
354  if(mpi->width!=w2 || mpi->height!=h){
355 // printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
356  if(mpi->flags&MP_IMGFLAG_ALLOCATED){
357  if(mpi->width<w2 || mpi->height<h){
358  // need to re-allocate buffer memory:
359  av_free(mpi->planes[0]);
360  mpi->flags&=~MP_IMGFLAG_ALLOCATED;
361  ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
362  }
363 // } else {
364  } {
365  mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
366  mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
367  }
368  }
369  if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
370  if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
371 
372  av_assert0(!vf->get_image);
373  // check libvo first!
374  if(vf->get_image) vf->get_image(vf,mpi);
375 
376  if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
377  // non-direct and not yet allocated image. allocate it!
378  if (!mpi->bpp) { // no way we can allocate this
379  ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
380  "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
381  return NULL;
382  }
383 
384  // check if the codec prefers an aligned stride:
385  if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
386  int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
387  mpi->flags&MP_IMGFLAG_YUV) ?
388  (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
389  w2=((w+align)&(~align));
390  if(mpi->width!=w2){
391 #if 0
392  // we have to change width... check if we CAN do it:
393  int flags=vf->query_format(vf,outfmt); // should not fail
394  if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
395 // printf("query -> 0x%X \n",flags);
396  if(flags&VFCAP_ACCEPT_STRIDE){
397 #endif
398  mpi->width=w2;
399  mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
400 // }
401  }
402  }
403 
404  ff_mp_image_alloc_planes(mpi);
405 // printf("clearing img!\n");
406  ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
407  }
408  }
409  av_assert0(!vf->start_slice);
411  if(vf->start_slice) vf->start_slice(vf,mpi);
412  if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
413  ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
414  "NULL"/*vf->info->name*/,
415  (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
416  ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
417  (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
418  mpi->width,mpi->height,mpi->bpp,
419  (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
420  (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
421  mpi->bpp*mpi->width*mpi->height/8);
422  ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
423  mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
424  mpi->stride[0], mpi->stride[1], mpi->stride[2],
425  mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
426  mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
427  }
428 
429  mpi->qscale = NULL;
430  mpi->usage_count++;
431  }
432 // printf("\rVF_MPI: %p %p %p %d %d %d \n",
433 // mpi->planes[0],mpi->planes[1],mpi->planes[2],
434 // mpi->stride[0],mpi->stride[1],mpi->stride[2]);
435  return mpi;
436 }
437 
438 int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
439  MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
440  AVFilterLink *outlink = m->avfctx->outputs[0];
441  AVFrame *picref = av_frame_alloc();
442  int i;
443 
444  av_assert0(vf->next);
445 
446  av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");
447 
448  if (!picref)
449  goto fail;
450 
451  picref->width = mpi->w;
452  picref->height = mpi->h;
453 
454  for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
455  picref->format = conversion_map[i].pix_fmt;
456 
457  for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++);
458  if (mpi->imgfmt == conversion_map[i].fmt)
459  picref->format = conversion_map[i].pix_fmt;
460 
461  memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));
462 
463  for(i=0; i<4 && mpi->stride[i]; i++){
464  picref->data[i] = mpi->planes[i];
465  }
466 
467  if(pts != MP_NOPTS_VALUE)
468  picref->pts= pts * av_q2d(outlink->time_base);
469 
470  if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
471  AVFrame *tofree = picref;
472  picref = av_frame_clone(picref);
473  av_frame_free(&tofree);
474  }
475 
476  ff_filter_frame(outlink, picref);
477  m->frame_returned++;
478 
479  return 1;
480 fail:
481  av_frame_free(&picref);
482  return 0;
483 }
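ff_vf_next_put_image and filter_frame below translate between libmpcodecs' double pts and AVFrame timestamps via av_q2d(), the seconds-per-tick value of an AVRational time base. For reference, FFmpeg's general-purpose helper for moving an integer timestamp from one time base to another is av_rescale_q(); a short sketch (rescale_ts is a hypothetical wrapper):

    #include <stdint.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    /* Convert a timestamp expressed in src_tb ticks into dst_tb ticks,
     * e.g. from an input link's time base to an output link's. */
    static int64_t rescale_ts(int64_t ts, AVRational src_tb, AVRational dst_tb)
    {
        return av_rescale_q(ts, src_tb, dst_tb);
    }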
484 
485 int ff_vf_next_config(struct vf_instance *vf,
486  int width, int height, int d_width, int d_height,
487  unsigned int voflags, unsigned int outfmt){
488 
489  av_assert0(width>0 && height>0);
490  vf->next->w = width; vf->next->h = height;
491 
492  return 1;
493 #if 0
494  int flags=vf->next->query_format(vf->next,outfmt);
495  if(!flags){
496  // hmm. colorspace mismatch!!!
497  //this is fatal for us ATM
498  return 0;
499  }
500  ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
501  miss=vf->default_reqs - (flags&vf->default_reqs);
502  if(miss&VFCAP_ACCEPT_STRIDE){
503  // vf requires stride support but vf->next doesn't support it!
504  // let's insert the 'expand' filter, it does the job for us:
505  vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
506  if(!vf2) return 0; // shouldn't happen!
507  vf->next=vf2;
508  }
509  vf->next->w = width; vf->next->h = height;
510  return 1;
511 #endif
512 }
513 
514 int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
515  MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
516  av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
517  return 0;
518 }
519 
520 static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
521  MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
522  int i;
523  av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
524 
525  for(i=0; conversion_map[i].fmt; i++){
526  if(fmt==conversion_map[i].fmt)
527  return 1; //we support all
528  }
529  return 0;
530 }
531 
532 
533 static av_cold int init(AVFilterContext *ctx)
534 {
535  MPContext *m = ctx->priv;
536  int cpu_flags = av_get_cpu_flags();
537  char name[256];
538  const char *args;
539  int i;
540 
541  ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
542  ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
543  ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
544  ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
545  ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
546  ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
547  ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
548  ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
549  ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
550  ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
551  ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;
552 
553  m->avfctx= ctx;
554 
555  args = m->filter;
556  if(!args || 1!=sscanf(args, "%255[^:=]", name)){
557  av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
558  return AVERROR(EINVAL);
559  }
560  args += strlen(name);
561  if (args[0] == '=')
562  args++;
563 
564  for(i=0; ;i++){
565  if(!filters[i] || !strcmp(name, filters[i]->name))
566  break;
567  }
568 
569  if(!filters[i]){
570  av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
571  return AVERROR(EINVAL);
572  }
573 
574  av_log(ctx, AV_LOG_WARNING,
575  "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
576  "once it has been ported to a native libavfilter.\n", name);
577 
578  memset(&m->vf,0,sizeof(m->vf));
579  m->vf.info= filters[i];
580 
581  m->vf.next = &m->next_vf;
582  m->vf.put_image= ff_vf_next_put_image;
583  m->vf.config= ff_vf_next_config;
584  m->vf.control= ff_vf_next_control;
585  m->vf.query_format= vf_default_query_format;
586  m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
587  m->vf.default_reqs=0;
588  if(m->vf.info->opts)
589  av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
590 #if 0
591  if(vf->info->opts) { // vf_vo get some special argument
592  const m_struct_t* st = vf->info->opts;
593  void* vf_priv = m_struct_alloc(st);
594  int n;
595  for(n = 0 ; args && args[2*n] ; n++)
596  m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
597  vf->priv = vf_priv;
598  args = NULL;
599  } else // Otherwise we should have the '_oldargs_'
600  if(args && !strcmp(args[0],"_oldargs_"))
601  args = (char**)args[1];
602  else
603  args = NULL;
604 #endif
605  if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
606  av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
607  return -1;
608  }
609 
610  return 0;
611 }
612 
613 static av_cold void uninit(AVFilterContext *ctx)
614 {
615  MPContext *m = ctx->priv;
616  vf_instance_t *vf = &m->vf;
617 
618  while(vf){
619  vf_instance_t *next = vf->next;
620  if(vf->uninit)
621  vf->uninit(vf);
622  ff_free_mp_image(vf->imgctx.static_images[0]);
623  ff_free_mp_image(vf->imgctx.static_images[1]);
624  ff_free_mp_image(vf->imgctx.temp_images[0]);
625  ff_free_mp_image(vf->imgctx.export_images[0]);
626  vf = next;
627  }
628 }
629 
630 static int query_formats(AVFilterContext *ctx)
631 {
632  AVFilterFormats *avfmts=NULL;
633  MPContext *m = ctx->priv;
634  enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
635  int i;
636 
637  for(i=0; conversion_map[i].fmt; i++){
638  av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
639  if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
640  av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
641  if (conversion_map[i].pix_fmt != lastpixfmt) {
642  ff_add_format(&avfmts, conversion_map[i].pix_fmt);
643  lastpixfmt = conversion_map[i].pix_fmt;
644  }
645  }
646  }
647 
648  if (!avfmts)
649  return -1;
650 
651  //We assume all allowed input formats are also allowed output formats
652  ff_set_common_formats(ctx, avfmts);
653  return 0;
654 }
655 
656 static int config_inprops(AVFilterLink *inlink)
657 {
658  MPContext *m = inlink->dst->priv;
659  int i;
660  for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
661 
662  av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);
663 
664  m->vf.fmt.have_configured = 1;
665  m->vf.fmt.orig_height = inlink->h;
666  m->vf.fmt.orig_width = inlink->w;
667  m->vf.fmt.orig_fmt = conversion_map[i].fmt;
668 
669  if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
670  return -1;
671 
672  return 0;
673 }
674 
675 static int config_outprops(AVFilterLink *outlink)
676 {
677  MPContext *m = outlink->src->priv;
678 
679  outlink->w = m->next_vf.w;
680  outlink->h = m->next_vf.h;
681 
682  return 0;
683 }
684 
685 static int request_frame(AVFilterLink *outlink)
686 {
687  MPContext *m = outlink->src->priv;
688  int ret;
689 
690  av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");
691 
692  for(m->frame_returned=0; !m->frame_returned;){
693  ret=ff_request_frame(outlink->src->inputs[0]);
694  if(ret<0)
695  break;
696  }
697 
698  av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
699  return ret;
700 }
701 
702 static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
703 {
704  MPContext *m = inlink->dst->priv;
705  int i;
706  double pts= MP_NOPTS_VALUE;
707  mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);
708 
709  if(inpic->pts != AV_NOPTS_VALUE)
710  pts= inpic->pts / av_q2d(inlink->time_base);
711 
712  for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
713  ff_mp_image_setfmt(mpi, conversion_map[i].fmt);
714  m->in_pix_fmt = inlink->format;
715 
716  memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
717  memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
718 
719  if (inpic->interlaced_frame)
720  mpi->fields |= MP_IMGFIELD_INTERLACED;
721  if (inpic->top_field_first)
722  mpi->fields |= MP_IMGFIELD_TOP_FIRST;
723  if (inpic->repeat_pict)
724  mpi->fields |= MP_IMGFIELD_REPEAT_FIRST;
725 
726  // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
727  mpi->flags |= MP_IMGFLAG_READABLE;
728  if(!av_frame_is_writable(inpic))
729  mpi->flags |= MP_IMGFLAG_PRESERVE;
730  if(m->vf.put_image(&m->vf, mpi, pts) == 0){
731  av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
732  }else{
733  av_frame_free(&inpic);
734  }
735  ff_free_mp_image(mpi);
736  return 0;
737 }
738 
739 static const AVFilterPad mp_inputs[] = {
740  {
741  .name = "default",
742  .type = AVMEDIA_TYPE_VIDEO,
743  .filter_frame = filter_frame,
744  .config_props = config_inprops,
745  },
746  { NULL }
747 };
748 
749 static const AVFilterPad mp_outputs[] = {
750  {
751  .name = "default",
752  .type = AVMEDIA_TYPE_VIDEO,
753  .request_frame = request_frame,
754  .config_props = config_outprops,
755  },
756  { NULL }
757 };
758 
759 AVFilter avfilter_vf_mp = {
760  .name = "mp",
761  .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
762  .init = init,
763  .uninit = uninit,
764  .priv_size = sizeof(MPContext),
765  .query_formats = query_formats,
766  .inputs = mp_inputs,
767  .outputs = mp_outputs,
768  .priv_class = &mp_class,
769 };