FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "internal.h"
38 #include "mpegutils.h"
39 #include "mpegvideo.h"
40 #include "mpegvideodec.h"
41 #include "mpeg4videodec.h"
42 #include "libavutil/refstruct.h"
43 #include "thread.h"
44 #include "threadprogress.h"
45 #include "wmv2dec.h"
46 
/**
 * Template for the 1x1 ("maximum lowres") H.264-style chroma MC kernel.
 * Produces one output pixel per row by bilinear interpolation of the 2x2
 * source neighbourhood selected by the subpel offsets x, y (each 0..7);
 * the branches skip reading taps whose weight is zero.
 * OP is op_put (store) or op_avg (average with existing dst).
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc1(uint8_t *dst /*align 8*/, const uint8_t *src /*align 1*/, ptrdiff_t stride, int h, int x, int y)\
{\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    \
    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);\
\
    if (D) {\
        /* generic case: all four taps contribute */\
        for (int i = 0; i < h; ++i) {\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            dst += stride;\
            src += stride;\
        }\
    } else if (B + C) {\
        /* one-dimensional case: horizontal (C==0) or vertical (B==0) */\
        const int E    = B + C;\
        const int step = C ? stride : 1;\
        for (int i = 0; i < h; ++i) {\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            dst += stride;\
            src += stride;\
        }\
    } else {\
        /* integer-pel position: plain copy/average */\
        for (int i = 0; i < h; ++i) {\
            OP(dst[0], (A*src[0]));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
79 
/* Round the 6.6 fixed-point MC sum back to pixel range; op_avg additionally
 * averages the result with the existing destination pixel (with rounding). */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
82 
85 
{
    enum ThreadingStatus thread_status;

    /* mirror the relevant codec-context fields into the decoder context */
    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag = ff_toupper4(avctx->codec_tag);

    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    /* install the local scalar 1x1 kernels in the smallest-size slot;
     * these are only exercised by lowres decoding */
    s->h264chroma.avg_h264_chroma_pixels_tab[3] = avg_h264_chroma_mc1;
    s->h264chroma.put_h264_chroma_pixels_tab[3] = put_h264_chroma_mc1;

    if (s->picture_pool) // VC-1 can call this multiple times
        return 0;

    /* with frame threading the picture pool is shared across thread
     * contexts: only a non-copy context allocates it, copies receive it
     * through the sync below */
    thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
    if (thread_status != FF_THREAD_IS_COPY) {
        s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
        if (!s->picture_pool)
            return AVERROR(ENOMEM);
    }
    return 0;
}
118 
                                 const AVCodecContext *src)
{
    MpegEncContext *const s1 = src->priv_data;
    MpegEncContext *const s  = dst->priv_data;
    int ret = 0;

    /* updating a context from itself is a no-op */
    if (dst == src)
        return 0;

    av_assert0(s != s1);

    /* propagate a dimension change / pending reinit from the source thread */
    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width  = s1->width;
        /* NOTE(review): the error check preceding this early return is not
         * visible in this excerpt */
            return ret;
        ret = 1; /* tell the caller a size change was applied */
    }

    s->quarter_sample = s1->quarter_sample;

    /* carry the reference pictures over to this thread's context */
    ff_mpv_replace_picture(&s->cur_pic,  &s1->cur_pic);
    ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
    ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);

    s->linesize   = s1->linesize;
    s->uvlinesize = s1->uvlinesize;

    // Error/bug resilience
    s->workaround_bugs = s1->workaround_bugs;

    // MPEG-4 timing info: copy the contiguous member range
    // [last_time_base .. pb_field_time] in one memcpy
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->low_delay = s1->low_delay;

    // MPEG-2/interlacing info: likewise the contiguous member range
    // [progressive_sequence .. first_field]
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->first_field + sizeof(s1->first_field) - (char *) &s1->progressive_sequence);

    return ret;
}
165 
{
    /* release this context's reference to the (possibly shared) picture
     * pool; outstanding pictures keep their own references */
    av_refstruct_pool_uninit(&s->picture_pool);
    return 0;
}
174 
{
    int err = 0;

    /* only valid on an already-initialized context */
    if (!s->context_initialized)
        return AVERROR(EINVAL);

    /* drop all reference pictures: they were allocated for the old size */
    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->next_pic);
    ff_mpv_unref_picture(&s->cur_pic);

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = ff_mpv_init_context_frame(s)))
        goto fail;

    /* reset the per-thread (slice threading) contexts; slot 0 is the
     * main context itself */
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        if (err < 0)
            goto fail;
    }
    s->context_reinit = 0; /* reinit completed successfully */

    return 0;
 fail:
    s->context_reinit = 1; /* retry the reinit on the next frame */
    return err;
}
218 
/* Get a picture from the pool, allocate its frame buffer and the
 * per-picture accessory tables, and attach it to *dst.
 * Returns 0 on success, a negative AVERROR on failure. */
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
{
    AVCodecContext *avctx = s->avctx;
    MPVPicture *pic = av_refstruct_pool_get(s->picture_pool);
    int ret;

    if (!pic)
        return AVERROR(ENOMEM);

    dst->ptr = pic;

    pic->reference = reference;

    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
        reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width  = avctx->width;
        pic->f->height = avctx->height;
        pic->f->format = avctx->pix_fmt;
    }
    if (ret < 0)
        goto fail;

    /* validate the buffer's line sizes and record the shared ones */
    ret = ff_mpv_pic_check_linesize(avctx, pic->f, &s->linesize, &s->uvlinesize);
    if (ret < 0)
        goto fail;

    if (ret < 0)
        goto fail;

    /* the buffer pools must already be sized for the current MB geometry */
    av_assert1(s->mb_width  == s->buffer_pools.alloc_mb_width);
    av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
               FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
    av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
    ret = ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
                                       &s->buffer_pools, s->mb_height);
    if (ret < 0)
        goto fail;

    return 0;
fail:
    return ret;
}
270 
{
    MPVPicture *pic;
    int ret = alloc_picture(s, dst, 1); /* dummy frames act as references */
    if (ret < 0)
        return ret;

    pic = dst->ptr;
    pic->dummy = 1;

    /* mark the frame as fully decoded so frame-threading consumers
     * never block waiting on it */
    ff_thread_progress_report(&pic->progress, INT_MAX);

    return 0;
}
285 
286 static void color_frame(AVFrame *frame, int luma)
287 {
288  int h_chroma_shift, v_chroma_shift;
289 
290  for (int i = 0; i < frame->height; i++)
291  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
292 
293  if (!frame->data[1])
294  return;
295  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
296  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
297  memset(frame->data[1] + frame->linesize[1] * i,
298  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
299  memset(frame->data[2] + frame->linesize[2] * i,
300  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
301  }
302 }
303 
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
    av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
    /* a P/B frame arrived without a previous reference: synthesize one */
    if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic.ptr)
               "allocating dummy last picture for B frame\n");
        else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&
                 (s->picture_structure == PICT_FRAME || s->first_field))
               "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        ret = alloc_dummy_frame(s, &s->last_pic);
        if (ret < 0)
            return ret;

        if (!avctx->hwaccel) {
            /* FLV/H.263 dummies are filled with luma 16 (video black),
             * everything else with mid-grey */
            int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
            color_frame(s->last_pic.ptr->f, luma_val);
        }
    }
    /* a B frame also needs a future reference */
    if (!s->next_pic.ptr && s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        ret = alloc_dummy_frame(s, &s->next_pic);
        if (ret < 0)
            return ret;
    }

    /* from here on, non-I frames always have a usable last picture */
    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic.ptr &&
                                                     s->last_pic.ptr->f->buf[0]));

    return 0;
}
342 
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 * Allocates the current picture, exports frame flags and rotates the
 * reference pictures. Returns 0 on success, a negative error code on failure.
 */
{
    int ret;

    s->mb_skipped = 0;

        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return AVERROR_BUG;
    }

    /* the current picture is (re)allocated for every frame; B frames and
     * droppable frames are never kept as references */
    ff_mpv_unref_picture(&s->cur_pic);
    ret = alloc_picture(s, &s->cur_pic,
                        s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
    if (ret < 0)
        return ret;

    /* export interlacing information on the output frame */
    s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
    s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
                                (!s->progressive_frame && !s->progressive_sequence);
    s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;

    /* I frames are the keyframes of this codec family */
    s->cur_pic.ptr->f->pict_type = s->pict_type;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
    else
        s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;

    /* rotate references: next becomes last, current becomes next */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_workpic_from_pic(&s->last_pic, s->next_pic.ptr);
        if (!s->droppable)
            ff_mpv_workpic_from_pic(&s->next_pic, s->cur_pic.ptr);
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
            s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
            s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
            s->cur_pic.ptr ? s->cur_pic.ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if (ret < 0)
        return ret;

    /* debugging aid: grey out the frame so only the residue is visible */
    if (s->avctx->debug & FF_DEBUG_NOMC)
        color_frame(s->cur_pic.ptr->f, 0x80);

    return 0;
}
396 
/* called after a frame has been decoded. */
{
    emms_c();

    /* unblock frame threads that wait on this picture as a reference */
    if (s->cur_pic.reference)
        ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
}
405 
{
    /* thin wrapper: forward the picture's debug tables together with this
     * context's quarter_sample flag */
    ff_print_debug_info2(s->avctx, pict, p->mb_type,
                         p->qscale_table, p->motion_val,
                         p->mb_width, p->mb_height, p->mb_stride, s->quarter_sample);
}
412 
414  const MPVPicture *p, int qp_type)
415 {
416  AVVideoEncParams *par;
417  int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
418  unsigned int nb_mb = p->mb_height * p->mb_width;
419 
420  if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
421  return 0;
422 
424  if (!par)
425  return AVERROR(ENOMEM);
426 
427  for (unsigned y = 0; y < p->mb_height; y++)
428  for (unsigned x = 0; x < p->mb_width; x++) {
429  const unsigned int block_idx = y * p->mb_width + x;
430  const unsigned int mb_xy = y * p->mb_stride + x;
431  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
432 
433  b->src_x = x * 16;
434  b->src_y = y * 16;
435  b->w = 16;
436  b->h = 16;
437 
438  b->delta_qp = p->qscale_table[mb_xy] * mult;
439  }
440 
441  return 0;
442 }
443 
{
    /* forward to the generic slice-callback helper with this context's
     * current and last pictures */
    ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
                       s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
451 
{
    MpegEncContext *const s = avctx->priv_data;

    /* drop all pictures and reset the decoding position; the next frame
     * decoded after a flush starts from scratch */
    ff_mpv_unref_picture(&s->cur_pic);
    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->next_pic);

    s->mb_x = s->mb_y = 0;

    s->pp_time = 0;
}
464 
                              uint8_t *dest, const uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, const h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = lowres;
    const int s_mask   = (2 << lowres) - 1; /* subpel bits kept at this lowres */
    int emu = 0;
    int sx, sy;

    av_assert2(op_index <= 3);

    /* qpel vectors are approximated by hpel in lowres mode */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    /* split the vector into an integer-pel offset and a subpel fraction */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* reference area leaves the frame: run the MC on an emulated-edge copy */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    /* rescale the subpel fraction to the 1/8 grid of the chroma MC kernels */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu; /* nonzero if edge emulation was used */
}
512 
/* apply one mpeg motion vector to the three components */
                                uint8_t *dest_y,
                                uint8_t *dest_cb,
                                uint8_t *dest_cr,
                                int field_based,
                                int bottom_field,
                                int field_select,
                                uint8_t *const *ref_picture,
                                const h264_chroma_mc_func *pix_op,
                                int motion_x, int motion_y,
                                int h, int mb_y)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = lowres - 1 + s->chroma_x_shift;
    const int block_s    = 8 >> lowres;          /* block size at this lowres */
    const int s_mask     = (2 << lowres) - 1;    /* subpel bits of a vector */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    /* chroma height: halved for 4:2:0/4:1:1-style vertical subsampling */
    int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;

    av_assert2(op_index <= 3);

    linesize   = s->cur_pic.linesize[0] << field_based;
    uvlinesize = s->cur_pic.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    /* split the luma vector into integer-pel and subpel parts */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s               + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    /* derive the chroma vector/position according to the output format */
    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) {
            /* 4:2:0: chroma vector is half the luma vector in both axes */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s               + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {
                //Chroma422: only horizontally subsampled
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444: chroma is co-sited with luma
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* reference block (partly) outside the frame: emulate the edges */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, field_select + hc<<s->chroma_y_shift), 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based), h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            /* chroma planes share the tail of the edge buffer */
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale subpel fractions to the MC kernels' 1/8 grid and run MC */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
655 
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = lowres;
    const int block_s    = 8 >> lowres;       /* chroma block size at this lowres */
    const int s_mask     = (2 << lowres) - 1; /* subpel bits of the vector */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    const uint8_t *ptr;

    av_assert2(op_index <= 3);

    /* qpel vectors are approximated by hpel in lowres mode */
    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */

    /* split the vector into integer-pel and subpel parts */
    sx    = mx & s_mask;
    sy    = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    /* Cb: emulate edges if the reference block crosses the picture border */
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr: same source offset; reuse the emulation decision made for Cb */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
714 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* a single vector covers the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors, one per 8x8 block; chroma uses their sum */
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                                            s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* a same-parity reference of a second field is the current
             * picture itself, not the previous reference */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->cur_pic.ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->cur_pic.ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance to the lower 16x8 half */
            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime-style MC: put the first prediction, then average the
         * opposite-parity prediction on top of it */
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->cur_pic.ptr->f->data;
                }
            }
        }
        break;
    default:
        av_unreachable("No other mpegvideo MV types exist");
    }
}
854 
/**
 * find the lowest MB row referenced in the MVs
 * Used to know how far a frame-threaded reference must have progressed
 * before this macroblock can be motion-compensated.
 */
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int off, mvs;

    /* field pictures and global MC may reference arbitrary rows:
     * conservatively report the last row */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* track the extreme vertical displacements over all vectors */
    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* worst-case displacement (normalized to qpel, rounded up) in MB rows */
    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
892 
893 /* add block[] to dest[] */
894 static inline void add_dct(MpegEncContext *s,
895  int16_t block[][64], int i, uint8_t *dest, int line_size)
896 {
897  if (s->block_last_index[i] >= 0) {
898  s->idsp.idct_add(dest, line_size, block[i]);
899  }
900 }
901 
/* put block[] to dest[] */
/* Dequantize an intra block in place, then overwrite dest with its inverse
 * transform (idct_put, not idct_add). Unlike add_dct() there is no
 * block_last_index guard — presumably intra blocks are always coded;
 * NOTE(review): confirm against the callers. */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
909 
910 static inline void add_dequant_dct(MpegEncContext *s,
911  int16_t block[][64], int i, uint8_t *dest, int line_size, int qscale)
912 {
913  if (s->block_last_index[i] >= 0) {
914  s->dct_unquantize_inter(s, block[i], i, qscale);
915 
916  s->idsp.idct_add(dest, line_size, block[i]);
917  }
918 }
919 
/* Codec-class selectors for the mpv_reconstruct_mb_internal() template:
 * they let the compiler discard branches that cannot apply to the codec
 * class a given instantiation serves. */
#define NOT_MPEG12_H261 0
#define MAY_BE_MPEG12_H261 1
#define DEFINITELY_MPEG12_H261 2
923 
924 /* generic function called after a macroblock has been parsed by the decoder.
925 
926  Important variables used:
927  s->mb_intra : true if intra macroblock
928  s->mv_dir : motion vector direction
929  s->mv_type : motion vector type
930  s->mv : motion vector
931  s->interlaced_dct : true if interlaced dct used (mpeg2)
932  */
933 static av_always_inline
935  int lowres_flag, int is_mpeg12)
936 {
937 #define IS_MPEG12_H261(s) (is_mpeg12 == MAY_BE_MPEG12_H261 ? ((s)->out_format <= FMT_H261) : is_mpeg12)
938  uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
939  int dct_linesize, dct_offset;
940  const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
941  const int uvlinesize = s->cur_pic.linesize[1];
942  const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;
943 
944  dct_linesize = linesize << s->interlaced_dct;
945  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
946 
947  if (!s->mb_intra) {
948  /* motion handling */
949  if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12_H261 &&
950  s->avctx->active_thread_type & FF_THREAD_FRAME) {
951  if (s->mv_dir & MV_DIR_FORWARD) {
952  ff_thread_progress_await(&s->last_pic.ptr->progress,
954  }
955  if (s->mv_dir & MV_DIR_BACKWARD) {
956  ff_thread_progress_await(&s->next_pic.ptr->progress,
958  }
959  }
960 
961  if (lowres_flag) {
962  const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
963 
964  if (s->mv_dir & MV_DIR_FORWARD) {
965  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix);
966  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
967  }
968  if (s->mv_dir & MV_DIR_BACKWARD) {
969  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix);
970  }
971  } else {
972  const op_pixels_func (*op_pix)[4];
973  const qpel_mc_func (*op_qpix)[16];
974 
975  if ((is_mpeg12 == DEFINITELY_MPEG12_H261 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
976  op_pix = s->hdsp.put_pixels_tab;
977  op_qpix = s->qdsp.put_qpel_pixels_tab;
978  } else {
979  op_pix = s->hdsp.put_no_rnd_pixels_tab;
980  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
981  }
982  if (s->mv_dir & MV_DIR_FORWARD) {
983  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix, op_qpix);
984  op_pix = s->hdsp.avg_pixels_tab;
985  op_qpix = s->qdsp.avg_qpel_pixels_tab;
986  }
987  if (s->mv_dir & MV_DIR_BACKWARD) {
988  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix, op_qpix);
989  }
990  }
991 
992  /* skip dequant / idct if we are really late ;) */
993  if (s->avctx->skip_idct) {
994  if ( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
995  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
996  || s->avctx->skip_idct >= AVDISCARD_ALL)
997  return;
998  }
999 
1000  /* add dct residue */
1001  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && s->dct_unquantize_inter) {
1002  // H.263, H.263+, H.263I, FLV, RV10, RV20 and MPEG-4 with MPEG-2 quantization
1003  add_dequant_dct(s, block, 0, dest_y , dct_linesize, s->qscale);
1004  add_dequant_dct(s, block, 1, dest_y + block_size, dct_linesize, s->qscale);
1005  add_dequant_dct(s, block, 2, dest_y + dct_offset , dct_linesize, s->qscale);
1006  add_dequant_dct(s, block, 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1007 
1008  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1009  av_assert2(s->chroma_y_shift);
1010  add_dequant_dct(s, block, 4, dest_cb, uvlinesize, s->chroma_qscale);
1011  add_dequant_dct(s, block, 5, dest_cr, uvlinesize, s->chroma_qscale);
1012  }
1013  } else if (is_mpeg12 == DEFINITELY_MPEG12_H261 || lowres_flag || (s->codec_id != AV_CODEC_ID_WMV2)) {
1014  // H.261, MPEG-1, MPEG-2, MPEG-4 with H.263 quantization,
1015  // MSMP4V1-3 and WMV1.
1016  // Also RV30, RV40 and the VC-1 family when performing error resilience,
1017  // but all blocks are skipped in this case.
1018  add_dct(s, block, 0, dest_y , dct_linesize);
1019  add_dct(s, block, 1, dest_y + block_size, dct_linesize);
1020  add_dct(s, block, 2, dest_y + dct_offset , dct_linesize);
1021  add_dct(s, block, 3, dest_y + dct_offset + block_size, dct_linesize);
1022 
1023  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1024  if (s->chroma_y_shift) {//Chroma420
1025  add_dct(s, block, 4, dest_cb, uvlinesize);
1026  add_dct(s, block, 5, dest_cr, uvlinesize);
1027  } else {
1028  //chroma422
1029  dct_linesize = uvlinesize << s->interlaced_dct;
1030  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1031 
1032  add_dct(s, block, 4, dest_cb, dct_linesize);
1033  add_dct(s, block, 5, dest_cr, dct_linesize);
1034  add_dct(s, block, 6, dest_cb + dct_offset, dct_linesize);
1035  add_dct(s, block, 7, dest_cr + dct_offset, dct_linesize);
1036  if (!s->chroma_x_shift) {//Chroma444
1037  add_dct(s, block, 8, dest_cb + block_size, dct_linesize);
1038  add_dct(s, block, 9, dest_cr + block_size, dct_linesize);
1039  add_dct(s, block, 10, dest_cb + block_size + dct_offset, dct_linesize);
1040  add_dct(s, block, 11, dest_cr + block_size + dct_offset, dct_linesize);
1041  }
1042  }
1043  } //fi gray
1044  } else if (CONFIG_WMV2_DECODER) {
1045  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1046  }
1047  } else {
1048  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1049  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1050  if (is_mpeg12 != DEFINITELY_MPEG12_H261 && CONFIG_MPEG4_DECODER &&
1051  /* s->codec_id == AV_CODEC_ID_MPEG4 && */
1052  s->avctx->bits_per_raw_sample > 8) {
1053  ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
1054  uvlinesize, dct_linesize, dct_offset);
1055  } else if (!IS_MPEG12_H261(s)) {
1056  /* dct only in intra block */
1057  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1058  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1059  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1060  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1061 
1062  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1063  if (s->chroma_y_shift) {
1064  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1065  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1066  } else {
1067  dct_offset >>= 1;
1068  dct_linesize >>= 1;
1069  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1070  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1071  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1072  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1073  }
1074  }
1075  } else {
1076  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1077  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1078  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1079  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1080 
1081  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1082  if (s->chroma_y_shift) {
1083  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1084  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1085  } else {
1086  dct_linesize = uvlinesize << s->interlaced_dct;
1087  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1088 
1089  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1090  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1091  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1092  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1093  if (!s->chroma_x_shift) { //Chroma444
1094  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1095  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1096  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1097  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1098  }
1099  }
1100  } //gray
1101  }
1102  }
1103 }
1104 
1105 static av_cold void debug_dct_coeffs(MPVContext *s, const int16_t block[][64])
1106 {
1107  if (!block) // happens when called via error resilience
1108  return;
1109 
1110  void *const logctx = s->avctx;
1111  const uint8_t *const idct_permutation = s->idsp.idct_permutation;
1112 
1113  /* print DCT coefficients */
1114  av_log(logctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1115  for (int i = 0; i < 6; i++) {
1116  for (int j = 0; j < 64; j++) {
1117  av_log(logctx, AV_LOG_DEBUG, "%5d",
1118  block[i][idct_permutation[j]]);
1119  }
1120  av_log(logctx, AV_LOG_DEBUG, "\n");
1121  }
1122 }
1123 
1125 {
1126  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1127  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1128 
1129  s->cur_pic.qscale_table[mb_xy] = s->qscale;
1130 
1131  /* avoid copy if macroblock skipped in last frame too */
1132  if (s->mb_skipped) {
1133  s->mb_skipped = 0;
1134  av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
1135  *mbskip_ptr = 1;
1136  } else if (!s->cur_pic.reference) {
1137  *mbskip_ptr = 1;
1138  } else{
1139  *mbskip_ptr = 0; /* not skipped */
1140  }
1141 
1142  if (s->avctx->debug & FF_DEBUG_DCT_COEFF)
1144 
1145  av_assert2((s->out_format <= FMT_H261) == (s->out_format == FMT_H261 || s->out_format == FMT_MPEG1));
1146  if (!s->avctx->lowres) {
1147 #if !CONFIG_SMALL
1148  if (s->out_format <= FMT_H261)
1150  else
1152 #else
1154 #endif
1155  } else
1157 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:174
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:247
av_clip
#define av_clip
Definition: common.h:100
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1327
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:86
threadprogress.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:118
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:176
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1002
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:903
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
MpegEncContext::workaround_bugs
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:92
AVFrame::width
int width
Definition: frame.h:499
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
op_avg
#define op_avg(a, b)
Definition: mpegvideo_dec.c:80
b
#define b
Definition: input.c:42
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:198
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:86
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpegvideo_dec.c:921
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:54
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:86
mpegutils.h
thread.h
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:171
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:178
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
fail
#define fail()
Definition: checkasm.h:208
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:103
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:465
MpegEncContext::width
int width
Definition: mpegvideo.h:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
ff_mpv_init_context_frame
av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:210
MPVPicture::dummy
int dummy
Picture is a dummy and should not be output.
Definition: mpegpicture.h:81
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:106
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:452
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2278
s
#define s(width, name)
Definition: cbs_vp9.c:198
MpegEncContext::last_time_base
int last_time_base
Definition: mpegvideo.h:220
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:101
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1038
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:134
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:55
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:230
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
MpegEncContext::picture_pool
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
Definition: mpegvideo.h:105
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:185
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:413
NULL
#define NULL
Definition: coverity.c:32
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:193
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:96
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:128
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:190
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:166
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
DEFINITELY_MPEG12_H261
#define DEFINITELY_MPEG12_H261
Definition: mpegvideo_dec.c:922
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1342
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
IS_MPEG12_H261
#define IS_MPEG12_H261(s)
lowres
static int lowres
Definition: ffplay.c:330
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
Definition: mpegvideo_dec.c:271
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:175
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:304
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
MpegEncContext::pb_field_time
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:227
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
f
f
Definition: af_crystalizer.c:122
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:154
height
#define height
Definition: dsp.h:89
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: h263.h:30
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:101
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:444
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
op_put
#define op_put(a, b)
Definition: mpegvideo_dec.c:81
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:229
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:347
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t block[][64], int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:910
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:177
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:286
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:858
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:221
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
emms.h
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:253
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:406
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo_dec.c:934
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:104
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
internal.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:514
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:193
av_always_inline
#define av_always_inline
Definition: attributes.h:63
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
add_dct
static void add_dct(MpegEncContext *s, int16_t block[][64], int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:894
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:122
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:726
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1389
avcodec.h
ff_mpv_workpic_from_pic
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
Definition: mpegpicture.c:128
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:656
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1932
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
update_thread_context for mpegvideo-based decoders.
Definition: mpegvideo_dec.c:119
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:260
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
alloc_picture
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
Definition: mpegvideo_dec.c:219
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:398
NOT_MPEG12_H261
#define NOT_MPEG12_H261
Definition: mpegvideo_dec.c:920
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MPVContext *s, int16_t block[][64])
Definition: mpegvideo_dec.c:1124
MPVWorkPicture
Definition: mpegpicture.h:95
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
MPVPicture::progress
ThreadProgress progress
Definition: mpegpicture.h:92
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:272
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:170
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mpv_free_context_frame
av_cold void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:421
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:228
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
H264_CHROMA_MC
#define H264_CHROMA_MC(OPNAME, OP)
Definition: mpegvideo_dec.c:47
video_enc_params.h
debug_dct_coeffs
static av_cold void debug_dct_coeffs(MPVContext *s, const int16_t block[][64])
Definition: mpegvideo_dec.c:1105
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:175
h263.h