FFmpeg
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "mpeg4videodec.h"
41 #include "thread.h"
42 #include "threadframe.h"
43 #include "wmv2dec.h"
44 
46 {
48 
49  s->avctx = avctx;
50  s->width = avctx->coded_width;
51  s->height = avctx->coded_height;
52  s->codec_id = avctx->codec->id;
53  s->workaround_bugs = avctx->workaround_bugs;
54 
55  /* convert fourcc to upper case */
56  s->codec_tag = ff_toupper4(avctx->codec_tag);
57 
59  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
60 }
61 
63  const AVCodecContext *src)
64 {
65  MpegEncContext *const s1 = src->priv_data;
66  MpegEncContext *const s = dst->priv_data;
67  int ret;
68 
69  if (dst == src)
70  return 0;
71 
72  av_assert0(s != s1);
73 
74  // FIXME can parameters change on I-frames?
75  // in that case dst may need a reinit
76  if (!s->context_initialized) {
77  void *private_ctx = s->private_ctx;
78  int err;
79  memcpy(s, s1, sizeof(*s));
80 
81  s->context_initialized = 0;
82  s->context_reinit = 0;
83  s->avctx = dst;
84  s->private_ctx = private_ctx;
85  s->bitstream_buffer = NULL;
86  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
87 
88  if (s1->context_initialized) {
89  if ((err = ff_mpv_common_init(s)) < 0)
90  return err;
91  }
92  }
93 
94  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
95  s->height = s1->height;
96  s->width = s1->width;
98  return ret;
99  }
100 
101  s->quarter_sample = s1->quarter_sample;
102 
103  s->coded_picture_number = s1->coded_picture_number;
104  s->picture_number = s1->picture_number;
105 
106  av_assert0(!s->picture || s->picture != s1->picture);
107  if (s->picture)
108  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
109  ff_mpeg_unref_picture(&s->picture[i]);
110  if (s1->picture && s1->picture[i].f->buf[0] &&
111  (ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
112  return ret;
113  }
114 
115 #define UPDATE_PICTURE(pic)\
116 do {\
117  ff_mpeg_unref_picture(&s->pic);\
118  if (s1->pic.f && s1->pic.f->buf[0])\
119  ret = ff_mpeg_ref_picture(&s->pic, &s1->pic);\
120  else\
121  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
122  if (ret < 0)\
123  return ret;\
124 } while (0)
125 
129 
130 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
131  ((pic && pic >= old_ctx->picture && \
132  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
133  &new_ctx->picture[pic - old_ctx->picture] : NULL)
134 
135  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
136  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
137  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
138 
139  // Error/bug resilience
140  s->workaround_bugs = s1->workaround_bugs;
141  s->padding_bug_score = s1->padding_bug_score;
142 
143  // MPEG-4 timing info
144  memcpy(&s->last_time_base, &s1->last_time_base,
145  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
146  (char *) &s1->last_time_base);
147 
148  // B-frame info
149  s->max_b_frames = s1->max_b_frames;
150  s->low_delay = s1->low_delay;
151  s->droppable = s1->droppable;
152 
153  // DivX handling (doesn't work)
154  s->divx_packed = s1->divx_packed;
155 
156  if (s1->bitstream_buffer) {
157  av_fast_padded_malloc(&s->bitstream_buffer,
158  &s->allocated_bitstream_buffer_size,
159  s1->bitstream_buffer_size);
160  if (!s->bitstream_buffer) {
161  s->bitstream_buffer_size = 0;
162  return AVERROR(ENOMEM);
163  }
164  s->bitstream_buffer_size = s1->bitstream_buffer_size;
165  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
166  s1->bitstream_buffer_size);
167  }
168 
169  // linesize-dependent scratch buffer allocation
170  if (!s->sc.edge_emu_buffer)
171  if (s1->linesize) {
172  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
173  &s->sc, s1->linesize) < 0) {
174  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
175  "scratch buffers.\n");
176  return AVERROR(ENOMEM);
177  }
178  } else {
179  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
180  "be allocated due to unknown size.\n");
181  }
182 
183  // MPEG-2/interlacing info
184  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
185  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
186 
187  return 0;
188 }
189 
191 {
192  int err = 0;
193 
194  if (!s->context_initialized)
195  return AVERROR(EINVAL);
196 
198 
199  if (s->picture)
200  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
201  s->picture[i].needs_realloc = 1;
202 
203  s->last_picture_ptr =
204  s->next_picture_ptr =
205  s->current_picture_ptr = NULL;
206 
207  if ((s->width || s->height) &&
208  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
209  goto fail;
210 
211  /* set chroma shifts */
212  err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
213  &s->chroma_x_shift,
214  &s->chroma_y_shift);
215  if (err < 0)
216  goto fail;
217 
218  if ((err = ff_mpv_init_context_frame(s)))
219  goto fail;
220 
221  memset(s->thread_context, 0, sizeof(s->thread_context));
222  s->thread_context[0] = s;
223 
224  if (s->width && s->height) {
226  if (err < 0)
227  goto fail;
228  }
229  s->context_reinit = 0;
230 
231  return 0;
232  fail:
234  s->context_reinit = 1;
235  return err;
236 }
237 
/**
 * Pick an unused slot in s->picture[] and allocate a frame buffer for it.
 *
 * @param s         decoder context
 * @param picp      on success, set to the newly allocated Picture
 * @param reference nonzero if the picture will be used as a reference
 *                  (requests AV_GET_BUFFER_FLAG_REF from the buffer allocator)
 * @return 0 on success, a negative AVERROR code on failure
 *
 * NOTE(review): this listing was extracted from generated documentation and
 * several interior lines were lost (the get_buffer call for the WM-image
 * special case, the generic buffer allocation in the else branch, the
 * hwaccel private-data allocation, and the unref on the fail path).
 * Restore them from the upstream source before relying on this text.
 */
static int alloc_picture(MpegEncContext *s, Picture **picp, int reference)
{
    AVCodecContext *avctx = s->avctx;
    /* find a picture slot whose buffers are currently unused */
    int idx = ff_find_unused_picture(s->avctx, s->picture, 0);
    Picture *pic;
    int ret;

    if (idx < 0)
        return idx;

    pic = &s->picture[idx];

    /* ThreadFrame wraps the same AVFrame for frame-threaded progress */
    pic->tf.f = pic->f;
    pic->reference = reference;

    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    /* NOTE(review): opening of the if () branch and its get-buffer call were
     * lost in extraction; only the trailing argument survives below. */
                             reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width  = avctx->width;
        pic->f->height = avctx->height;
        pic->f->format = avctx->pix_fmt;
        /* NOTE(review): the buffer-allocating call assigning 'ret' here was
         * lost in extraction — TODO confirm against upstream. */
    }
    if (ret < 0)
        goto fail;

    /* NOTE(review): a second allocation (presumably hwaccel frame private
     * data) assigning 'ret' was lost in extraction. */
    if (ret < 0)
        goto fail;

    /* allocate the per-picture decoder tables (mb_type, qscale, motion) */
    ret = ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, s->out_format,
                           s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                           &s->linesize, &s->uvlinesize);
    if (ret < 0)
        goto fail;
    *picp = pic;

    return 0;
fail:
    /* NOTE(review): the unref of 'pic' on this path was lost in extraction */
    return ret;
}
285 
287 {
288  Picture *pic;
289  int ret = alloc_picture(s, picp, 1);
290  if (ret < 0)
291  return ret;
292 
293  pic = *picp;
294 
295  ff_thread_report_progress(&pic->tf, INT_MAX, 0);
296  ff_thread_report_progress(&pic->tf, INT_MAX, 1);
297 
298  return 0;
299 }
300 
301 static void color_frame(AVFrame *frame, int luma)
302 {
303  int h_chroma_shift, v_chroma_shift;
304 
305  for (int i = 0; i < frame->height; i++)
306  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
307 
308  if (!frame->data[1])
309  return;
310  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
311  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
312  memset(frame->data[1] + frame->linesize[1] * i,
313  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
314  memset(frame->data[2] + frame->linesize[2] * i,
315  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
316  }
317 }
318 
319 /**
320  * generic function called after decoding
321  * the header and before a frame is decoded.
322  */
324 {
325  int ret;
326 
327  s->mb_skipped = 0;
328 
330  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
331  return AVERROR_BUG;
332  }
333 
334  /* mark & release old frames */
335  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
336  s->last_picture_ptr != s->next_picture_ptr &&
337  s->last_picture_ptr->f->buf[0]) {
338  ff_mpeg_unref_picture(s->last_picture_ptr);
339  }
340 
341  /* release non reference/forgotten frames */
342  for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
343  if (!s->picture[i].reference ||
344  (&s->picture[i] != s->last_picture_ptr &&
345  &s->picture[i] != s->next_picture_ptr &&
346  !s->picture[i].needs_realloc)) {
347  ff_mpeg_unref_picture(&s->picture[i]);
348  }
349  }
350 
351  ff_mpeg_unref_picture(&s->current_picture);
352  ff_mpeg_unref_picture(&s->last_picture);
353  ff_mpeg_unref_picture(&s->next_picture);
354 
355  ret = alloc_picture(s, &s->current_picture_ptr,
356  s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
357  if (ret < 0)
358  return ret;
359 
360  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
361  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (!s->progressive_frame &&
362  !s->progressive_sequence);
363  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
364 
365  s->current_picture_ptr->f->pict_type = s->pict_type;
366  if (s->pict_type == AV_PICTURE_TYPE_I)
367  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY;
368  else
369  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
370 
371  if ((ret = ff_mpeg_ref_picture(&s->current_picture,
372  s->current_picture_ptr)) < 0)
373  return ret;
374 
375  if (s->pict_type != AV_PICTURE_TYPE_B) {
376  s->last_picture_ptr = s->next_picture_ptr;
377  if (!s->droppable)
378  s->next_picture_ptr = s->current_picture_ptr;
379  }
380  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
381  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
382  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
383  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
384  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
385  s->pict_type, s->droppable);
386 
387  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
388  (s->pict_type != AV_PICTURE_TYPE_I)) {
389  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
391  "allocating dummy last picture for B frame\n");
392  else if (s->codec_id != AV_CODEC_ID_H261)
394  "warning: first frame is no keyframe\n");
395 
396  /* Allocate a dummy frame */
397  ret = alloc_dummy_frame(s, &s->last_picture_ptr);
398  if (ret < 0)
399  return ret;
400 
401  if (!avctx->hwaccel) {
402  int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
403  color_frame(s->last_picture_ptr->f, luma_val);
404  }
405 
406  }
407  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
408  s->pict_type == AV_PICTURE_TYPE_B) {
409  /* Allocate a dummy frame */
410  ret = alloc_dummy_frame(s, &s->next_picture_ptr);
411  if (ret < 0)
412  return ret;
413  }
414 
415  if (s->last_picture_ptr) {
416  if (s->last_picture_ptr->f->buf[0] &&
417  (ret = ff_mpeg_ref_picture(&s->last_picture,
418  s->last_picture_ptr)) < 0)
419  return ret;
420  }
421  if (s->next_picture_ptr) {
422  if (s->next_picture_ptr->f->buf[0] &&
423  (ret = ff_mpeg_ref_picture(&s->next_picture,
424  s->next_picture_ptr)) < 0)
425  return ret;
426  }
427 
428  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
429  s->last_picture_ptr->f->buf[0]));
430 
431  /* set dequantizer, we can't do it during init as
432  * it might change for MPEG-4 and we can't do it in the header
433  * decode as init is not called for MPEG-4 there yet */
434  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
435  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
436  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
437  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
438  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
439  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
440  } else {
441  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
442  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
443  }
444 
445  if (s->avctx->debug & FF_DEBUG_NOMC)
446  color_frame(s->current_picture_ptr->f, 0x80);
447 
448  return 0;
449 }
450 
451 /* called after a frame has been decoded. */
453 {
454  emms_c();
455 
456  if (s->current_picture.reference)
457  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
458 }
459 
/**
 * Print per-macroblock debug information for picture @p.
 * Thin wrapper around ff_print_debug_info2() that forwards the picture's
 * tables (mb_type, qscale_table, motion_val) together with the geometry
 * and stride fields of the decoder context.
 */
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
466 
/**
 * Export the per-macroblock quantizer table of picture @p as
 * AVVideoEncParams side data on frame @f.
 *
 * Only active when the caller opted in via
 * AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS; otherwise a no-op returning 0.
 *
 * @param qp_type  qscale convention; FF_MPV_QSCALE_TYPE_MPEG1 values are
 *                 doubled on export (mult == 2), others exported as-is
 * @return 0 on success or when export is disabled, AVERROR(ENOMEM) on
 *         allocation failure
 */
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
{
    AVVideoEncParams *par;
    int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
    /* total number of macroblocks, using the allocation-time dimensions */
    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    /* NOTE(review): the line assigning 'par' (presumably
     * av_video_enc_params_create_side_data(f, ..., nb_mb)) was lost when
     * this listing was extracted — restore it from the upstream source. */
    if (!par)
        return AVERROR(ENOMEM);

    /* one 16x16 block entry per macroblock; mb_xy uses the allocation
     * stride, which may exceed alloc_mb_width */
    for (unsigned y = 0; y < p->alloc_mb_height; y++)
        for (unsigned x = 0; x < p->alloc_mb_width; x++) {
            const unsigned int block_idx = y * p->alloc_mb_width + x;
            const unsigned int mb_xy = y * p->alloc_mb_stride + x;
            AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);

            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w = 16;
            b->h = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}
496 
498 {
499  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
500  s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
501  y, h, s->picture_structure,
502  s->first_field, s->low_delay);
503 }
504 
506 {
507  MpegEncContext *const s = avctx->priv_data;
508 
509  if (!s->picture)
510  return;
511 
512  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
513  ff_mpeg_unref_picture(&s->picture[i]);
514  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
515 
516  ff_mpeg_unref_picture(&s->current_picture);
517  ff_mpeg_unref_picture(&s->last_picture);
518  ff_mpeg_unref_picture(&s->next_picture);
519 
520  s->mb_x = s->mb_y = 0;
521 
522  s->bitstream_buffer_size = 0;
523  s->pp_time = 0;
524 }
525 
527 {
528  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
529  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
530 }
531 
532 
534  uint8_t *dest, const uint8_t *src,
535  int field_based, int field_select,
536  int src_x, int src_y,
537  int width, int height, ptrdiff_t stride,
538  int h_edge_pos, int v_edge_pos,
539  int w, int h, const h264_chroma_mc_func *pix_op,
540  int motion_x, int motion_y)
541 {
542  const int lowres = s->avctx->lowres;
543  const int op_index = FFMIN(lowres, 3);
544  const int s_mask = (2 << lowres) - 1;
545  int emu = 0;
546  int sx, sy;
547 
548  if (s->quarter_sample) {
549  motion_x /= 2;
550  motion_y /= 2;
551  }
552 
553  sx = motion_x & s_mask;
554  sy = motion_y & s_mask;
555  src_x += motion_x >> lowres + 1;
556  src_y += motion_y >> lowres + 1;
557 
558  src += src_y * stride + src_x;
559 
560  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
561  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
562  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
563  s->linesize, s->linesize,
564  w + 1, (h + 1) << field_based,
565  src_x, src_y * (1 << field_based),
567  src = s->sc.edge_emu_buffer;
568  emu = 1;
569  }
570 
571  sx = (sx << 2) >> lowres;
572  sy = (sy << 2) >> lowres;
573  if (field_select)
574  src += s->linesize;
575  pix_op[op_index](dest, src, stride, h, sx, sy);
576  return emu;
577 }
578 
579 /* apply one mpeg motion vector to the three components */
581  uint8_t *dest_y,
582  uint8_t *dest_cb,
583  uint8_t *dest_cr,
584  int field_based,
585  int bottom_field,
586  int field_select,
587  uint8_t *const *ref_picture,
588  const h264_chroma_mc_func *pix_op,
589  int motion_x, int motion_y,
590  int h, int mb_y)
591 {
592  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
593  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
594  ptrdiff_t uvlinesize, linesize;
595  const int lowres = s->avctx->lowres;
596  const int op_index = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
597  const int block_s = 8 >> lowres;
598  const int s_mask = (2 << lowres) - 1;
599  const int h_edge_pos = s->h_edge_pos >> lowres;
600  const int v_edge_pos = s->v_edge_pos >> lowres;
601  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
602  linesize = s->current_picture.f->linesize[0] << field_based;
603  uvlinesize = s->current_picture.f->linesize[1] << field_based;
604 
605  // FIXME obviously not perfect but qpel will not work in lowres anyway
606  if (s->quarter_sample) {
607  motion_x /= 2;
608  motion_y /= 2;
609  }
610 
611  if (field_based) {
612  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
613  }
614 
615  sx = motion_x & s_mask;
616  sy = motion_y & s_mask;
617  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
618  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
619 
620  if (s->out_format == FMT_H263) {
621  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
622  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
623  uvsrc_x = src_x >> 1;
624  uvsrc_y = src_y >> 1;
625  } else if (s->out_format == FMT_H261) {
626  // even chroma mv's are full pel in H261
627  mx = motion_x / 4;
628  my = motion_y / 4;
629  uvsx = (2 * mx) & s_mask;
630  uvsy = (2 * my) & s_mask;
631  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
632  uvsrc_y = mb_y * block_s + (my >> lowres);
633  } else {
634  if (s->chroma_y_shift) {
635  mx = motion_x / 2;
636  my = motion_y / 2;
637  uvsx = mx & s_mask;
638  uvsy = my & s_mask;
639  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
640  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
641  } else {
642  if (s->chroma_x_shift) {
643  //Chroma422
644  mx = motion_x / 2;
645  uvsx = mx & s_mask;
646  uvsy = motion_y & s_mask;
647  uvsrc_y = src_y;
648  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
649  } else {
650  //Chroma444
651  uvsx = motion_x & s_mask;
652  uvsy = motion_y & s_mask;
653  uvsrc_x = src_x;
654  uvsrc_y = src_y;
655  }
656  }
657  }
658 
659  ptr_y = ref_picture[0] + src_y * linesize + src_x;
660  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
661  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
662 
663  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
664  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
665  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
666  linesize >> field_based, linesize >> field_based,
667  17, 17 + field_based,
668  src_x, src_y * (1 << field_based), h_edge_pos,
669  v_edge_pos);
670  ptr_y = s->sc.edge_emu_buffer;
671  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
672  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
673  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
674  if (s->workaround_bugs & FF_BUG_IEDGE)
675  vbuf -= s->uvlinesize;
676  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
677  uvlinesize >> field_based, uvlinesize >> field_based,
678  9, 9 + field_based,
679  uvsrc_x, uvsrc_y * (1 << field_based),
680  h_edge_pos >> 1, v_edge_pos >> 1);
681  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
682  uvlinesize >> field_based,uvlinesize >> field_based,
683  9, 9 + field_based,
684  uvsrc_x, uvsrc_y * (1 << field_based),
685  h_edge_pos >> 1, v_edge_pos >> 1);
686  ptr_cb = ubuf;
687  ptr_cr = vbuf;
688  }
689  }
690 
691  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
692  if (bottom_field) {
693  dest_y += s->linesize;
694  dest_cb += s->uvlinesize;
695  dest_cr += s->uvlinesize;
696  }
697 
698  if (field_select) {
699  ptr_y += s->linesize;
700  ptr_cb += s->uvlinesize;
701  ptr_cr += s->uvlinesize;
702  }
703 
704  sx = (sx << 2) >> lowres;
705  sy = (sy << 2) >> lowres;
706  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
707 
708  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
709  uvsx = (uvsx << 2) >> lowres;
710  uvsy = (uvsy << 2) >> lowres;
711  if (hc) {
712  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
713  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
714  }
715  }
716  // FIXME h261 lowres loop filter
717 }
718 
720  uint8_t *dest_cb, uint8_t *dest_cr,
721  uint8_t *const *ref_picture,
722  const h264_chroma_mc_func * pix_op,
723  int mx, int my)
724 {
725  const int lowres = s->avctx->lowres;
726  const int op_index = FFMIN(lowres, 3);
727  const int block_s = 8 >> lowres;
728  const int s_mask = (2 << lowres) - 1;
729  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
730  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
731  int emu = 0, src_x, src_y, sx, sy;
732  ptrdiff_t offset;
733  const uint8_t *ptr;
734 
735  if (s->quarter_sample) {
736  mx /= 2;
737  my /= 2;
738  }
739 
740  /* In case of 8X8, we construct a single chroma motion vector
741  with a special rounding */
742  mx = ff_h263_round_chroma(mx);
743  my = ff_h263_round_chroma(my);
744 
745  sx = mx & s_mask;
746  sy = my & s_mask;
747  src_x = s->mb_x * block_s + (mx >> lowres + 1);
748  src_y = s->mb_y * block_s + (my >> lowres + 1);
749 
750  offset = src_y * s->uvlinesize + src_x;
751  ptr = ref_picture[1] + offset;
752  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
753  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
754  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
755  s->uvlinesize, s->uvlinesize,
756  9, 9,
757  src_x, src_y, h_edge_pos, v_edge_pos);
758  ptr = s->sc.edge_emu_buffer;
759  emu = 1;
760  }
761  sx = (sx << 2) >> lowres;
762  sy = (sy << 2) >> lowres;
763  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
764 
765  ptr = ref_picture[2] + offset;
766  if (emu) {
767  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
768  s->uvlinesize, s->uvlinesize,
769  9, 9,
770  src_x, src_y, h_edge_pos, v_edge_pos);
771  ptr = s->sc.edge_emu_buffer;
772  }
773  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
774 }
775 
776 /**
777  * motion compensation of a single macroblock
778  * @param s context
779  * @param dest_y luma destination pointer
780  * @param dest_cb chroma cb/u destination pointer
781  * @param dest_cr chroma cr/v destination pointer
782  * @param dir direction (0->forward, 1->backward)
783  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
784  * @param pix_op halfpel motion compensation function (average or put normally)
785  * the motion vectors are taken from s->mv and the MV type from s->mv_type
786  */
/**
 * Motion compensation of a single macroblock at reduced (lowres) resolution.
 * Dispatches on s->mv_type; motion vectors come from s->mv[dir][..].
 *
 * @param s       context
 * @param dest_y  luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir     direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the planes of the reference picture
 * @param pix_op  halfpel motion compensation function (average or put)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres = s->avctx->lowres;
    /* block size shrinks with each lowres level: 8, 4, 2, 1 */
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors, one per 8x8 block; chroma uses their sum */
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: compensate each field separately */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: fall back to the current picture when the
             * selected reference field is unavailable or belongs to the
             * picture being decoded */
            if (   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            /* use the reference only when its selected field actually exists */
            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance destinations to the lower 16x8 half */
            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions from both parities are averaged */
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* first pass puts, subsequent passes average on top */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
919 
920 /**
921  * find the lowest MB row referenced in the MVs
922  */
924 {
925  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
926  int off, mvs;
927 
928  if (s->picture_structure != PICT_FRAME || s->mcsel)
929  goto unhandled;
930 
931  switch (s->mv_type) {
932  case MV_TYPE_16X16:
933  mvs = 1;
934  break;
935  case MV_TYPE_16X8:
936  mvs = 2;
937  break;
938  case MV_TYPE_8X8:
939  mvs = 4;
940  break;
941  default:
942  goto unhandled;
943  }
944 
945  for (int i = 0; i < mvs; i++) {
946  int my = s->mv[dir][i][1];
947  my_max = FFMAX(my_max, my);
948  my_min = FFMIN(my_min, my);
949  }
950 
951  off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
952 
953  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
954 unhandled:
955  return s->mb_height - 1;
956 }
957 
958 /* add block[] to dest[] */
959 static inline void add_dct(MpegEncContext *s,
960  int16_t *block, int i, uint8_t *dest, int line_size)
961 {
962  if (s->block_last_index[i] >= 0) {
963  s->idsp.idct_add(dest, line_size, block);
964  }
965 }
966 
967 #define IS_ENCODER 0
969 
971 {
972  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
973  /* print DCT coefficients */
974  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
975  for (int i = 0; i < 6; i++) {
976  for (int j = 0; j < 64; j++) {
977  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
978  block[i][s->idsp.idct_permutation[j]]);
979  }
980  av_log(s->avctx, AV_LOG_DEBUG, "\n");
981  }
982  }
983 
984  if (!s->avctx->lowres) {
985 #if !CONFIG_SMALL
986  if (s->out_format == FMT_MPEG1)
988  else
990 #else
992 #endif
993  } else
995 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1427
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:267
av_clip
#define av_clip
Definition: common.h:99
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:88
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1349
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:467
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:540
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:522
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:269
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:936
NOT_MPEG12
#define NOT_MPEG12
Definition: mpv_reconstruct_mb_template.c:23
mpv_reconstruct_mb_template.c
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:65
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:175
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:526
AVFrame::width
int width
Definition: frame.h:447
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
last_picture
enum AVPictureType last_picture
Definition: movenc.c:70
b
#define b
Definition: input.c:41
MpegEncContext::next_picture
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:163
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture **picp, int reference)
Definition: mpegvideo_dec.c:238
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:296
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
mpegutils.h
thread.h
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:271
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:639
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
DEFINITELY_MPEG12
#define DEFINITELY_MPEG12
Definition: mpv_reconstruct_mb_template.c:25
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:254
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:970
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
fail
#define fail()
Definition: checkasm.h:179
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2993
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:533
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:460
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:626
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
emms_c
#define emms_c()
Definition: emms.h:63
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, Picture **picp)
Definition: mpegvideo_dec.c:286
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1956
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:59
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:127
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
s1
#define s1
Definition: regdef.h:38
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
limits.h
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:70
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:381
threadframe.h
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:278
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:289
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:343
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
Definition: mpegpicture.c:305
Picture::tf
ThreadFrame tf
Definition: mpegpicture.h:48
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1364
lowres
static int lowres
Definition: ffplay.c:330
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:94
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:268
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:959
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1402
f
f
Definition: af_crystalizer.c:121
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:497
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:66
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:462
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:323
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:505
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:270
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:301
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:923
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:219
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:254
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:130
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:580
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:289
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:204
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:787
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:980
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:634
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1411
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:719
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:51
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:415
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:759
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:447
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:452
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:633
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:411
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:67
video_enc_params.h
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint8_t *mbskip_table, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:160