/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part 10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "internal.h"
#include "avcodec.h"
#include "h264dec.h"
#include "mpegutils.h"
#include "libavutil/avassert.h"
#include "libavutil/mem_internal.h"

static av_always_inline int fetch_diagonal_mv(const H264Context *h,
                                              H264SliceContext *sl,
                                              const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = sl->ref_cache[list][i - 8 + part_width];

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
    if (FRAME_MBAFF(h)) {
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
        const int xy = XY, y4 = Y4;                                     \
        const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];    \
        if (!USES_LIST(mb_type, list))                                  \
            return LIST_NOT_USED;                                       \
        mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
        sl->mv_cache[list][scan8[0] - 2][0] = mv[0];                    \
        sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;              \
        return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;

        if (topright_ref == PART_NOT_AVAILABLE
            && i >= scan8[0] + 8 && (i & 7) == 4
            && sl->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
            const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
            const int16_t *mv;
            AV_ZERO32(sl->mv_cache[list][scan8[0] - 2]);
            *C = sl->mv_cache[list][scan8[0] - 2];

            if (!MB_FIELD(sl) && IS_INTERLACED(sl->left_type[0])) {
                SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
                            (sl->mb_y & 1) * 2 + (i >> 5));
            }
            if (MB_FIELD(sl) && !IS_INTERLACED(sl->left_type[0])) {
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(/ 2, *2, sl->left_mb_xy[i >= 36], ((i >> 2)) & 3);
            }
        }
#undef SET_DIAG_MV
    }

    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = sl->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        ff_tlog(h->avctx, "topright MV not available\n");

        *C = sl->mv_cache[list][i - 8 - 1];
        return sl->ref_cache[list][i - 8 - 1];
    }
}
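/* Usage sketch (illustrative, not part of the original header): the caller
 * passes the cache index of the current block and receives the reference
 * index of the diagonal neighbor, with *C pointing at its MV in mv_cache:
 *
 *     const int16_t *C;
 *     int diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[n], list, part_width);
 *     // diagonal_ref is the top-right neighbor's reference index, or the
 *     // top-left's when the top-right block is unavailable.
 */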

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_motion(const H264Context *const h,
                                         H264SliceContext *sl,
                                         int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = sl->ref_cache[list][index8 - 8];
    const int left_ref     = sl->ref_cache[list][index8 - 1];
    const int16_t *const A = sl->mv_cache[list][index8 - 1];
    const int16_t *const B = sl->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);

/* mv_cache
 * B . . A T T T T
 * U . . L . . , .
 * U . . L . . . .
 * U . . L . . , .
 * . . . L . . . .
 */
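/* Reading the diagram above (explanatory note): the cache is 8 entries wide,
 * so from index8 = scan8[n] an offset of -8 is the block above and -1 the
 * block to the left; hence A = left, B = top, and C = top-right at
 * index8 - 8 + part_width, with index8 - 8 - 1 (top-left) as the fallback
 * used by fetch_diagonal_mv(). */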

    diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { //most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }

    ff_tlog(h->avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
}
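/* Worked example (hypothetical values, for illustration only): with
 * A = (4, -2), B = (0, 6), C = (2, 2) and all three neighbors using the
 * requested reference, the component-wise median gives
 *     *mx = mid_pred(4, 0, 2) = 2,   *my = mid_pred(-2, 6, 2) = 2.
 * If exactly one neighbor matches the reference, its MV is copied verbatim,
 * as in the match_count == 1 branch above. */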

/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = sl->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, sl, n, 4, list, ref, mx, my);
}

/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, sl, n, 2, list, ref, mx, my);
}
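/* Explanatory note: for 16x8 the top partition prefers the top neighbor (B)
 * and the bottom partition the left neighbor (A); for 8x16 the left
 * partition prefers A and the right partition the diagonal neighbor (C).
 * Only when the preferred neighbor does not use the requested reference
 * does prediction fall back to the generic median in pred_motion(). */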

#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF(h)) {                       \
        if (MB_FIELD(sl)) {                     \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else {                                \
            if (IS_INTERLACED(type)) {          \
                refn >>= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] *= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        }                                       \
    }
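/* Worked example (hypothetical values): a field macroblock reading a
 * frame-coded neighbor with refn = 1 and mvn = (3, 8) ends up with
 * refn = 2 and a vertical component of 4, because each frame reference
 * counts as two field references and field lines are twice as far apart
 * vertically. The opposite case applies the inverse scaling. */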

static av_always_inline void pred_pskip_motion(const H264Context *const h,
                                               H264SliceContext *sl)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int8_t *ref     = h->cur_pic.ref_index[0];
    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);

    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication?
     */
    if (USES_LIST(sl->left_type[LTOP], 0)) {
        left_ref = ref[4 * sl->left_mb_xy[LTOP] + 1 + (sl->left_block[0] & ~1)];
        A        = mv[h->mb2b_xy[sl->left_mb_xy[LTOP]] + 3 + b_stride * sl->left_block[0]];
        FIX_MV_MBAFF(sl->left_type[LTOP], left_ref, A, 0);
        if (!(left_ref | AV_RN32A(A)))
            goto zeromv;
    } else if (sl->left_type[LTOP]) {
        left_ref = LIST_NOT_USED;
        A        = zeromv;
    } else {
        goto zeromv;
    }

    if (USES_LIST(sl->top_type, 0)) {
        top_ref = ref[4 * sl->top_mb_xy + 2];
        B       = mv[h->mb2b_xy[sl->top_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->top_type, top_ref, B, 1);
        if (!(top_ref | AV_RN32A(B)))
            goto zeromv;
    } else if (sl->top_type) {
        top_ref = LIST_NOT_USED;
        B       = zeromv;
    } else {
        goto zeromv;
    }

    ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, sl->mb_x, sl->mb_y);

    if (USES_LIST(sl->topright_type, 0)) {
        diagonal_ref = ref[4 * sl->topright_mb_xy + 2];
        C            = mv[h->mb2b_xy[sl->topright_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->topright_type, diagonal_ref, C, 2);
    } else if (sl->topright_type) {
        diagonal_ref = LIST_NOT_USED;
        C            = zeromv;
    } else {
        if (USES_LIST(sl->topleft_type, 0)) {
            diagonal_ref = ref[4 * sl->topleft_mb_xy + 1 +
                               (sl->topleft_partition & 2)];
            C            = mv[h->mb2b_xy[sl->topleft_mb_xy] + 3 + b_stride +
                              (sl->topleft_partition & 2 * b_stride)];
            FIX_MV_MBAFF(sl->topleft_type, diagonal_ref, C, 2);
        } else if (sl->topleft_type) {
            diagonal_ref = LIST_NOT_USED;
            C            = zeromv;
        } else {
            diagonal_ref = PART_NOT_AVAILABLE;
            C            = zeromv;
        }
    }

    match_count = !diagonal_ref + !top_ref + !left_ref;
    ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
    return;

zeromv:
    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}
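/* Explanatory note (summary of the logic above): P_SKIP takes the zero MV
 * whenever the left or top neighbor is unavailable, or is available with
 * reference index 0 and a zero MV; the !(left_ref | AV_RN32A(A)) test
 * folds "ref == 0 and both MV components zero" into a single branch.
 * Otherwise the usual median over A/B/C is used. */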

static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };

    sl->topleft_partition = -1;

    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    topleft_xy     = top_xy - 1;
    topright_xy    = top_xy + 1;
    left_xy[LBOT]  = left_xy[LTOP] = mb_xy - 1;
    sl->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    topleft_xy += h->mb_stride;
                    /* take top left mv from the middle of the mb, as opposed
                     * to all other modes which use the bottom right partition */
                    sl->topleft_partition = 0;
                    sl->left_block        = left_block_options[1];
                }
            }
        } else {
            if (curr_mb_field_flag) {
                topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy]     >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    sl->left_block = left_block_options[2];
                }
            }
        }
    }

    sl->topleft_mb_xy    = topleft_xy;
    sl->top_mb_xy        = top_xy;
    sl->topright_mb_xy   = topright_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    sl->topleft_type    = h->cur_pic.mb_type[topleft_xy];
    sl->top_type        = h->cur_pic.mb_type[top_xy];
    sl->topright_type   = h->cur_pic.mb_type[topright_xy];
    sl->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != sl->slice_num)
            sl->topleft_type = 0;
        if (h->slice_table[top_xy] != sl->slice_num)
            sl->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
            sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
    } else {
        if (h->slice_table[topleft_xy] != sl->slice_num) {
            sl->topleft_type = 0;
            if (h->slice_table[top_xy] != sl->slice_num)
                sl->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
                sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != sl->slice_num)
        sl->topright_type = 0;
}
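/* Illustrative example (hypothetical geometry): in a progressive frame with
 * mb_stride = 12, the macroblock at mb_xy = 50 gets top_xy = 38,
 * topleft_xy = 37, topright_xy = 39 and left_xy = 49. The MBAFF branches
 * above only move these indices when the current pair and a neighboring
 * pair differ in field/frame coding. */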

static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = sl->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy      = sl->topleft_mb_xy;
    top_xy          = sl->top_mb_xy;
    topright_xy     = sl->topright_mb_xy;
    left_xy[LTOP]   = sl->left_mb_xy[LTOP];
    left_xy[LBOT]   = sl->left_mb_xy[LBOT];
    topleft_type    = sl->topleft_type;
    top_type        = sl->top_type;
    topright_type   = sl->topright_type;
    left_type[LTOP] = sl->left_type[LTOP];
    left_type[LBOT] = sl->left_type[LBOT];

    if (!IS_SKIP(mb_type)) {
        if (IS_INTRA(mb_type)) {
            int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
            sl->topleft_samples_available  =
            sl->top_samples_available      =
            sl->left_samples_available     = 0xFFFF;
            sl->topright_samples_available = 0xEEEA;

            if (!(top_type & type_mask)) {
                sl->topleft_samples_available  = 0xB3FF;
                sl->top_samples_available      = 0x33FF;
                sl->topright_samples_available = 0x26EA;
            }
            if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
                if (IS_INTERLACED(mb_type)) {
                    if (!(left_type[LTOP] & type_mask)) {
                        sl->topleft_samples_available &= 0xDFFF;
                        sl->left_samples_available    &= 0x5FFF;
                    }
                    if (!(left_type[LBOT] & type_mask)) {
                        sl->topleft_samples_available &= 0xFF5F;
                        sl->left_samples_available    &= 0xFF5F;
                    }
                } else {
                    int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];

                    av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                        sl->topleft_samples_available &= 0xDF5F;
                        sl->left_samples_available    &= 0x5F5F;
                    }
                }
            } else {
                if (!(left_type[LTOP] & type_mask)) {
                    sl->topleft_samples_available &= 0xDF5F;
                    sl->left_samples_available    &= 0x5F5F;
                }
            }

            if (!(topleft_type & type_mask))
                sl->topleft_samples_available &= 0x7FFF;

            if (!(topright_type & type_mask))
                sl->topright_samples_available &= 0xFBFF;

            if (IS_INTRA4x4(mb_type)) {
                if (IS_INTRA4x4(top_type)) {
                    AV_COPY32(sl->intra4x4_pred_mode_cache + 4 + 8 * 0, sl->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                } else {
                    sl->intra4x4_pred_mode_cache[4 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[5 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[6 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
                }
                for (i = 0; i < 2; i++) {
                    if (IS_INTRA4x4(left_type[LEFT(i)])) {
                        int8_t *mode = sl->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
                    } else {
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }

        /*
         * 0 . T T. T T T T
         * 1 L . .L . . . .
         * 2 L . .L . . . .
         * 3 . T TL . . . .
         * 4 L . .L . . . .
         * 5 L . .. . . . .
         */
        /* FIXME: constraint_intra_pred & partitioning & nnz
         * (let us hope this is just a typo in the spec) */
        nnz_cache = sl->non_zero_count_cache;
        if (top_type) {
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
            if (!h->chroma_y_shift) {
                AV_COPY32(&nnz_cache[4 + 8 *  5], &nnz[4 *  7]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
            } else {
                AV_COPY32(&nnz_cache[4 + 8 *  5], &nnz[4 * 5]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
            }
        } else {
            uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4 + 8 *  0], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 *  5], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
        }

        for (i = 0; i < 2; i++) {
            if (left_type[LEFT(i)]) {
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
                if (CHROMA444(h)) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
                } else if (CHROMA422(h)) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
                } else {
                    nnz_cache[3 + 8 *  6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                    nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
                }
            } else {
                nnz_cache[3 + 8 *  1 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  2 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if (CABAC(h)) {
            // top_cbp
            if (top_type)
                sl->top_cbp = h->cbp_table[top_xy];
            else
                sl->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            // left_cbp
            if (left_type[LTOP]) {
                sl->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                               ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                               (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
            } else {
                sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && sl->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < sl->list_count; list++) {
            int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
            int8_t *ref       = h->cur_pic.ref_index[list];
            int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;
            av_assert2(!(IS_DIRECT(mb_type) && !sl->direct_spatial_mv_pred));

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }

            if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
                for (i = 0; i < 2; i++) {
                    int cache_idx = -1 + i * 2 * 8;
                    if (USES_LIST(left_type[LEFT(i)], list)) {
                        const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx],
                                  mv[b_xy + b_stride * left_block[0 + i * 2]]);
                        AV_COPY32(mv_cache[cache_idx + 8],
                                  mv[b_xy + b_stride * left_block[1 + i * 2]]);
                        ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                        ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                    } else {
                        AV_ZERO32(mv_cache[cache_idx]);
                        AV_ZERO32(mv_cache[cache_idx + 8]);
                        ref_cache[cache_idx] =
                        ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
                                                                        : PART_NOT_AVAILABLE;
                    }
                }
            } else {
                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy = 4 * left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                    ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                    : PART_NOT_AVAILABLE;
                }
            }

            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (sl->topleft_partition & 2 * b_stride);
                    const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }

            if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF(h))
                continue;

            if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
                uint8_t(*mvd_cache)[2] = &sl->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = sl->mvd_table[list];
                ref_cache[2 + 8 * 0] =
                ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2 + 8 * 0]);
                AV_ZERO32(mv_cache[2 + 8 * 2]);

                if (CABAC(h)) {
                    if (USES_LIST(top_type, list)) {
                        const int b_xy = h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                    } else {
                        AV_ZERO64(mvd_cache[0 - 1 * 8]);
                    }
                    if (USES_LIST(left_type[LTOP], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                    }
                    if (USES_LIST(left_type[LBOT], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                    }
                    AV_ZERO16(mvd_cache[2 + 8 * 0]);
                    AV_ZERO16(mvd_cache[2 + 8 * 2]);
                    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
                        uint8_t *direct_cache = &sl->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);

                        if (IS_DIRECT(top_type)) {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                        } else if (IS_8X8(top_type)) {
                            int b8_xy = 4 * top_xy;
                            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                        } else {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101 * (MB_TYPE_16x16 >> 1));
                        }

                        if (IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                        else
                            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                        if (IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                        else
                            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
                    }
                }
            }

#define MAP_MVS                                                 \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)                 \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)                     \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)                     \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)                     \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)                     \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)                \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])              \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])              \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])              \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])

            if (FRAME_MBAFF(h)) {
                if (MB_FIELD(sl)) {

#define MAP_F2F(idx, mb_type)                                           \
    if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {     \
        sl->ref_cache[list][idx]    *= 2;                               \
        sl->mv_cache[list][idx][1]  /= 2;                               \
        sl->mvd_cache[list][idx][1] >>= 1;                              \
    }

                    MAP_MVS
                } else {

#undef MAP_F2F
#define MAP_F2F(idx, mb_type)                                           \
    if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {      \
        sl->ref_cache[list][idx]    >>= 1;                              \
        sl->mv_cache[list][idx][1]  *= 2;                               \
        sl->mvd_cache[list][idx][1] <<= 1;                              \
    }

                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

    sl->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
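/* Usage sketch (illustrative): a decode loop is expected to refresh the
 * neighbor indices and caches once per macroblock before MV prediction,
 * in the order used by decode_mb_skip() below:
 *
 *     fill_decode_neighbors(h, sl, mb_type);
 *     fill_decode_caches(h, sl, mb_type);
 *     pred_motion(h, sl, n, part_width, list, ref, &mx, &my);
 */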

/**
 * Decode a P_SKIP or B_SKIP macroblock.
 */
static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if (MB_FIELD(sl))
        mb_type |= MB_TYPE_INTERLACED;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_16x16;
        if (sl->direct_spatial_mv_pred) {
            fill_decode_neighbors(h, sl, mb_type);
            fill_decode_caches(h, sl, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, sl, &mb_type);
        mb_type |= MB_TYPE_SKIP;
    } else {
        mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;

        fill_decode_neighbors(h, sl, mb_type);
        pred_pskip_motion(h, sl);
    }

    write_back_motion(h, sl, mb_type);
    h->cur_pic.mb_type[mb_xy]      = mb_type;
    h->cur_pic.qscale_table[mb_xy] = sl->qscale;
    h->slice_table[mb_xy]          = sl->slice_num;
    sl->prev_mb_skipped            = 1;
}

#endif /* AVCODEC_H264_MVPRED_H */