/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 macroblock decoding
 */

#include <stdint.h>

#include "config.h"

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264_ps.h"
#include "qpeldsp.h"
#include "thread.h"

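/**
 * Return the lowest luma row of the reference picture that motion
 * compensation for one partition will read, including the extra rows
 * required by the interpolation filter when the vertical motion vector
 * has a fractional (quarter-pel) component.
 */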
static int get_lowest_part_list_y(H264SliceContext *sl,
                                  int n, int height, int y_offset, int list)
{
    int raw_my             = sl->mv_cache[list][scan8[n]][1];
    int filter_height_down = (raw_my & 3) ? 3 : 0;
    int full_my            = (raw_my >> 2) + y_offset;
    int bottom             = full_my + filter_height_down + height;

    av_assert2(height >= 0);

    return FFMAX(0, bottom);
}

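/**
 * Record, per reference picture and per prediction list, the lowest row
 * needed to motion-compensate one partition.  The maximum row is accumulated
 * into refs[list][ref] and nrefs[] counts how many distinct references are
 * actually used.
 */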
static inline void get_lowest_part_y(const H264Context *h, H264SliceContext *sl,
                                     int16_t refs[2][48], int n,
                                     int height, int y_offset, int list0,
                                     int list1, int *nrefs)
{
    int my;

    y_offset += 16 * (sl->mb_y >> MB_FIELD(sl));

    if (list0) {
        int ref_n    = sl->ref_cache[0][scan8[n]];
        H264Ref *ref = &sl->ref_list[0][ref_n];

        // Error resilience puts the current picture in the ref list.
        // Don't try to wait on these as it will cause a deadlock.
        // Fields can wait on each other, though.
        if (ref->parent->tf.progress->data != h->cur_pic.tf.progress->data ||
            (ref->reference & 3) != h->picture_structure) {
            my = get_lowest_part_list_y(sl, n, height, y_offset, 0);
            if (refs[0][ref_n] < 0)
                nrefs[0] += 1;
            refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
        }
    }

    if (list1) {
        int ref_n    = sl->ref_cache[1][scan8[n]];
        H264Ref *ref = &sl->ref_list[1][ref_n];

        if (ref->parent->tf.progress->data != h->cur_pic.tf.progress->data ||
            (ref->reference & 3) != h->picture_structure) {
            my = get_lowest_part_list_y(sl, n, height, y_offset, 1);
            if (refs[1][ref_n] < 0)
                nrefs[1] += 1;
            refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
        }
    }
}

/**
 * Wait until all reference frames are available for MC operations.
 *
 * @param h  the H.264 context
 * @param sl the current slice context
 */
static void await_references(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int16_t refs[2][48];
    int nrefs[2] = { 0 };
    int ref, list;

    memset(refs, -1, sizeof(refs));

    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        av_assert2(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = sl->sub_mb_type[i];
            const int n           = 4 * i;
            int y_offset          = (i & 2) << 2;

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;
                av_assert2(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);
                    get_lowest_part_y(h, sl, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    for (list = sl->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                H264Ref *ref_pic      = &sl->ref_list[list][ref];
                int ref_field         = ref_pic->reference - 1;
                int ref_field_picture = ref_pic->parent->field_picture;
                int pic_height        = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(sl);
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    av_assert2((ref_pic->parent->reference & 3) == 3);
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}

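/**
 * Motion-compensate a single partition in one prediction direction:
 * quarter-pel luma interpolation plus chroma MC, with edge emulation
 * when the motion vector points outside the reference picture.
 */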
static av_always_inline void mc_dir_part(const H264Context *h, H264SliceContext *sl,
                                         H264Ref *pic,
                                         int n, int square, int height,
                                         int delta, int list,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int src_x_offset, int src_y_offset,
                                         const qpel_mc_func *qpix_op,
                                         h264_chroma_mc_func chroma_op,
                                         int pixel_shift, int chroma_idc)
{
    const int mx      = sl->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = sl->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2);
    ptrdiff_t offset  = (mx >> 2) * (1 << pixel_shift) + (my >> 2) * sl->mb_linesize;
    uint8_t *src_y    = pic->data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width  = 0;
    int extra_height = 0;
    int emu = 0;
    const int full_mx    = mx >> 2;
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(sl);
    int ysh;

    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    if (full_mx < 0 - extra_width ||
        full_my < 0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                 src_y - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                 sl->mb_linesize, sl->mb_linesize,
                                 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
                                 full_my - 2, pic_width, pic_height);
        src_y = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, sl->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize);

    if (CONFIG_GRAY && h->flags & AV_CODEC_FLAG_GRAY)
        return;

    if (chroma_idc == 3 /* yuv444 */) {
        src_cb = pic->data[1] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cb - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cb = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, sl->mb_linesize);

        src_cr = pic->data[2] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cr - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cr = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, sl->mb_linesize);
        return;
    }

    ysh = 3 - (chroma_idc == 2 /* yuv422 */);
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(sl)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((sl->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->data[1] + ((mx >> 3) * (1 << pixel_shift)) +
             (my >> ysh) * sl->mb_uvlinesize;
    src_cr = pic->data[2] + ((mx >> 3) * (1 << pixel_shift)) +
             (my >> ysh) * sl->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cb,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = sl->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, sl->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cr,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = sl->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, sl->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);
}

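/**
 * Motion-compensate one partition without weighted prediction: the list 0
 * prediction is written with the put functions, and a list 1 prediction,
 * if present, is combined with the avg functions.
 */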
static av_always_inline void mc_part_std(const H264Context *h, H264SliceContext *sl,
                                         int n, int square,
                                         int height, int delta,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int x_offset, int y_offset,
                                         const qpel_mc_func *qpix_put,
                                         h264_chroma_mc_func chroma_put,
                                         const qpel_mc_func *qpix_avg,
                                         h264_chroma_mc_func chroma_avg,
                                         int list0, int list1,
                                         int pixel_shift, int chroma_idc)
{
    const qpel_mc_func *qpix_op   = qpix_put;
    h264_chroma_mc_func chroma_op = chroma_put;

    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }
    x_offset += 8 * sl->mb_x;
    y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));

    if (list0) {
        H264Ref *ref = &sl->ref_list[0][sl->ref_cache[0][scan8[n]]];
        mc_dir_part(h, sl, ref, n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, pixel_shift, chroma_idc);

        qpix_op   = qpix_avg;
        chroma_op = chroma_avg;
    }

    if (list1) {
        H264Ref *ref = &sl->ref_list[1][sl->ref_cache[1][scan8[n]]];
        mc_dir_part(h, sl, ref, n, square, height, delta, 1,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, pixel_shift, chroma_idc);
    }
}

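/**
 * Motion-compensate one partition with weighted prediction.  For
 * bi-prediction both references are fetched (the second into the bipred
 * scratchpad) and blended with either implicit or explicit weights; for
 * uni-prediction the explicit weight and offset are applied in place.
 */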
static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceContext *sl,
                                              int n, int square,
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              const qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg;
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }
    x_offset += 8 * sl->mb_x;
    y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = sl->bipred_scratchpad;
        uint8_t *tmp_cr = sl->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = sl->bipred_scratchpad + 16 * sl->mb_uvlinesize;
        int refn0       = sl->ref_cache[0][scan8[n]];
        int refn1       = sl->ref_cache[1][scan8[n]];

        mc_dir_part(h, sl, &sl->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, sl, &sl->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (sl->pwt.use_weight == 2) {
            int weight0 = sl->pwt.implicit_weight[refn0][refn1][sl->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
                            height, 5, weight0, weight1, 0);
            if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
                                  chroma_height, 5, weight0, weight1, 0);
                chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
                                  chroma_height, 5, weight0, weight1, 0);
            }
        } else {
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
                            sl->pwt.luma_log2_weight_denom,
                            sl->pwt.luma_weight[refn0][0][0],
                            sl->pwt.luma_weight[refn1][1][0],
                            sl->pwt.luma_weight[refn0][0][1] +
                            sl->pwt.luma_weight[refn1][1][1]);
            if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
                                  sl->pwt.chroma_log2_weight_denom,
                                  sl->pwt.chroma_weight[refn0][0][0][0],
                                  sl->pwt.chroma_weight[refn1][1][0][0],
                                  sl->pwt.chroma_weight[refn0][0][0][1] +
                                  sl->pwt.chroma_weight[refn1][1][0][1]);
                chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
                                  sl->pwt.chroma_log2_weight_denom,
                                  sl->pwt.chroma_weight[refn0][0][1][0],
                                  sl->pwt.chroma_weight[refn1][1][1][0],
                                  sl->pwt.chroma_weight[refn0][0][1][1] +
                                  sl->pwt.chroma_weight[refn1][1][1][1]);
            }
        }
    } else {
        int list = list1 ? 1 : 0;
        int refn = sl->ref_cache[list][scan8[n]];
        H264Ref *ref = &sl->ref_list[list][refn];
        mc_dir_part(h, sl, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        luma_weight_op(dest_y, sl->mb_linesize, height,
                       sl->pwt.luma_log2_weight_denom,
                       sl->pwt.luma_weight[refn][list][0],
                       sl->pwt.luma_weight[refn][list][1]);
        if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
            if (sl->pwt.use_weight_chroma) {
                chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
                                 sl->pwt.chroma_log2_weight_denom,
                                 sl->pwt.chroma_weight[refn][list][0][0],
                                 sl->pwt.chroma_weight[refn][list][0][1]);
                chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
                                 sl->pwt.chroma_log2_weight_denom,
                                 sl->pwt.chroma_weight[refn][list][1][0],
                                 sl->pwt.chroma_weight[refn][list][1][1]);
            }
        }
    }
}

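/**
 * Issue cache prefetches for reference data that an upcoming macroblock is
 * likely to need, using the motion vector cached at scan8[0] for the given
 * list as an estimate.
 */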
static av_always_inline void prefetch_motion(const H264Context *h, H264SliceContext *sl,
                                             int list, int pixel_shift,
                                             int chroma_idc)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int refn = sl->ref_cache[list][scan8[0]];
    if (refn >= 0) {
        const int mx  = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * sl->mb_x + 8;
        const int my  = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * sl->mb_y;
        uint8_t **src = sl->ref_list[list][refn].data;
        int off       = mx * (1 << pixel_shift) +
                        (my + (sl->mb_x & 3) * 4) * sl->mb_linesize +
                        (64 << pixel_shift);
        h->vdsp.prefetch(src[0] + off, sl->linesize, 4);
        if (chroma_idc == 3 /* yuv444 */) {
            h->vdsp.prefetch(src[1] + off, sl->linesize, 4);
            h->vdsp.prefetch(src[2] + off, sl->linesize, 4);
        } else {
            off = ((mx >> 1) + 64) * (1 << pixel_shift) +
                  ((my >> 1) + (sl->mb_x & 7)) * sl->uvlinesize;
            h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
        }
    }
}

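/**
 * Exchange (or copy, depending on xchg) the row above the current macroblock
 * with the unfiltered samples saved in sl->top_borders, so that intra
 * prediction sees pre-deblocking neighbour samples while the deblocking
 * filter keeps working on the picture itself.
 */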
static av_always_inline void xchg_mb_border(const H264Context *h, H264SliceContext *sl,
                                            uint8_t *src_y,
                                            uint8_t *src_cb, uint8_t *src_cr,
                                            int linesize, int uvlinesize,
                                            int xchg, int chroma444,
                                            int simple, int pixel_shift)
{
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;
    uint8_t *top_border;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl))
                return;
        } else {
            top_idx = MB_MBAFF(sl) ? 0 : 1;
        }
    }

    if (sl->deblocking_filter == 2) {
        deblock_topleft = h->slice_table[sl->mb_xy - 1 - h->mb_stride] == sl->slice_num;
        deblock_top     = sl->top_type;
    } else {
        deblock_topleft = (sl->mb_x > 0);
        deblock_top     = (sl->mb_y > !!MB_FIELD(sl));
    }

    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = sl->top_borders[top_idx][sl->mb_x - 1];
    top_border    = sl->top_borders[top_idx][sl->mb_x];

#define XCHG(a, b, xchg)                \
    if (pixel_shift) {                  \
        if (xchg) {                     \
            AV_SWAP64(b + 0, a + 0);    \
            AV_SWAP64(b + 8, a + 8);    \
        } else {                        \
            AV_COPY128(b, a);           \
        }                               \
    } else if (xchg)                    \
        AV_SWAP64(b, a);                \
    else                                \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (sl->mb_x + 1 < h->mb_width) {
            XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
        if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
            if (chroma444) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (sl->mb_x + 1 < h->mb_width) {
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            } else {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}

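/**
 * Read / write one residual coefficient from the block buffer, which holds
 * int32_t values for high bit depth and int16_t values otherwise.
 */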
static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
                                        int index)
{
    if (high_bit_depth) {
        return AV_RN32A(((int32_t *)mb) + index);
    } else
        return AV_RN16A(mb + index);
}

static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
                                         int index, int value)
{
    if (high_bit_depth) {
        AV_WN32A(((int32_t *)mb) + index, value);
    } else
        AV_WN16A(mb + index, value);
}

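/**
 * Intra-predict one luma plane (selected by p for 4:4:4) of a macroblock and
 * add the decoded residual: per-block 4x4/8x8 spatial prediction for
 * Intra_4x4/Intra_8x8, or a whole-MB prediction plus luma DC dequant/IDCT for
 * Intra_16x16.  Lossless (transform bypass) paths use the add_pixels helpers
 * instead of the IDCT.
 */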
static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
                                                       H264SliceContext *sl,
                                                       int mb_type, int simple,
                                                       int transform_bypass,
                                                       int pixel_shift,
                                                       const int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];
    block_offset += 16 * p;
    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels8_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                idct_add    = h->h264dsp.h264_idct8_add;
            }
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                    if (h->x264_build < 151U) {
                        h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    } else
                        h->hpc.pred8x8l_filter_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift),
                                                        (sl->topleft_samples_available << i) & 0x8000,
                                                        (sl->topright_samples_available << i) & 0x4000, linesize);
                } else {
                    const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
                                         (sl->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels4_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                idct_add    = h->h264dsp.h264_idct_add;
            }
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];

                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                    h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
                        av_assert2(sl->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            if (pixel_shift) {
                                tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr       = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        }
    } else {
        h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
        if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
            if (!transform_bypass)
                h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
                                                     sl->mb_luma_dc[p],
                                                     h->ps.pps->dequant4_coeff[p][qscale][0]);
            else {
                static const uint8_t dc_mapping[16] = {
                     0 * 16,  1 * 16,  4 * 16,  5 * 16,
                     2 * 16,  3 * 16,  6 * 16,  7 * 16,
                     8 * 16,  9 * 16, 12 * 16, 13 * 16,
                    10 * 16, 11 * 16, 14 * 16, 15 * 16
                };
                for (i = 0; i < 16; i++)
                    dctcoef_set(sl->mb + (p * 256 << pixel_shift),
                                pixel_shift, dc_mapping[i],
                                dctcoef_get(sl->mb_luma_dc[p],
                                            pixel_shift, i));
            }
        }
    }
}

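/**
 * Add the residual for one luma plane after prediction: 4x4 or 8x8 IDCT of
 * the coded blocks, or plain pixel adds in the transform-bypass case.
 * Intra_4x4 macroblocks are skipped here because their residual has already
 * been added during prediction.
 */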
static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl,
                                                    int mb_type, int simple,
                                                    int transform_bypass,
                                                    int pixel_shift,
                                                    const int *block_offset,
                                                    int linesize,
                                                    uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    block_offset += 16 * p;
    if (!IS_INTRA4x4(mb_type)) {
        if (IS_INTRA16x16(mb_type)) {
            if (transform_bypass) {
                if (h->ps.sps->profile_idc == 244 &&
                    (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                     sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                    h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
                                                                   sl->mb + (p * 256 << pixel_shift),
                                                                   linesize);
                } else {
                    for (i = 0; i < 16; i++)
                        if (sl->non_zero_count_cache[scan8[i + p * 16]] ||
                            dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                              sl->mb + (i * 16 + p * 256 << pixel_shift),
                                                              linesize);
                }
            } else {
                h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                sl->mb + (p * 256 << pixel_shift),
                                                linesize,
                                                sl->non_zero_count_cache + p * 5 * 8);
            }
        } else if (sl->cbp & 15) {
            if (transform_bypass) {
                const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
                                              : h->h264dsp.h264_add_pixels4_clear;
                for (i = 0; i < 16; i += di)
                    if (sl->non_zero_count_cache[scan8[i + p * 16]])
                        idct_add(dest_y + block_offset[i],
                                 sl->mb + (i * 16 + p * 256 << pixel_shift),
                                 linesize);
            } else {
                if (IS_8x8DCT(mb_type))
                    h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
                else
                    h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
            }
        }
    }
}

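/* The per-bit-depth / per-complexity macroblock decoders dispatched by
 * ff_h264_hl_decode_mb() below are generated by including the template
 * three times with different BITS / SIMPLE settings. */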
#define BITS   8
#define SIMPLE 1
#include "h264_mb_template.c"

#undef  BITS
#define BITS   16
#include "h264_mb_template.c"

#undef  SIMPLE
#define SIMPLE 0
#include "h264_mb_template.c"

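/**
 * Reconstruct one macroblock, dispatching to the specialised implementations
 * generated from the template above based on chroma format, bit depth and
 * whether the macroblock needs the general (complex) path.
 */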
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int is_complex    = CONFIG_SMALL || sl->is_complex ||
                        IS_INTRA_PCM(mb_type) || sl->qscale == 0;

    if (CHROMA444(h)) {
        if (is_complex || h->pixel_shift)
            hl_decode_mb_444_complex(h, sl);
        else
            hl_decode_mb_444_simple_8(h, sl);
    } else if (is_complex) {
        hl_decode_mb_complex(h, sl);
    } else if (h->pixel_shift) {
        hl_decode_mb_simple_16(h, sl);
    } else
        hl_decode_mb_simple_8(h, sl);
}