#ifndef AVCODEC_H264_H
#define AVCODEC_H264_H
#define H264_MAX_PICTURE_COUNT 36
#define H264_MAX_THREADS       32

#define MAX_SPS_COUNT          32
#define MAX_PPS_COUNT         256

#define MAX_MMCO_COUNT         66

#define MAX_DELAYED_PIC_COUNT  16

#define MAX_MBPAIR_SIZE (256*1024) // a tighter bound could be calculated if someone cares about a few bytes
#define ALLOW_INTERLACE

#ifdef ALLOW_INTERLACE
#define MB_MBAFF(h)    (h)->mb_mbaff
#define MB_FIELD(h)    (h)->mb_field_decoding_flag
#define FRAME_MBAFF(h) (h)->mb_aff_frame
#define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
#else
#define FRAME_MBAFF(h)   0
#define FIELD_PICTURE(h) 0
#undef  IS_INTERLACED
#define IS_INTERLACED(mb_type) 0
#endif

#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
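
/*
 * Illustrative note, not part of the original header: when ALLOW_INTERLACE is
 * left undefined, the macros collapse to the compile-time constants above, so
 * interlaced-only branches turn into dead code the compiler can drop, e.g.
 * (hypothetical snippet):
 *
 *     if (FIELD_OR_MBAFF_PICTURE(h))  // constant 0 without ALLOW_INTERLACE
 *         use_field_based_mv_scaling(h);
 */
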
#define CABAC(h) (h)->pps.cabac

#define CHROMA(h)    ((h)->sps.chroma_format_idc)
#define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
#define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)
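
/*
 * Illustrative sketch, not part of the original header: chroma_format_idc, as
 * tested by the CHROMA* macros above, maps onto the number of chroma samples
 * in a 16x16 macroblock.  The helper below exists only for this example.
 */
static inline int chroma_samples_per_mb(int chroma_format_idc)
{
    switch (chroma_format_idc) {
    case 0:  return 0;             // monochrome
    case 1:  return 2 *  8 *  8;   // 4:2:0: two 8x8 chroma blocks
    case 2:  return 2 *  8 * 16;   // 4:2:2: two 8x16 chroma blocks
    default: return 2 * 16 * 16;   // 4:4:4: two 16x16 chroma blocks
    }
}
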
#define EXTENDED_SAR       255

#define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty but it fits in 16 bit
#define MB_TYPE_8x8DCT     0x01000000
#define IS_REF0(a)         ((a) & MB_TYPE_REF0)
#define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)

#define QP_MAX_NUM (51 + 6*6)           // The maximum supported qp
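
/*
 * Illustrative sketch, not part of the original header: the QP range grows by
 * 6 for every extra bit of luma depth, and the decoder accepts up to 14-bit
 * input, which is where the "+ 6*6" above comes from.  Helper name is made up.
 */
static inline int max_qp_for_bit_depth(int bit_depth_luma)
{
    return 51 + 6 * (bit_depth_luma - 8);   // 87 == QP_MAX_NUM at 14 bits
}
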
#define LIST_NOT_USED -1 // FIXME rename?
#define PART_NOT_AVAILABLE -2

// Flags recording how a frame became safe to output again (bit mask).
#define FRAME_RECOVERED_IDR  (1 << 0)   // decoding (re)started at an IDR picture
#define FRAME_RECOVERED_SEI  (1 << 1)   // an SEI recovery point has been reached
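
/*
 * Illustrative sketch, not part of the original header: the two flags form a
 * bit mask, so "recovered by either means" is a single OR test.  The function
 * and parameter names are hypothetical.
 */
static inline int frame_is_recovered(int recovered_flags)
{
    return !!(recovered_flags & (FRAME_RECOVERED_IDR | FRAME_RECOVERED_SEI));
}
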
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
                                  int *dst_length, int *consumed, int length);
 
#define COPY_PICTURE(dst, src) \
do {\
    *(dst) = *(src);\
    (dst)->f.extended_data = (dst)->f.data;\
    (dst)->tf.f = &(dst)->f;\
} while (0)
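
/*
 * Illustrative note, not part of the original header: a plain struct
 * assignment would leave the copy's extended_data and ThreadFrame back-pointer
 * aimed at the source picture's embedded AVFrame, which is why the macro
 * re-seats both after the member-wise copy.  Hypothetical call site:
 *
 *     COPY_PICTURE(dst_pic, src_pic);
 *     // dst_pic->f.extended_data == dst_pic->f.data
 *     // dst_pic->tf.f            == &dst_pic->f
 */
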
#define LUMA_DC_BLOCK_INDEX   48
#define CHROMA_DC_BLOCK_INDEX 49

// This table must be here because scan8[constant] must be known at compiletime
static const uint8_t scan8[16 * 3 + 3] = {
    4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
    6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
    4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
    6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
    4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
    6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
    4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
    6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
    4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
    6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
    4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
    6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
    0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
};
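
/*
 * Illustrative sketch, not part of the original header: scan8[] places each
 * block in a cache laid out 8 entries per row (luma in rows 1-4, columns 4-7,
 * the two chroma planes below, and the last three entries for the DC blocks).
 * Because rows are 8 apart, index - 1 is a block's left neighbour and
 * index - 8 its top neighbour.  The helper below is made up for the example.
 */
static inline void scan8_to_xy(int n, int *x, int *y)
{
    *x = scan8[n] & 7;    // column inside the 8-wide cache
    *y = scan8[n] >> 3;   // row
}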
 
static av_always_inline uint32_t pack16to32(int a, int b)
{
#if HAVE_BIGENDIAN
    return (b & 0xFFFF) + (a << 16);
#else
    return (a & 0xFFFF) + (b << 16);
#endif
}

static av_always_inline uint16_t pack8to16(int a, int b)
{
#if HAVE_BIGENDIAN
    return (b & 0xFF) + (a << 8);
#else
    return (a & 0xFF) + (b << 8);
#endif
}
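
/*
 * Illustrative sketch, not part of the original header: the HAVE_BIGENDIAN
 * split above is chosen so that the packed word has the same byte layout as
 * two consecutive int16_t, letting a pair (e.g. a motion vector) be written
 * with one 32-bit store.  Helper name and union are only for the example.
 */
static inline void store_pair16(int16_t dst[2], int a, int b)
{
    union { uint32_t u32; int16_t s16[2]; } tmp;
    tmp.u32 = pack16to32(a, b);
    dst[0] = tmp.s16[0];   // == a on either endianness
    dst[1] = tmp.s16[1];   // == b on either endianness
}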
 
static av_always_inline int pred_intra_mode(H264Context *h, int n)
{
    const int index8 = scan8[n];
    const int left   = h->intra4x4_pred_mode_cache[index8 - 1];
    const int top    = h->intra4x4_pred_mode_cache[index8 - 8];
    const int min    = FFMIN(left, top);

    tprintf(h->avctx, "mode:%d %d min:%d\n", left, top, min);

    if (min < 0)
        return DC_PRED;
    else
        return min;
}
 
    AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
    i4x4[4] = i4x4_cache[7 + 8 * 3];
    i4x4[5] = i4x4_cache[7 + 8 * 2];
    i4x4[6] = i4x4_cache[7 + 8 * 1];
 
    AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
    AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
    AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
    AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
    AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
    AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
    AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
    AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);

        /* these copies sit inside a chroma-format check in the full function:
         * the extra rows exist only for 4:2:2 / 4:4:4 content */
        AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
        AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
        AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
        AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
 
static av_always_inline void write_back_motion_list(H264Context *h,
                                                    int b_stride,
                                                    int b_xy, int b8_xy,
                                                    int mb_type, int list)
 
    AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
    AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
    AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
    AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
 
            AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
            AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
            AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
 
        ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
        ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
        ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
        ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
 
    const int b8_xy = 4 * h->mb_xy;
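
/*
 * Illustrative note, not part of the original header: reference indices and
 * other per-8x8 data are stored four entries per macroblock, so the first
 * 8x8 index of macroblock mb_xy is simply 4 * mb_xy (e.g. mb_xy == 10 owns
 * 8x8 blocks 40..43).
 */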
 
                  0x0001000100010001ULL));

                  0x0001000100010001ULL));
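
/*
 * Illustrative sketch, not part of the original header: multiplying a 16-bit
 * flag mask by 0x0001000100010001ULL replicates it into all four 16-bit lanes
 * of a 64-bit word; in the surrounding (partially elided) helper this lets
 * four packed sub-macroblock types be tested against the mask with one AND.
 */
static inline uint64_t replicate_mask16(uint16_t mask)
{
    return mask * 0x0001000100010001ULL;   // mask in every 16-bit lane
}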
 
static int find_start_code(const uint8_t *buf, int buf_size,
                           int buf_index, int next_avc)
{
    uint32_t state = -1;

    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1,
                                       &state) - buf - 1;

    return FFMIN(buf_index, buf_size);
}
 
static int get_avc_nalsize(H264Context *h, const uint8_t *buf,
                           int buf_size, int *buf_index)
{
    int i, nalsize = 0;

    if (*buf_index >= buf_size - h->nal_length_size)
        return -1;

    for (i = 0; i < h->nal_length_size; i++)
        nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];
    if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
        av_log(h->avctx, AV_LOG_ERROR,
               "AVC: nal size %d\n", nalsize);
        return -1;
    }
    return nalsize;
}
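
/*
 * Illustrative sketch, not part of the original header: AVC/MP4-style streams
 * prefix each NAL unit with a 1-4 byte big-endian length instead of an
 * Annex B start code; the shift-and-or loop above accumulates that prefix one
 * byte at a time.  Standalone helper written only for illustration.
 */
static inline unsigned read_nal_length_prefix(const uint8_t *p, int length_size)
{
    unsigned v = 0;
    int i;
    for (i = 0; i < length_size; i++)
        v = (v << 8) | p[i];
    return v;
}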
 
#define SLICE_SINGLETHREAD 1
#define SLICE_SKIPED 2

#endif /* AVCODEC_H264_H */