FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/thread.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "error_resilience.h"
36 #include "mpegutils.h"
37 #include "mpegvideo.h"
38 #include "mpegvideodec.h"
39 #include "golomb.h"
40 #include "mathops.h"
41 #include "mpeg_er.h"
42 #include "qpeldsp.h"
43 #include "rectangle.h"
44 #include "thread.h"
45 #include "threadframe.h"
46 
47 #include "rv34vlc.h"
48 #include "rv34data.h"
49 #include "rv34.h"
50 
/**
 * Zero an 8x2 region of motion-vector storage: two rows of two 4-byte
 * entries, written as a pair of adjacent 4-byte-wide fill_rectangle() calls.
 */
static inline void ZERO8x2(void* dst, int stride)
{
    uint8_t *p = dst;

    fill_rectangle(p,     1, 2, stride, 0, 4);
    fill_rectangle(p + 4, 1, 2, stride, 0, 4);
}
56 
57 /** translation of RV30/40 macroblock types to lavc ones */
58 static const int rv34_mb_type_to_lavc[12] = {
71 };
72 
73 
75 
76 static int rv34_decode_mv(RV34DecContext *r, int block_type);
77 
78 /**
79  * @name RV30/40 VLC generating functions
80  * @{
81  */
82 
83 static VLCElem table_data[117592];
84 
85 /**
86  * Generate VLC from codeword lengths.
87  * @param bits codeword lengths (zeroes are accepted)
88  * @param size length of input data
89  * @param vlc output VLC
90  * @param insyms symbols for input codes (NULL for default ones)
91  * @param num VLC table number (for static initialization)
92  */
93 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms,
94  int *offset)
95 {
96  int counts[17] = {0}, codes[17];
97  uint16_t cw[MAX_VLC_SIZE];
98  int maxbits;
99 
100  for (int i = 0; i < size; i++)
101  counts[bits[i]]++;
102 
103  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
104  * So we reset it here. The code assigned to this element is 0x00. */
105  codes[0] = counts[0] = 0;
106  for (int i = 0; i < 16; i++) {
107  codes[i+1] = (codes[i] + counts[i]) << 1;
108  if (counts[i])
109  maxbits = i;
110  }
111  for (int i = 0; i < size; i++)
112  cw[i] = codes[bits[i]]++;
113 
114  vlc->table = &table_data[*offset];
116  ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), size,
117  bits, 1, 1,
118  cw, 2, 2,
119  syms, !!syms, !!syms, INIT_VLC_STATIC_OVERLONG);
120  *offset += vlc->table_size;
121 }
122 
123 /**
124  * Initialize all tables.
125  */
126 static av_cold void rv34_init_tables(void)
127 {
128  int i, j, k, offset = 0;
129 
130  for(i = 0; i < NUM_INTRA_TABLES; i++){
131  for(j = 0; j < 2; j++){
133  &intra_vlcs[i].cbppattern[j], NULL, &offset);
135  &intra_vlcs[i].second_pattern[j], NULL, &offset);
137  &intra_vlcs[i].third_pattern[j], NULL, &offset);
138  for(k = 0; k < 4; k++){
140  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
141  }
142  }
143  for(j = 0; j < 4; j++){
145  &intra_vlcs[i].first_pattern[j], NULL, &offset);
146  }
148  &intra_vlcs[i].coefficient, NULL, &offset);
149  }
150 
151  for(i = 0; i < NUM_INTER_TABLES; i++){
153  &inter_vlcs[i].cbppattern[0], NULL, &offset);
154  for(j = 0; j < 4; j++){
156  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
157  }
158  for(j = 0; j < 2; j++){
160  &inter_vlcs[i].first_pattern[j], NULL, &offset);
162  &inter_vlcs[i].second_pattern[j], NULL, &offset);
164  &inter_vlcs[i].third_pattern[j], NULL, &offset);
165  }
167  &inter_vlcs[i].coefficient, NULL, &offset);
168  }
169 }
170 
171 /** @} */ // vlc group
172 
173 /**
174  * @name RV30/40 4x4 block decoding functions
175  * @{
176  */
177 
178 /**
179  * Decode coded block pattern.
180  */
181 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
182 {
183  int pattern, code, cbp=0;
184  int ones;
185  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
186  static const int shifts[4] = { 0, 2, 8, 10 };
187  const int *curshift = shifts;
188  int i, t, mask;
189 
190  code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
191  pattern = code & 0xF;
192  code >>= 4;
193 
194  ones = rv34_count_ones[pattern];
195 
196  for(mask = 8; mask; mask >>= 1, curshift++){
197  if(pattern & mask)
198  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
199  }
200 
201  for(i = 0; i < 4; i++){
202  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
203  if(t == 1)
204  cbp |= cbp_masks[get_bits1(gb)] << i;
205  if(t == 2)
206  cbp |= cbp_masks[2] << i;
207  }
208  return cbp;
209 }
210 
211 /**
212  * Get one coefficient value from the bitstream and store it.
213  */
214 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC* vlc, int q)
215 {
216  if(coef){
217  if(coef == esc){
218  coef = get_vlc2(gb, vlc->table, 9, 2);
219  if(coef > 23){
220  coef -= 23;
221  coef = 22 + ((1 << coef) | get_bits(gb, coef));
222  }
223  coef += esc;
224  }
225  if(get_bits1(gb))
226  coef = -coef;
227  *dst = (coef*q + 8) >> 4;
228  }
229 }
230 
231 /**
232  * Decode 2x2 subblock of coefficients.
233  */
234 static inline void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
235 {
237 
238  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
239  if(is_block2){
240  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
241  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
242  }else{
243  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
244  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
245  }
246  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
247 }
248 
249 /**
250  * Decode a single coefficient.
251  */
252 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
253 {
254  int coeff = modulo_three_table[code] >> 6;
255  decode_coeff(dst, coeff, 3, gb, vlc, q);
256 }
257 
258 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc,
259  int q_dc, int q_ac1, int q_ac2)
260 {
262 
263  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
264  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
265  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
266  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
267 }
268 
269 /**
270  * Decode coefficients for 4x4 block.
271  *
272  * This is done by filling 2x2 subblocks with decoded coefficients
273  * in this order (the same for subblocks and subblock coefficients):
274  * o--o
275  * /
276  * /
277  * o--o
278  */
279 
280 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
281 {
282  int code, pattern, has_ac = 1;
283 
284  code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);
285 
286  pattern = code & 0x7;
287 
288  code >>= 3;
289 
290  if (modulo_three_table[code] & 0x3F) {
291  decode_subblock3(dst, code, gb, &rvlc->coefficient, q_dc, q_ac1, q_ac2);
292  } else {
293  decode_subblock1(dst, code, gb, &rvlc->coefficient, q_dc);
294  if (!pattern)
295  return 0;
296  has_ac = 0;
297  }
298 
299  if(pattern & 4){
300  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
301  decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
302  }
303  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
304  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
305  decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
306  }
307  if(pattern & 1){
308  code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
309  decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
310  }
311  return has_ac | pattern;
312 }
313 
314 /**
315  * @name RV30/40 bitstream parsing
316  * @{
317  */
318 
319 /**
320  * Decode starting slice position.
321  * @todo Maybe replace with ff_h263_decode_mba() ?
322  */
324 {
325  int i;
326  for(i = 0; i < 5; i++)
327  if(rv34_mb_max_sizes[i] >= mb_size - 1)
328  break;
329  return rv34_mb_bits_sizes[i];
330 }
331 
332 /**
333  * Select VLC set for decoding from current quantizer, modifier and frame type.
334  */
335 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
336 {
337  if(mod == 2 && quant < 19) quant += 10;
338  else if(mod && quant < 26) quant += 5;
339  av_assert2(quant >= 0 && quant < 32);
342 }
343 
344 /**
345  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
346  */
347 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
348 {
349  MpegEncContext *s = &r->s;
350  GetBitContext *gb = &s->gb;
351  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
352  int t;
353 
354  r->is16 = get_bits1(gb);
355  if(r->is16){
356  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
357  r->block_type = RV34_MB_TYPE_INTRA16x16;
358  t = get_bits(gb, 2);
359  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
360  r->luma_vlc = 2;
361  }else{
362  if(!r->rv30){
363  if(!get_bits1(gb))
364  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
365  }
366  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
367  r->block_type = RV34_MB_TYPE_INTRA;
368  if(r->decode_intra_types(r, gb, intra_types) < 0)
369  return -1;
370  r->luma_vlc = 1;
371  }
372 
373  r->chroma_vlc = 0;
374  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
375 
376  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
377 }
378 
379 /**
380  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
381  */
382 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
383 {
384  MpegEncContext *s = &r->s;
385  GetBitContext *gb = &s->gb;
386  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
387  int i, t;
388 
389  r->block_type = r->decode_mb_info(r);
390  if(r->block_type == -1)
391  return -1;
392  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
393  r->mb_type[mb_pos] = r->block_type;
394  if(r->block_type == RV34_MB_SKIP){
395  if(s->pict_type == AV_PICTURE_TYPE_P)
396  r->mb_type[mb_pos] = RV34_MB_P_16x16;
397  if(s->pict_type == AV_PICTURE_TYPE_B)
398  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
399  }
400  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
401  if (rv34_decode_mv(r, r->block_type) < 0)
402  return -1;
403  if(r->block_type == RV34_MB_SKIP){
404  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
405  return 0;
406  }
407  r->chroma_vlc = 1;
408  r->luma_vlc = 0;
409 
410  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
411  if(r->is16){
412  t = get_bits(gb, 2);
413  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
414  r->luma_vlc = 2;
415  }else{
416  if(r->decode_intra_types(r, gb, intra_types) < 0)
417  return -1;
418  r->luma_vlc = 1;
419  }
420  r->chroma_vlc = 0;
421  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
422  }else{
423  for(i = 0; i < 16; i++)
424  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
425  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
426  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
427  r->is16 = 1;
428  r->chroma_vlc = 1;
429  r->luma_vlc = 2;
430  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
431  }
432  }
433 
434  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
435 }
436 
437 /** @} */ //bitstream functions
438 
439 /**
440  * @name motion vector related code (prediction, reconstruction, motion compensation)
441  * @{
442  */
443 
444 /** macroblock partition width in 8x8 blocks */
445 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
446 
447 /** macroblock partition height in 8x8 blocks */
448 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
449 
450 /** availability index for subblocks */
451 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
452 
453 /**
454  * motion vector prediction
455  *
456  * Motion prediction performed for the block by using median prediction of
457  * motion vectors from the left, top and right top blocks but in corner cases
458  * some other vectors may be used instead.
459  */
460 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
461 {
462  MpegEncContext *s = &r->s;
463  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
464  int A[2] = {0}, B[2], C[2];
465  int i, j;
466  int mx, my;
467  int* avail = r->avail_cache + avail_indexes[subblock_no];
468  int c_off = part_sizes_w[block_type];
469 
470  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
471  if(subblock_no == 3)
472  c_off = -1;
473 
474  if(avail[-1]){
475  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
476  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
477  }
478  if(avail[-4]){
479  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
480  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
481  }else{
482  B[0] = A[0];
483  B[1] = A[1];
484  }
485  if(!avail[c_off-4]){
486  if(avail[-4] && (avail[-1] || r->rv30)){
487  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
488  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
489  }else{
490  C[0] = A[0];
491  C[1] = A[1];
492  }
493  }else{
494  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
495  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
496  }
497  mx = mid_pred(A[0], B[0], C[0]);
498  my = mid_pred(A[1], B[1], C[1]);
499  mx += r->dmv[dmv_no][0];
500  my += r->dmv[dmv_no][1];
501  for(j = 0; j < part_sizes_h[block_type]; j++){
502  for(i = 0; i < part_sizes_w[block_type]; i++){
503  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
504  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
505  }
506  }
507 }
508 
509 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
510 
511 /**
512  * Calculate motion vector component that should be added for direct blocks.
513  */
514 static int calc_add_mv(RV34DecContext *r, int dir, int val)
515 {
516  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
517 
518  return (int)(val * (SUINT)mul + 0x2000) >> 14;
519 }
520 
/**
 * Predict motion vector for B-frame macroblock.
 *
 * With all three neighbours available this is a median predictor; otherwise
 * the available vectors are summed (the caller zero-initializes unavailable
 * ones) and halved when exactly two are present.
 */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;   /* keep '/' (not '>>') for round-toward-zero on negatives */
            *my /= 2;
        }
    }
}
540 
541 /**
542  * motion vector prediction for B-frames
543  */
544 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
545 {
546  MpegEncContext *s = &r->s;
547  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
548  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
549  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
550  int has_A = 0, has_B = 0, has_C = 0;
551  int mx, my;
552  int i, j;
553  Picture *cur_pic = s->current_picture_ptr;
554  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
555  int type = cur_pic->mb_type[mb_pos];
556 
557  if((r->avail_cache[6-1] & type) & mask){
558  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
559  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
560  has_A = 1;
561  }
562  if((r->avail_cache[6-4] & type) & mask){
563  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
564  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
565  has_B = 1;
566  }
567  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
568  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
569  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
570  has_C = 1;
571  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
572  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
573  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
574  has_C = 1;
575  }
576 
577  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
578 
579  mx += r->dmv[dir][0];
580  my += r->dmv[dir][1];
581 
582  for(j = 0; j < 2; j++){
583  for(i = 0; i < 2; i++){
584  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
585  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
586  }
587  }
588  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
589  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
590  }
591 }
592 
593 /**
594  * motion vector prediction - RV3 version
595  */
596 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
597 {
598  MpegEncContext *s = &r->s;
599  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
600  int A[2] = {0}, B[2], C[2];
601  int i, j, k;
602  int mx, my;
603  int* avail = r->avail_cache + avail_indexes[0];
604 
605  if(avail[-1]){
606  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
607  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
608  }
609  if(avail[-4]){
610  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
611  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
612  }else{
613  B[0] = A[0];
614  B[1] = A[1];
615  }
616  if(!avail[-4 + 2]){
617  if(avail[-4] && (avail[-1])){
618  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
619  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
620  }else{
621  C[0] = A[0];
622  C[1] = A[1];
623  }
624  }else{
625  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
626  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
627  }
628  mx = mid_pred(A[0], B[0], C[0]);
629  my = mid_pred(A[1], B[1], C[1]);
630  mx += r->dmv[0][0];
631  my += r->dmv[0][1];
632  for(j = 0; j < 2; j++){
633  for(i = 0; i < 2; i++){
634  for(k = 0; k < 2; k++){
635  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
636  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
637  }
638  }
639  }
640 }
641 
/** maps a 1/3-pel remainder to the chroma MC fractional position (see rv34_mc) */
static const int chroma_coeffs[3] = { 0, 3, 5 };
643 
644 /**
645  * generic motion compensation function
646  *
647  * @param r decoder context
648  * @param block_type type of the current block
649  * @param xoff horizontal offset from the start of the current block
650  * @param yoff vertical offset from the start of the current block
651  * @param mv_off offset to the motion vector information
652  * @param width width of the current partition in 8x8 blocks
653  * @param height height of the current partition in 8x8 blocks
654  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
655  * @param thirdpel motion vectors are specified in 1/3 of pixel
656  * @param qpel_mc a set of functions used to perform luma motion compensation
657  * @param chroma_mc a set of functions used to perform chroma motion compensation
658  */
659 static inline void rv34_mc(RV34DecContext *r, const int block_type,
660  const int xoff, const int yoff, int mv_off,
661  const int width, const int height, int dir,
662  const int thirdpel, int weighted,
663  qpel_mc_func (*qpel_mc)[16],
665 {
666  MpegEncContext *s = &r->s;
667  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
668  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
669  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
670  int is16x16 = 1;
671  int emu = 0;
672 
673  if(thirdpel){
674  int chroma_mx, chroma_my;
675  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
676  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
677  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
678  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
679  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
680  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
681  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
682  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
683  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
684  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
685  }else{
686  int cx, cy;
687  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
688  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
689  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
690  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
691  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
692  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
693  umx = cx >> 2;
694  umy = cy >> 2;
695  uvmx = (cx & 3) << 1;
696  uvmy = (cy & 3) << 1;
697  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
698  if(uvmx == 6 && uvmy == 6)
699  uvmx = uvmy = 4;
700  }
701 
702  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
703  /* wait for the referenced mb row to be finished */
704  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
705  const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
706  ff_thread_await_progress(f, mb_row, 0);
707  }
708 
709  dxy = ly*4 + lx;
710  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
711  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
712  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
713  src_x = s->mb_x * 16 + xoff + mx;
714  src_y = s->mb_y * 16 + yoff + my;
715  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
716  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
717  srcY += src_y * s->linesize + src_x;
718  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
719  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
720  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
721  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
722  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
723  srcY -= 2 + 2*s->linesize;
724  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
725  s->linesize, s->linesize,
726  (width << 3) + 6, (height << 3) + 6,
727  src_x - 2, src_y - 2,
728  s->h_edge_pos, s->v_edge_pos);
729  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
730  emu = 1;
731  }
732  if(!weighted){
733  Y = s->dest[0] + xoff + yoff *s->linesize;
734  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
735  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
736  }else{
737  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
738  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
739  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
740  }
741 
742  if(block_type == RV34_MB_P_16x8){
743  qpel_mc[1][dxy](Y, srcY, s->linesize);
744  Y += 8;
745  srcY += 8;
746  }else if(block_type == RV34_MB_P_8x16){
747  qpel_mc[1][dxy](Y, srcY, s->linesize);
748  Y += 8 * s->linesize;
749  srcY += 8 * s->linesize;
750  }
751  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
752  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
753  if (emu) {
754  uint8_t *uvbuf = s->sc.edge_emu_buffer;
755 
756  s->vdsp.emulated_edge_mc(uvbuf, srcU,
757  s->uvlinesize, s->uvlinesize,
758  (width << 2) + 1, (height << 2) + 1,
759  uvsrc_x, uvsrc_y,
760  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
761  srcU = uvbuf;
762  uvbuf += 9*s->uvlinesize;
763 
764  s->vdsp.emulated_edge_mc(uvbuf, srcV,
765  s->uvlinesize, s->uvlinesize,
766  (width << 2) + 1, (height << 2) + 1,
767  uvsrc_x, uvsrc_y,
768  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
769  srcV = uvbuf;
770  }
771  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
772  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
773 }
774 
775 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
776  const int xoff, const int yoff, int mv_off,
777  const int width, const int height, int dir)
778 {
779  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
780  r->rdsp.put_pixels_tab,
781  r->rdsp.put_chroma_pixels_tab);
782 }
783 
785 {
786  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
787  r->tmp_b_block_y[0],
788  r->tmp_b_block_y[1],
789  r->weight1,
790  r->weight2,
791  r->s.linesize);
792  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
793  r->tmp_b_block_uv[0],
794  r->tmp_b_block_uv[2],
795  r->weight1,
796  r->weight2,
797  r->s.uvlinesize);
798  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
799  r->tmp_b_block_uv[1],
800  r->tmp_b_block_uv[3],
801  r->weight1,
802  r->weight2,
803  r->s.uvlinesize);
804 }
805 
806 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
807 {
808  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
809 
810  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
811  r->rdsp.put_pixels_tab,
812  r->rdsp.put_chroma_pixels_tab);
813  if(!weighted){
814  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
815  r->rdsp.avg_pixels_tab,
816  r->rdsp.avg_chroma_pixels_tab);
817  }else{
818  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
819  r->rdsp.put_pixels_tab,
820  r->rdsp.put_chroma_pixels_tab);
821  rv4_weight(r);
822  }
823 }
824 
826 {
827  int i, j;
828  int weighted = !r->rv30 && r->weight1 != 8192;
829 
830  for(j = 0; j < 2; j++)
831  for(i = 0; i < 2; i++){
832  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
833  weighted,
834  r->rdsp.put_pixels_tab,
835  r->rdsp.put_chroma_pixels_tab);
836  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
837  weighted,
838  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
839  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
840  }
841  if(weighted)
842  rv4_weight(r);
843 }
844 
845 /** number of motion vectors in each macroblock type */
846 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
847 
848 /**
849  * Decode motion vector differences
850  * and perform motion vector reconstruction and motion compensation.
851  */
852 static int rv34_decode_mv(RV34DecContext *r, int block_type)
853 {
854  MpegEncContext *s = &r->s;
855  GetBitContext *gb = &s->gb;
856  int i, j, k, l;
857  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
858  int next_bt;
859 
860  memset(r->dmv, 0, sizeof(r->dmv));
861  for(i = 0; i < num_mvs[block_type]; i++){
862  r->dmv[i][0] = get_interleaved_se_golomb(gb);
863  r->dmv[i][1] = get_interleaved_se_golomb(gb);
864  if (r->dmv[i][0] == INVALID_VLC ||
865  r->dmv[i][1] == INVALID_VLC) {
866  r->dmv[i][0] = r->dmv[i][1] = 0;
867  return AVERROR_INVALIDDATA;
868  }
869  }
870  switch(block_type){
871  case RV34_MB_TYPE_INTRA:
873  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
874  return 0;
875  case RV34_MB_SKIP:
876  if(s->pict_type == AV_PICTURE_TYPE_P){
877  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
878  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
879  break;
880  }
881  case RV34_MB_B_DIRECT:
882  //surprisingly, it uses motion scheme from next reference frame
883  /* wait for the current mb row to be finished */
884  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
885  ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
886 
887  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
888  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
889  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
890  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
891  }else
892  for(j = 0; j < 2; j++)
893  for(i = 0; i < 2; i++)
894  for(k = 0; k < 2; k++)
895  for(l = 0; l < 2; l++)
896  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
897  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
898  rv34_mc_2mv(r, block_type);
899  else
901  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
902  break;
903  case RV34_MB_P_16x16:
904  case RV34_MB_P_MIX16x16:
905  rv34_pred_mv(r, block_type, 0, 0);
906  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
907  break;
908  case RV34_MB_B_FORWARD:
909  case RV34_MB_B_BACKWARD:
910  r->dmv[1][0] = r->dmv[0][0];
911  r->dmv[1][1] = r->dmv[0][1];
912  if(r->rv30)
913  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
914  else
915  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
916  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
917  break;
918  case RV34_MB_P_16x8:
919  case RV34_MB_P_8x16:
920  rv34_pred_mv(r, block_type, 0, 0);
921  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
922  if(block_type == RV34_MB_P_16x8){
923  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
924  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
925  }
926  if(block_type == RV34_MB_P_8x16){
927  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
928  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
929  }
930  break;
931  case RV34_MB_B_BIDIR:
932  rv34_pred_mv_b (r, block_type, 0);
933  rv34_pred_mv_b (r, block_type, 1);
934  rv34_mc_2mv (r, block_type);
935  break;
936  case RV34_MB_P_8x8:
937  for(i=0;i< 4;i++){
938  rv34_pred_mv(r, block_type, i, i);
939  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
940  }
941  break;
942  }
943 
944  return 0;
945 }
946 /** @} */ // mv group
947 
948 /**
949  * @name Macroblock reconstruction functions
950  * @{
951  */
952 /** mapping of RV30/40 intra prediction types to standard H.264 types */
953 static const int ittrans[9] = {
956 };
957 
958 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
959 static const int ittrans16[4] = {
961 };
962 
963 /**
964  * Perform 4x4 intra prediction.
965  */
966 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
967 {
968  uint8_t *prev = dst - stride + 4;
969  uint32_t topleft;
970 
971  if(!up && !left)
972  itype = DC_128_PRED;
973  else if(!up){
974  if(itype == VERT_PRED) itype = HOR_PRED;
975  if(itype == DC_PRED) itype = LEFT_DC_PRED;
976  }else if(!left){
977  if(itype == HOR_PRED) itype = VERT_PRED;
978  if(itype == DC_PRED) itype = TOP_DC_PRED;
980  }
981  if(!down){
983  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
984  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
985  }
986  if(!right && up){
987  topleft = dst[-stride + 3] * 0x01010101u;
988  prev = (uint8_t*)&topleft;
989  }
990  r->h.pred4x4[itype](dst, prev, stride);
991 }
992 
993 static inline int adjust_pred16(int itype, int up, int left)
994 {
995  if(!up && !left)
996  itype = DC_128_PRED8x8;
997  else if(!up){
998  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
999  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1000  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1001  }else if(!left){
1002  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1003  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1004  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1005  }
1006  return itype;
1007 }
1008 
1010  uint8_t *pdst, int stride,
1011  int fc, int sc, int q_dc, int q_ac)
1012 {
1013  MpegEncContext *s = &r->s;
1014  int16_t *ptr = s->block[0];
1015  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1016  fc, sc, q_dc, q_ac, q_ac);
1017  if(has_ac){
1018  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1019  }else{
1020  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1021  ptr[0] = 0;
1022  }
1023 }
1024 
/**
 * Reconstruct an intra 16x16 macroblock: decode the separate DC block,
 * predict the whole 16x16 luma area, then decode/add the 16 luma AC
 * blocks followed by the 8 chroma blocks.
 *
 * The bitstream is read strictly in this order, so statement order here
 * is load-bearing.
 */
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    LOCAL_ALIGNED_16(int16_t, block16, [16]);
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    // Intra-specific luma DC quantizer and regular AC quantizer.
    int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
        q_ac = rv34_qscale_tab[s->qscale];
    uint8_t *dst = s->dest[0];
    int16_t *ptr = s->block[0];
    int i, j, itype, has_ac;

    memset(block16, 0, 16 * sizeof(*block16));

    // The 16 luma DC coefficients are coded as one extra 4x4 block.
    has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
    if(has_ac)
        r->rdsp.rv34_inv_transform(block16);
    else
        r->rdsp.rv34_inv_transform_dc(block16);

    itype = ittrans16[intra_types[0]];
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
    r->h.pred16x16[itype](dst, s->linesize);

    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++, cbp >>= 1){
            // DC for this 4x4 block comes from the transformed DC block.
            int dc = block16[i + j*4];

            if(cbp & 1){
                has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
            }else
                has_ac = 0;

            if(has_ac){
                ptr[0] = dc;
                r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
            }else
                r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
        }

        dst += 4*s->linesize;
    }

    // Chroma uses the same prediction mode, except PLANE which maps to DC.
    itype = ittrans16[intra_types[0]];
    if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(j = 1; j < 3; j++){
        dst = s->dest[j];
        r->h.pred8x8[itype](dst, s->uvlinesize);
        for(i = 0; i < 4; i++, cbp >>= 1){
            uint8_t *pdst;
            if(!(cbp & 1)) continue;
            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;

            rv34_process_block(r, pdst, s->uvlinesize,
                               r->chroma_vlc, 1, q_dc, q_ac);
        }
    }
}
1087 
/**
 * Reconstruct a 4x4-predicted intra macroblock: per-subblock prediction
 * followed by residual decode for each coded 4x4 block (16 luma, then
 * 4+4 chroma). Prediction availability is tracked in a local 6x8 map so
 * each subblock can see which of its neighbours are already decoded.
 */
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    MpegEncContext *s = &r->s;
    uint8_t *dst = s->dest[0];
    int avail[6*8] = {0};
    int i, j, k;
    int idx, q_ac, q_dc;

    // Set neighbour information.
    if(r->avail_cache[1])
        avail[0] = 1;
    if(r->avail_cache[2])
        avail[1] = avail[2] = 1;
    if(r->avail_cache[3])
        avail[3] = avail[4] = 1;
    if(r->avail_cache[4])
        avail[5] = 1;
    if(r->avail_cache[5])
        avail[8] = avail[16] = 1;
    if(r->avail_cache[9])
        avail[24] = avail[32] = 1;

    q_ac = rv34_qscale_tab[s->qscale];
    for(j = 0; j < 4; j++){
        idx = 9 + j*8;   // row base inside the 8-wide availability map
        for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
            // up/left/down/right availability from the map offsets.
            rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
            avail[idx] = 1;
            if(!(cbp & 1)) continue;

            rv34_process_block(r, dst, s->linesize,
                               r->luma_vlc, 0, q_ac, q_ac);
        }
        dst += s->linesize * 4 - 4*4;
        intra_types += r->intra_types_stride;
    }

    intra_types -= r->intra_types_stride * 4;

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(k = 0; k < 2; k++){
        dst = s->dest[1+k];
        // Reset the 2x2 chroma sub-availability; MB-level neighbours remain.
        fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);

        for(j = 0; j < 2; j++){
            int* acache = r->avail_cache + 6 + j*4;
            for(i = 0; i < 2; i++, cbp >>= 1, acache++){
                // Chroma reuses the luma mode of the co-located 8x8 area.
                int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
                rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
                acache[0] = 1;

                if(!(cbp&1)) continue;

                rv34_process_block(r, dst + 4*i, s->uvlinesize,
                                   r->chroma_vlc, 1, q_dc, q_ac);
            }

            dst += 4*s->uvlinesize;
        }
    }
}
1151 
/**
 * Return 1 if the motion vector differs from its neighbour (at -step)
 * by more than 3 in either component, 0 otherwise.
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    int comp;

    for (comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1163 
1165 {
1166  MpegEncContext *s = &r->s;
1167  int hmvmask = 0, vmvmask = 0, i, j;
1168  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1169  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1170  for(j = 0; j < 16; j += 8){
1171  for(i = 0; i < 2; i++){
1172  if(is_mv_diff_gt_3(motion_val + i, 1))
1173  vmvmask |= 0x11 << (j + i*2);
1174  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1175  hmvmask |= 0x03 << (j + i*2);
1176  }
1177  motion_val += s->b8_stride;
1178  }
1179  if(s->first_slice_line)
1180  hmvmask &= ~0x000F;
1181  if(!s->mb_x)
1182  vmvmask &= ~0x1111;
1183  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1184  vmvmask |= (vmvmask & 0x4444) >> 1;
1185  hmvmask |= (hmvmask & 0x0F00) >> 4;
1186  if(s->mb_x)
1187  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1188  if(!s->first_slice_line)
1189  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1190  }
1191  return hmvmask | vmvmask;
1192 }
1193 
/**
 * Decode one macroblock of an inter (P/B) slice: parse the MB header
 * (which also performs motion compensation for inter MBs), then decode
 * and add the residual. Intra MBs inside inter slices are dispatched to
 * the intra reconstruction paths.
 *
 * @return 0 on success, -1 on bitstream error
 */
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    uint8_t *dst = s->dest[0];
    int16_t *ptr = s->block[0];
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp, cbp2;
    int q_dc, q_ac, has_ac;
    int i, j;
    int dist;

    // Calculate which neighbours are available. Maybe it's worth optimizing too.
    memset(r->avail_cache, 0, sizeof(r->avail_cache));
    fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
    // dist counts MBs since the slice resync point; neighbours from before
    // the current slice must not be used for prediction.
    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
    if(s->mb_x && dist)
        r->avail_cache[5] =
        r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
    if(dist >= s->mb_width)
        r->avail_cache[2] =
        r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
        r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
    if(s->mb_x && dist > s->mb_width)
        r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];

    s->qscale = r->si.quant;
    cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
    r->cbp_luma [mb_pos] = cbp;
    r->cbp_chroma[mb_pos] = cbp >> 16;
    r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
    s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;

    if(cbp == -1)
        return -1;

    if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
        if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
        else        rv34_output_intra(r, intra_types, cbp);
        return 0;
    }

    if(r->is16){
        // Only for RV34_MB_P_MIX16x16
        LOCAL_ALIGNED_16(int16_t, block16, [16]);
        memset(block16, 0, 16 * sizeof(*block16));
        q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
        q_ac = rv34_qscale_tab[s->qscale];
        // Separate DC block, as in the intra 16x16 case.
        if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
            r->rdsp.rv34_inv_transform(block16);
        else
            r->rdsp.rv34_inv_transform_dc(block16);

        q_ac = rv34_qscale_tab[s->qscale]; // NOTE: redundant reload, harmless

        for(j = 0; j < 4; j++){
            for(i = 0; i < 4; i++, cbp >>= 1){
                int dc = block16[i + j*4];

                if(cbp & 1){
                    has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
                }else
                    has_ac = 0;

                if(has_ac){
                    ptr[0] = dc;
                    r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
                }else
                    r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
            }

            dst += 4*s->linesize;
        }

        // MIX16x16 switches back to the inter VLC set for the rest of the MB.
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
    }else{
        q_ac = rv34_qscale_tab[s->qscale];

        for(j = 0; j < 4; j++){
            for(i = 0; i < 4; i++, cbp >>= 1){
                if(!(cbp & 1)) continue;

                rv34_process_block(r, dst + 4*i, s->linesize,
                                   r->luma_vlc, 0, q_ac, q_ac);
            }
            dst += 4*s->linesize;
        }
    }

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(j = 1; j < 3; j++){
        dst = s->dest[j];
        for(i = 0; i < 4; i++, cbp >>= 1){
            uint8_t *pdst;
            if(!(cbp & 1)) continue;
            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;

            rv34_process_block(r, pdst, s->uvlinesize,
                               r->chroma_vlc, 1, q_dc, q_ac);
        }
    }

    return 0;
}
1301 
/**
 * Decode one macroblock of an intra (I) slice: compute neighbour
 * availability, parse the intra MB header and dispatch to the 16x16 or
 * 4x4 reconstruction path.
 *
 * @return 0 on success, -1 on bitstream error
 */
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    int cbp, dist;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    // Calculate which neighbours are available. Maybe it's worth optimizing too.
    memset(r->avail_cache, 0, sizeof(r->avail_cache));
    fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
    // Only neighbours decoded since the slice resync point are usable.
    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
    if(s->mb_x && dist)
        r->avail_cache[5] =
        r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
    if(dist >= s->mb_width)
        r->avail_cache[2] =
        r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
        r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
    if(s->mb_x && dist > s->mb_width)
        r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];

    s->qscale = r->si.quant;
    cbp = rv34_decode_intra_mb_header(r, intra_types);
    r->cbp_luma [mb_pos] = cbp;
    r->cbp_chroma[mb_pos] = cbp >> 16;
    // Intra MBs always have all edges filtered.
    r->deblock_coefs[mb_pos] = 0xFFFF;
    s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;

    if(cbp == -1)
        return -1;

    if(r->is16){
        rv34_output_i16x16(r, intra_types, cbp);
        return 0;
    }

    rv34_output_intra(r, intra_types, cbp);
    return 0;
}
1341 
1343 {
1344  int bits;
1345  if(s->mb_y >= s->mb_height)
1346  return 1;
1347  if(!s->mb_num_left)
1348  return 1;
1349  if(r->s.mb_skip_run > 1)
1350  return 0;
1351  bits = get_bits_left(&s->gb);
1352  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1353  return 1;
1354  return 0;
1355 }
1356 
1357 
1359 {
1360  av_freep(&r->intra_types_hist);
1361  r->intra_types = NULL;
1362  av_freep(&r->tmp_b_block_base);
1363  av_freep(&r->mb_type);
1364  av_freep(&r->cbp_luma);
1365  av_freep(&r->cbp_chroma);
1366  av_freep(&r->deblock_coefs);
1367 }
1368 
1369 
1371 {
1372  r->intra_types_stride = r->s.mb_width * 4 + 4;
1373 
1374  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1375  sizeof(*r->cbp_chroma));
1376  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1377  sizeof(*r->cbp_luma));
1378  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1379  sizeof(*r->deblock_coefs));
1380  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1381  sizeof(*r->intra_types_hist));
1382  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1383  sizeof(*r->mb_type));
1384 
1385  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1386  r->intra_types_hist && r->mb_type)) {
1387  r->s.context_reinit = 1;
1389  return AVERROR(ENOMEM);
1390  }
1391 
1392  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1393 
1394  return 0;
1395 }
1396 
1397 
1399 {
1401  return rv34_decoder_alloc(r);
1402 }
1403 
1404 
1405 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1406 {
1407  MpegEncContext *s = &r->s;
1408  GetBitContext *gb = &s->gb;
1409  int mb_pos, slice_type;
1410  int res;
1411 
1412  init_get_bits(&r->s.gb, buf, buf_size*8);
1413  res = r->parse_slice_header(r, gb, &r->si);
1414  if(res < 0){
1415  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1416  return -1;
1417  }
1418 
1419  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1420  if (slice_type != s->pict_type) {
1421  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1422  return AVERROR_INVALIDDATA;
1423  }
1424  if (s->width != r->si.width || s->height != r->si.height) {
1425  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1426  return AVERROR_INVALIDDATA;
1427  }
1428 
1429  r->si.end = end;
1430  s->qscale = r->si.quant;
1431  s->mb_num_left = r->si.end - r->si.start;
1432  r->s.mb_skip_run = 0;
1433 
1434  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1435  if(r->si.start != mb_pos){
1436  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1437  s->mb_x = r->si.start % s->mb_width;
1438  s->mb_y = r->si.start / s->mb_width;
1439  }
1440  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1441  s->first_slice_line = 1;
1442  s->resync_mb_x = s->mb_x;
1443  s->resync_mb_y = s->mb_y;
1444 
1446  while(!check_slice_end(r, s)) {
1447  ff_update_block_index(s, 8, 0, 1);
1448 
1449  if(r->si.type)
1450  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1451  else
1452  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1453  if(res < 0){
1454  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1455  return -1;
1456  }
1457  if (++s->mb_x == s->mb_width) {
1458  s->mb_x = 0;
1459  s->mb_y++;
1461 
1462  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1463  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1464 
1465  if(r->loop_filter && s->mb_y >= 2)
1466  r->loop_filter(r, s->mb_y - 2);
1467 
1468  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1469  ff_thread_report_progress(&s->current_picture_ptr->tf,
1470  s->mb_y - 2, 0);
1471 
1472  }
1473  if(s->mb_x == s->resync_mb_x)
1474  s->first_slice_line=0;
1475  s->mb_num_left--;
1476  }
1477  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1478 
1479  return s->mb_y == s->mb_height;
1480 }
1481 
1482 /** @} */ // reconstruction group end
1483 
1484 /**
1485  * Initialize decoder.
1486  */
1488 {
1489  static AVOnce init_static_once = AV_ONCE_INIT;
1490  RV34DecContext *r = avctx->priv_data;
1491  MpegEncContext *s = &r->s;
1492  int ret;
1493 
1494  ff_mpv_decode_init(s, avctx);
1495  s->out_format = FMT_H263;
1496 
1497  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1498  avctx->has_b_frames = 1;
1499  s->low_delay = 0;
1500 
1502  if ((ret = ff_mpv_common_init(s)) < 0)
1503  return ret;
1504 
1505  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1506 
1507  if ((ret = rv34_decoder_alloc(r)) < 0) {
1508  ff_mpv_common_end(&r->s);
1509  return ret;
1510  }
1511 
1512  ff_thread_once(&init_static_once, rv34_init_tables);
1513 
1514  return 0;
1515 }
1516 
1518 {
1519  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1520  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1521  int err;
1522 
1523  if (dst == src || !s1->context_initialized)
1524  return 0;
1525 
1526  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
1527  s->height = s1->height;
1528  s->width = s1->width;
1529  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1530  return err;
1531  if ((err = rv34_decoder_realloc(r)) < 0)
1532  return err;
1533  }
1534 
1535  r->cur_pts = r1->cur_pts;
1536  r->last_pts = r1->last_pts;
1537  r->next_pts = r1->next_pts;
1538 
1539  memset(&r->si, 0, sizeof(r->si));
1540 
1541  // Do no call ff_mpeg_update_thread_context on a partially initialized
1542  // decoder context.
1543  if (!s1->context_initialized)
1544  return 0;
1545 
1546  return ff_mpeg_update_thread_context(dst, src);
1547 }
1548 
1549 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1550 {
1551  if (n < slice_count) {
1552  if(avctx->slice_count) return avctx->slice_offset[n];
1553  else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1554  } else
1555  return buf_size;
1556 }
1557 
1558 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1559 {
1560  RV34DecContext *r = avctx->priv_data;
1561  MpegEncContext *s = &r->s;
1562  int got_picture = 0, ret;
1563 
1564  ff_er_frame_end(&s->er);
1566  s->mb_num_left = 0;
1567 
1568  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1569  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1570 
1571  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1572  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1573  return ret;
1574  ff_print_debug_info(s, s->current_picture_ptr, pict);
1575  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1576  got_picture = 1;
1577  } else if (s->last_picture_ptr) {
1578  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1579  return ret;
1580  ff_print_debug_info(s, s->last_picture_ptr, pict);
1581  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1582  got_picture = 1;
1583  }
1584 
1585  return got_picture;
1586 }
1587 
1588 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1589 {
1590  // attempt to keep aspect during typical resolution switches
1591  if (!sar.num)
1592  sar = (AVRational){1, 1};
1593 
1594  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1595  return sar;
1596 }
1597 
1599  int *got_picture_ptr, AVPacket *avpkt)
1600 {
1601  const uint8_t *buf = avpkt->data;
1602  int buf_size = avpkt->size;
1603  RV34DecContext *r = avctx->priv_data;
1604  MpegEncContext *s = &r->s;
1605  SliceInfo si;
1606  int i, ret;
1607  int slice_count;
1608  const uint8_t *slices_hdr = NULL;
1609  int last = 0;
1610  int faulty_b = 0;
1611  int offset;
1612 
1613  /* no supplementary picture */
1614  if (buf_size == 0) {
1615  /* special case for last picture */
1616  if (s->low_delay==0 && s->next_picture_ptr) {
1617  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1618  return ret;
1619  s->next_picture_ptr = NULL;
1620 
1621  *got_picture_ptr = 1;
1622  }
1623  return 0;
1624  }
1625 
1626  if(!avctx->slice_count){
1627  slice_count = (*buf++) + 1;
1628  slices_hdr = buf + 4;
1629  buf += 8 * slice_count;
1630  buf_size -= 1 + 8 * slice_count;
1631  }else
1632  slice_count = avctx->slice_count;
1633 
1634  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1635  //parse first slice header to check whether this frame can be decoded
1636  if(offset < 0 || offset > buf_size){
1637  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1638  return AVERROR_INVALIDDATA;
1639  }
1640  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1641  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1642  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1643  return AVERROR_INVALIDDATA;
1644  }
1645  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1646  si.type == AV_PICTURE_TYPE_B) {
1647  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1648  "reference data.\n");
1649  faulty_b = 1;
1650  }
1651  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1652  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1653  || avctx->skip_frame >= AVDISCARD_ALL)
1654  return avpkt->size;
1655 
1656  /* first slice */
1657  if (si.start == 0) {
1658  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1659  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1660  s->mb_num_left);
1661  if (!s->context_reinit)
1662  ff_er_frame_end(&s->er);
1664  }
1665 
1666  if (s->width != si.width || s->height != si.height || s->context_reinit) {
1667  int err;
1668 
1669  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1670  si.width, si.height);
1671 
1672  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1673  return AVERROR_INVALIDDATA;
1674 
1675  s->avctx->sample_aspect_ratio = update_sar(
1676  s->width, s->height, s->avctx->sample_aspect_ratio,
1677  si.width, si.height);
1678  s->width = si.width;
1679  s->height = si.height;
1680 
1681  err = ff_set_dimensions(s->avctx, s->width, s->height);
1682  if (err < 0)
1683  return err;
1684  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1685  return err;
1686  if ((err = rv34_decoder_realloc(r)) < 0)
1687  return err;
1688  }
1689  if (faulty_b)
1690  return AVERROR_INVALIDDATA;
1691  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1692  if (ff_mpv_frame_start(s, s->avctx) < 0)
1693  return -1;
1695  if (!r->tmp_b_block_base) {
1696  int i;
1697 
1698  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1699  for (i = 0; i < 2; i++)
1700  r->tmp_b_block_y[i] = r->tmp_b_block_base
1701  + i * 16 * s->linesize;
1702  for (i = 0; i < 4; i++)
1703  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1704  + (i >> 1) * 8 * s->uvlinesize
1705  + (i & 1) * 16;
1706  }
1707  r->cur_pts = si.pts;
1708  if (s->pict_type != AV_PICTURE_TYPE_B) {
1709  r->last_pts = r->next_pts;
1710  r->next_pts = r->cur_pts;
1711  } else {
1712  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1713  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1714  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1715 
1716  if(!refdist){
1717  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1718  r->scaled_weight = 0;
1719  }else{
1720  if (FFMAX(dist0, dist1) > refdist)
1721  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1722 
1723  r->mv_weight1 = (dist0 << 14) / refdist;
1724  r->mv_weight2 = (dist1 << 14) / refdist;
1725  if((r->mv_weight1|r->mv_weight2) & 511){
1726  r->weight1 = r->mv_weight1;
1727  r->weight2 = r->mv_weight2;
1728  r->scaled_weight = 0;
1729  }else{
1730  r->weight1 = r->mv_weight1 >> 9;
1731  r->weight2 = r->mv_weight2 >> 9;
1732  r->scaled_weight = 1;
1733  }
1734  }
1735  }
1736  s->mb_x = s->mb_y = 0;
1737  ff_thread_finish_setup(s->avctx);
1738  } else if (s->context_reinit) {
1739  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
1740  "reinitialize (start MB is %d).\n", si.start);
1741  return AVERROR_INVALIDDATA;
1742  } else if (HAVE_THREADS &&
1743  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1744  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1745  "multithreading mode (start MB is %d).\n", si.start);
1746  return AVERROR_INVALIDDATA;
1747  }
1748 
1749  for(i = 0; i < slice_count; i++){
1750  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1751  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1752  int size;
1753 
1754  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1755  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1756  break;
1757  }
1758  size = offset1 - offset;
1759 
1760  r->si.end = s->mb_width * s->mb_height;
1761  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1762 
1763  if(i+1 < slice_count){
1764  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1765  if (offset2 < offset1 || offset2 > buf_size) {
1766  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1767  break;
1768  }
1769  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1770  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1771  size = offset2 - offset;
1772  }else
1773  r->si.end = si.start;
1774  }
1775  av_assert0 (size >= 0 && size <= buf_size - offset);
1776  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1777  if(last)
1778  break;
1779  }
1780 
1781  if (s->current_picture_ptr) {
1782  if (last) {
1783  if(r->loop_filter)
1784  r->loop_filter(r, s->mb_height - 1);
1785 
1786  ret = finish_frame(avctx, pict);
1787  if (ret < 0)
1788  return ret;
1789  *got_picture_ptr = ret;
1790  } else if (HAVE_THREADS &&
1791  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1792  av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
1793  /* always mark the current frame as finished, frame-mt supports
1794  * only complete frames */
1795  ff_er_frame_end(&s->er);
1797  s->mb_num_left = 0;
1798  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1799  return AVERROR_INVALIDDATA;
1800  }
1801  }
1802 
1803  return avpkt->size;
1804 }
1805 
1807 {
1808  RV34DecContext *r = avctx->priv_data;
1809 
1810  ff_mpv_common_end(&r->s);
1812 
1813  return 0;
1814 }
RV34DecContext
decoder context
Definition: rv34.h:86
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:668
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:82
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:58
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1088
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:116
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1806
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:494
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:596
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
RV34VLC::third_pattern
VLC third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:70
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:959
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:846
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:642
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:323
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AVCodecContext::slice_offset
int * slice_offset
slice offsets in the frame in bytes
Definition: avcodec.h:759
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1517
AVPacket::data
uint8_t * data
Definition: packet.h:374
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:205
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1398
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1342
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:551
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1569
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1164
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:45
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:451
AVCodecContext::slice_count
int slice_count
slice count
Definition: avcodec.h:752
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
decode_subblock
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:234
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:993
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:49
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1370
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1698
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:460
GetBitContext
Definition: get_bits.h:61
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:52
val
static double val(void *priv, double ch)
Definition: aeval.c:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:487
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
quant
static int quant(float coef, const float Q, const float rounding)
Quantize one coefficient.
Definition: aacenc_utils.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:775
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:179
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:90
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1487
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:966
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1302
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:611
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:51
mask
static const uint16_t mask[17]
Definition: lzw.c:38
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:65
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:694
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:187
width
#define width
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:775
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1194
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:74
s
#define s(width, name)
Definition: cbs_vp9.c:256
RV34VLC::second_pattern
VLC second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:69
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:80
s1
#define s1
Definition: regdef.h:38
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:67
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:514
bits
uint8_t bits
Definition: vp3data.h:141
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:131
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:74
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
IS_INTRA
#define IS_INTRA(x, y)
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1558
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
mul
static float mul(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:39
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:49
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:177
decode_subblock3
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:258
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:509
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1405
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:126
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:51
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:50
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:76
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
V
#define V
Definition: avdct.c:30
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
RV34VLC::cbppattern
VLC cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:66
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
qpeldsp.h
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:176
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:852
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:48
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
VLC::table_allocated
int table_allocated
Definition: vlc.h:34
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:214
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:825
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
f
f
Definition: af_crystalizer.c:122
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1152
AVPacket::size
int size
Definition: packet.h:375
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:50
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1598
rectangle.h
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:602
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1588
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:54
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:27
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:263
height
#define height
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:82
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:69
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1025
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:46
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:544
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1477
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:81
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:448
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: vlc.c:272
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:37
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:806
rv34_gen_vlc
static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:93
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:57
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
table_data
static VLCElem table_data[117592]
Definition: rv34.c:83
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:75
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1549
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:750
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
decode_subblock1
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:252
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
VLC::bits
int bits
Definition: vlc.h:32
mid_pred
#define mid_pred
Definition: mathops.h:98
ret
ret
Definition: filter_design.txt:187
INIT_VLC_STATIC_OVERLONG
#define INIT_VLC_STATIC_OVERLONG
Definition: vlc.h:101
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:784
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:51
B
#define B
Definition: huffyuvdsp.h:32
AVCodecContext
main external API structure.
Definition: avcodec.h:398
SliceInfo::start
int start
Definition: rv34.h:79
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
ThreadFrame
Definition: threadframe.h:27
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:382
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:445
VLC
Definition: vlc.h:31
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:953
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:814
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:479
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:280
VLC::table
VLCElem * table
Definition: vlc.h:33
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:347
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:81
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1009
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:56
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:34
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
RV34VLC::coefficient
VLC coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:71
RV34VLC::first_pattern
VLC first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:68
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:659
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:38
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:53
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:892
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:74
mpeg_er.h
d
d
Definition: ffmpeg_filter.c:156
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:80
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:52
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:47
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:335
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:55
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:181
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1358
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:524