FFmpeg
rv34.c
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/thread.h"
33 
34 #include "avcodec.h"
35 #include "error_resilience.h"
36 #include "mpegutils.h"
37 #include "mpegvideo.h"
38 #include "mpegvideodec.h"
39 #include "golomb.h"
40 #include "internal.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "qpeldsp.h"
44 #include "rectangle.h"
45 #include "thread.h"
46 #include "threadframe.h"
47 
48 #include "rv34vlc.h"
49 #include "rv34data.h"
50 #include "rv34.h"
51 
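/* Helper for the motion vector code below: zeroes the 2x2 group of motion
 * vectors (four 4-byte entries, two per row) belonging to one macroblock,
 * using two fill_rectangle() calls of 4 bytes each per row. */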
52 static inline void ZERO8x2(void* dst, int stride)
53 {
54  fill_rectangle(dst, 1, 2, stride, 0, 4);
55  fill_rectangle(((uint8_t*)(dst))+4, 1, 2, stride, 0, 4);
56 }
57 
58 /** translation of RV30/40 macroblock types to lavc ones */
59 static const int rv34_mb_type_to_lavc[12] = {
60  MB_TYPE_INTRA,
61  MB_TYPE_INTRA16x16 | MB_TYPE_SEPARATE_DC,
62  MB_TYPE_16x16 | MB_TYPE_L0,
63  MB_TYPE_8x8 | MB_TYPE_L0,
64  MB_TYPE_16x16 | MB_TYPE_L0,
65  MB_TYPE_16x16 | MB_TYPE_L1,
66  MB_TYPE_SKIP,
67  MB_TYPE_DIRECT2 | MB_TYPE_16x16,
68  MB_TYPE_16x8 | MB_TYPE_L0,
69  MB_TYPE_8x16 | MB_TYPE_L0,
70  MB_TYPE_16x16 | MB_TYPE_L0L1,
71  MB_TYPE_16x16 | MB_TYPE_L0 | MB_TYPE_SEPARATE_DC
72 };
73 
74 
75 static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
76 
77 static int rv34_decode_mv(RV34DecContext *r, int block_type);
78 
79 /**
80  * @name RV30/40 VLC generating functions
81  * @{
82  */
83 
84 static VLCElem table_data[117592];
85 
86 /**
87  * Generate VLC from codeword lengths.
88  * @param bits codeword lengths (zeroes are accepted)
89  * @param size length of input data
90  * @param vlc output VLC
91  * @param syms symbols for the input codes (NULL to use the default 0..size-1 mapping)
92  * @param offset offset into the static table_data buffer; advanced by the size of the generated table
93  */
94 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms,
95  int *offset)
96 {
97  int counts[17] = {0}, codes[17];
98  uint16_t cw[MAX_VLC_SIZE];
99  int maxbits;
100 
101  for (int i = 0; i < size; i++)
102  counts[bits[i]]++;
103 
104  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
105  * So we reset it here. The code assigned to this element is 0x00. */
106  codes[0] = counts[0] = 0;
107  for (int i = 0; i < 16; i++) {
108  codes[i+1] = (codes[i] + counts[i]) << 1;
109  if (counts[i])
110  maxbits = i;
111  }
112  for (int i = 0; i < size; i++)
113  cw[i] = codes[bits[i]]++;
114 
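 /* The loops above perform canonical Huffman code assignment. For example,
  * lengths {1, 2, 2} give counts[1] = 1 and counts[2] = 2, hence codes[1] = 0
  * and codes[2] = (0 + 1) << 1 = 2, so the assigned codewords are 0, 10 and 11;
  * maxbits ends up as the longest length actually used (2 here). */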
115  vlc->table = &table_data[*offset];
116  vlc->table_allocated = FF_ARRAY_ELEMS(table_data) - *offset;
117  ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), size,
118  bits, 1, 1,
119  cw, 2, 2,
120  syms, !!syms, !!syms, INIT_VLC_STATIC_OVERLONG);
121  *offset += vlc->table_size;
122 }
123 
124 /**
125  * Initialize all tables.
126  */
127 static av_cold void rv34_init_tables(void)
128 {
129  int i, j, k, offset = 0;
130 
131  for(i = 0; i < NUM_INTRA_TABLES; i++){
132  for(j = 0; j < 2; j++){
133  rv34_gen_vlc(rv34_table_intra_cbppat[i][j], CBPPAT_VLC_SIZE,
134  &intra_vlcs[i].cbppattern[j], NULL, &offset);
135  rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE,
136  &intra_vlcs[i].second_pattern[j], NULL, &offset);
137  rv34_gen_vlc(rv34_table_intra_thirdpat[i][j], OTHERBLK_VLC_SIZE,
138  &intra_vlcs[i].third_pattern[j], NULL, &offset);
139  for(k = 0; k < 4; k++){
140  rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE,
141  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
142  }
143  }
144  for(j = 0; j < 4; j++){
145  rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE,
146  &intra_vlcs[i].first_pattern[j], NULL, &offset);
147  }
148  rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE,
149  &intra_vlcs[i].coefficient, NULL, &offset);
150  }
151 
152  for(i = 0; i < NUM_INTER_TABLES; i++){
153  rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE,
154  &inter_vlcs[i].cbppattern[0], NULL, &offset);
155  for(j = 0; j < 4; j++){
156  rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE,
157  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
158  }
159  for(j = 0; j < 2; j++){
160  rv34_gen_vlc(rv34_table_inter_firstpat[i][j], FIRSTBLK_VLC_SIZE,
161  &inter_vlcs[i].first_pattern[j], NULL, &offset);
162  rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE,
163  &inter_vlcs[i].second_pattern[j], NULL, &offset);
164  rv34_gen_vlc(rv34_table_inter_thirdpat[i][j], OTHERBLK_VLC_SIZE,
165  &inter_vlcs[i].third_pattern[j], NULL, &offset);
166  }
167  rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE,
168  &inter_vlcs[i].coefficient, NULL, &offset);
169  }
170 }
171 
172 /** @} */ // vlc group
173 
174 /**
175  * @name RV30/40 4x4 block decoding functions
176  * @{
177  */
178 
179 /**
180  * Decode coded block pattern.
181  */
182 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
183 {
184  int pattern, code, cbp=0;
185  int ones;
186  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
187  static const int shifts[4] = { 0, 2, 8, 10 };
188  const int *curshift = shifts;
189  int i, t, mask;
190 
191  code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
192  pattern = code & 0xF;
193  code >>= 4;
194 
195  ones = rv34_count_ones[pattern];
196 
197  for(mask = 8; mask; mask >>= 1, curshift++){
198  if(pattern & mask)
199  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
200  }
201 
202  for(i = 0; i < 4; i++){
203  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
204  if(t == 1)
205  cbp |= cbp_masks[get_bits1(gb)] << i;
206  if(t == 2)
207  cbp |= cbp_masks[2] << i;
208  }
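 /* Layout of the result: the sixteen luma 4x4 coded-block flags occupy the
  * low 16 bits and the chroma flags sit at bit 16 and above, which is why the
  * macroblock decoders store cbp and cbp >> 16 separately. */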
209  return cbp;
210 }
211 
212 /**
213  * Get one coefficient value from the bitstream and store it.
214  */
215 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC* vlc, int q)
216 {
217  if(coef){
218  if(coef == esc){
219  coef = get_vlc2(gb, vlc->table, 9, 2);
220  if(coef > 23){
221  coef -= 23;
222  coef = 22 + ((1 << coef) | get_bits(gb, coef));
223  }
224  coef += esc;
225  }
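 /* Escaped levels: a VLC value above 23 means n = value - 23 extra raw bits
  * follow and the level becomes 22 + ((1 << n) | bits) before esc is added
  * back. The store below is fixed-point dequantization, coef * q / 16 rounded
  * to nearest, with q taken from rv34_qscale_tab by the callers. */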
226  if(get_bits1(gb))
227  coef = -coef;
228  *dst = (coef*q + 8) >> 4;
229  }
230 }
231 
232 /**
233  * Decode 2x2 subblock of coefficients.
234  */
235 static inline void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
236 {
237  int flags = modulo_three_table[code];
238 
239  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
240  if(is_block2){
241  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
242  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
243  }else{
244  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
245  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
246  }
247  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
248 }
249 
250 /**
251  * Decode a single coefficient.
252  */
253 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
254 {
255  int coeff = modulo_three_table[code] >> 6;
256  decode_coeff(dst, coeff, 3, gb, vlc, q);
257 }
258 
259 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc,
260  int q_dc, int q_ac1, int q_ac2)
261 {
262  int flags = modulo_three_table[code];
263 
264  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
265  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
266  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
267  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
268 }
269 
270 /**
271  * Decode coefficients for 4x4 block.
272  *
273  * This is done by filling 2x2 subblocks with decoded coefficients
274  * in this order (the same for subblocks and subblock coefficients):
275  *  o--o
276  *     /
277  *    /
278  *  o--o
279  */
280 
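/* The first VLC code below covers the top-left 2x2 subblock (which holds the
 * DC coefficient) plus a 3-bit pattern telling which of the remaining three
 * 2x2 subblocks carry coefficients of their own. */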
281 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
282 {
283  int code, pattern, has_ac = 1;
284 
285  code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);
286 
287  pattern = code & 0x7;
288 
289  code >>= 3;
290 
291  if (modulo_three_table[code] & 0x3F) {
292  decode_subblock3(dst, code, gb, &rvlc->coefficient, q_dc, q_ac1, q_ac2);
293  } else {
294  decode_subblock1(dst, code, gb, &rvlc->coefficient, q_dc);
295  if (!pattern)
296  return 0;
297  has_ac = 0;
298  }
299 
300  if(pattern & 4){
301  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
302  decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
303  }
304  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
305  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
306  decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
307  }
308  if(pattern & 1){
309  code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
310  decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
311  }
312  return has_ac | pattern;
313 }
314 
315 /**
316  * @name RV30/40 bitstream parsing
317  * @{
318  */
319 
320 /**
321  * Decode starting slice position.
322  * @todo Maybe replace with ff_h263_decode_mba() ?
323  */
324 int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
325 {
326  int i;
327  for(i = 0; i < 5; i++)
328  if(rv34_mb_max_sizes[i] >= mb_size - 1)
329  break;
330  return rv34_mb_bits_sizes[i];
331 }
332 
333 /**
334  * Select VLC set for decoding from current quantizer, modifier and frame type.
335  */
336 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
337 {
338  if(mod == 2 && quant < 19) quant += 10;
339  else if(mod && quant < 26) quant += 5;
340  av_assert2(quant >= 0 && quant < 32);
341  return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][quant]]
342  : &intra_vlcs[rv34_quant_to_vlc_set[0][quant]];
343 }
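/* Example: quant = 15 with mod == 2 selects the VLC set mapped to quantizer 25,
 * while mod == 1 would select the set for quantizer 20. */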
344 
345 /**
346  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
347  */
348 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
349 {
350  MpegEncContext *s = &r->s;
351  GetBitContext *gb = &s->gb;
352  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
353  int t;
354 
355  r->is16 = get_bits1(gb);
356  if(r->is16){
357  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
358  r->block_type = RV34_MB_TYPE_INTRA16x16;
359  t = get_bits(gb, 2);
360  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
361  r->luma_vlc = 2;
362  }else{
363  if(!r->rv30){
364  if(!get_bits1(gb))
365  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
366  }
367  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
368  r->block_type = RV34_MB_TYPE_INTRA;
369  if(r->decode_intra_types(r, gb, intra_types) < 0)
370  return -1;
371  r->luma_vlc = 1;
372  }
373 
374  r->chroma_vlc = 0;
375  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
376 
377  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
378 }
379 
380 /**
381  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
382  */
383 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
384 {
385  MpegEncContext *s = &r->s;
386  GetBitContext *gb = &s->gb;
387  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
388  int i, t;
389 
390  r->block_type = r->decode_mb_info(r);
391  if(r->block_type == -1)
392  return -1;
393  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
394  r->mb_type[mb_pos] = r->block_type;
395  if(r->block_type == RV34_MB_SKIP){
396  if(s->pict_type == AV_PICTURE_TYPE_P)
397  r->mb_type[mb_pos] = RV34_MB_P_16x16;
398  if(s->pict_type == AV_PICTURE_TYPE_B)
399  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
400  }
401  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
402  if (rv34_decode_mv(r, r->block_type) < 0)
403  return -1;
404  if(r->block_type == RV34_MB_SKIP){
405  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
406  return 0;
407  }
408  r->chroma_vlc = 1;
409  r->luma_vlc = 0;
410 
411  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
412  if(r->is16){
413  t = get_bits(gb, 2);
414  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
415  r->luma_vlc = 2;
416  }else{
417  if(r->decode_intra_types(r, gb, intra_types) < 0)
418  return -1;
419  r->luma_vlc = 1;
420  }
421  r->chroma_vlc = 0;
422  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
423  }else{
424  for(i = 0; i < 16; i++)
425  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
426  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
427  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
428  r->is16 = 1;
429  r->chroma_vlc = 1;
430  r->luma_vlc = 2;
431  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
432  }
433  }
434 
435  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
436 }
437 
438 /** @} */ //bitstream functions
439 
440 /**
441  * @name motion vector related code (prediction, reconstruction, motion compensation)
442  * @{
443  */
444 
445 /** macroblock partition width in 8x8 blocks */
446 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
447 
448 /** macroblock partition height in 8x8 blocks */
449 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
450 
451 /** availability index for subblocks */
452 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
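/* The availability cache is a 4-entry-wide grid centred on the current
 * macroblock: indices 6, 7, 10 and 11 are its four 8x8 subblocks, 5 and 9 the
 * left neighbour, 1..4 the row above (top-left to top-right). Offsets such as
 * avail[-1], avail[-4] and avail[c_off - 4] below therefore address the left,
 * top and top-right neighbours of a subblock. */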
453 
454 /**
455  * motion vector prediction
456  *
457  * Motion prediction is performed for the block using median prediction of the
458  * motion vectors from the left, top and top-right blocks; in corner cases
459  * some other vectors may be used instead.
460  */
461 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
462 {
463  MpegEncContext *s = &r->s;
464  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
465  int A[2] = {0}, B[2], C[2];
466  int i, j;
467  int mx, my;
468  int* avail = r->avail_cache + avail_indexes[subblock_no];
469  int c_off = part_sizes_w[block_type];
470 
471  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
472  if(subblock_no == 3)
473  c_off = -1;
474 
475  if(avail[-1]){
476  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
477  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
478  }
479  if(avail[-4]){
480  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
481  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
482  }else{
483  B[0] = A[0];
484  B[1] = A[1];
485  }
486  if(!avail[c_off-4]){
487  if(avail[-4] && (avail[-1] || r->rv30)){
488  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
489  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
490  }else{
491  C[0] = A[0];
492  C[1] = A[1];
493  }
494  }else{
495  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
496  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
497  }
498  mx = mid_pred(A[0], B[0], C[0]);
499  my = mid_pred(A[1], B[1], C[1]);
500  mx += r->dmv[dmv_no][0];
501  my += r->dmv[dmv_no][1];
502  for(j = 0; j < part_sizes_h[block_type]; j++){
503  for(i = 0; i < part_sizes_w[block_type]; i++){
504  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
505  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
506  }
507  }
508 }
509 
510 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
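/* The PTS values compared here are the 13-bit TR fields from the slice
 * headers, so differences are taken modulo 8192 to stay positive across
 * timestamp wraparound. */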
511 
512 /**
513  * Calculate motion vector component that should be added for direct blocks.
514  */
515 static int calc_add_mv(RV34DecContext *r, int dir, int val)
516 {
517  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
518 
519  return (int)(val * (SUINT)mul + 0x2000) >> 14;
520 }
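/* r->mv_weight1 and r->mv_weight2 are dist0/refdist and dist1/refdist in 1.14
 * fixed point (set up in ff_rv34_decode_frame), so this scales the co-located
 * vector by the temporal distance, negated for the backward direction. */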
521 
522 /**
523  * Predict motion vector for B-frame macroblock.
524  */
525 static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
526  int A_avail, int B_avail, int C_avail,
527  int *mx, int *my)
528 {
529  if(A_avail + B_avail + C_avail != 3){
530  *mx = A[0] + B[0] + C[0];
531  *my = A[1] + B[1] + C[1];
532  if(A_avail + B_avail + C_avail == 2){
533  *mx /= 2;
534  *my /= 2;
535  }
536  }else{
537  *mx = mid_pred(A[0], B[0], C[0]);
538  *my = mid_pred(A[1], B[1], C[1]);
539  }
540 }
541 
542 /**
543  * motion vector prediction for B-frames
544  */
545 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
546 {
547  MpegEncContext *s = &r->s;
548  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
549  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
550  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
551  int has_A = 0, has_B = 0, has_C = 0;
552  int mx, my;
553  int i, j;
554  Picture *cur_pic = s->current_picture_ptr;
555  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
556  int type = cur_pic->mb_type[mb_pos];
557 
558  if((r->avail_cache[6-1] & type) & mask){
559  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
560  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
561  has_A = 1;
562  }
563  if((r->avail_cache[6-4] & type) & mask){
564  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
565  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
566  has_B = 1;
567  }
568  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
569  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
570  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
571  has_C = 1;
572  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
573  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
574  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
575  has_C = 1;
576  }
577 
578  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
579 
580  mx += r->dmv[dir][0];
581  my += r->dmv[dir][1];
582 
583  for(j = 0; j < 2; j++){
584  for(i = 0; i < 2; i++){
585  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
586  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
587  }
588  }
589  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
590  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
591  }
592 }
593 
594 /**
595  * motion vector prediction - RV3 version
596  */
597 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
598 {
599  MpegEncContext *s = &r->s;
600  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
601  int A[2] = {0}, B[2], C[2];
602  int i, j, k;
603  int mx, my;
604  int* avail = r->avail_cache + avail_indexes[0];
605 
606  if(avail[-1]){
607  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
608  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
609  }
610  if(avail[-4]){
611  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
612  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
613  }else{
614  B[0] = A[0];
615  B[1] = A[1];
616  }
617  if(!avail[-4 + 2]){
618  if(avail[-4] && (avail[-1])){
619  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
620  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
621  }else{
622  C[0] = A[0];
623  C[1] = A[1];
624  }
625  }else{
626  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
627  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
628  }
629  mx = mid_pred(A[0], B[0], C[0]);
630  my = mid_pred(A[1], B[1], C[1]);
631  mx += r->dmv[0][0];
632  my += r->dmv[0][1];
633  for(j = 0; j < 2; j++){
634  for(i = 0; i < 2; i++){
635  for(k = 0; k < 2; k++){
636  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
637  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
638  }
639  }
640  }
641 }
642 
643 static const int chroma_coeffs[3] = { 0, 3, 5 };
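/* Maps the third-pel remainder of an RV30 chroma vector (0, 1 or 2) to the
 * closest offset expressible by the eighth-pel chroma MC functions used
 * below: 0, 3/8 and 5/8 respectively. */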
644 
645 /**
646  * generic motion compensation function
647  *
648  * @param r decoder context
649  * @param block_type type of the current block
650  * @param xoff horizontal offset from the start of the current block
651  * @param yoff vertical offset from the start of the current block
652  * @param mv_off offset to the motion vector information
653  * @param width width of the current partition in 8x8 blocks
654  * @param height height of the current partition in 8x8 blocks
655  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
656  * @param thirdpel motion vectors are specified in 1/3 of pixel
657  * @param qpel_mc a set of functions used to perform luma motion compensation
658  * @param chroma_mc a set of functions used to perform chroma motion compensation
659  */
660 static inline void rv34_mc(RV34DecContext *r, const int block_type,
661  const int xoff, const int yoff, int mv_off,
662  const int width, const int height, int dir,
663  const int thirdpel, int weighted,
664  qpel_mc_func (*qpel_mc)[16],
665  h264_chroma_mc_func (*chroma_mc))
666 {
667  MpegEncContext *s = &r->s;
668  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
669  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
670  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
671  int is16x16 = 1;
672  int emu = 0;
673 
674  if(thirdpel){
675  int chroma_mx, chroma_my;
676  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
677  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
678  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
679  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
680  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
681  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
682  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
683  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
684  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
685  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
686  }else{
687  int cx, cy;
688  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
689  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
690  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
691  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
692  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
693  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
694  umx = cx >> 2;
695  umy = cy >> 2;
696  uvmx = (cx & 3) << 1;
697  uvmy = (cy & 3) << 1;
698  //due to some flaw RV40 uses the same motion compensation routine for the H2V2 and H3V3 cases
699  if(uvmx == 6 && uvmy == 6)
700  uvmx = uvmy = 4;
701  }
702 
703  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
704  /* wait for the referenced mb row to be finished */
705  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
706  ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
707  ff_thread_await_progress(f, mb_row, 0);
708  }
709 
710  dxy = ly*4 + lx;
711  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
712  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
713  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
714  src_x = s->mb_x * 16 + xoff + mx;
715  src_y = s->mb_y * 16 + yoff + my;
716  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
717  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
718  srcY += src_y * s->linesize + src_x;
719  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
720  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
721  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
722  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
723  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
724  srcY -= 2 + 2*s->linesize;
725  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
726  s->linesize, s->linesize,
727  (width << 3) + 6, (height << 3) + 6,
728  src_x - 2, src_y - 2,
729  s->h_edge_pos, s->v_edge_pos);
730  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
731  emu = 1;
732  }
733  if(!weighted){
734  Y = s->dest[0] + xoff + yoff *s->linesize;
735  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
736  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
737  }else{
738  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
739  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
740  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
741  }
742 
743  if(block_type == RV34_MB_P_16x8){
744  qpel_mc[1][dxy](Y, srcY, s->linesize);
745  Y += 8;
746  srcY += 8;
747  }else if(block_type == RV34_MB_P_8x16){
748  qpel_mc[1][dxy](Y, srcY, s->linesize);
749  Y += 8 * s->linesize;
750  srcY += 8 * s->linesize;
751  }
752  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
753  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
754  if (emu) {
755  uint8_t *uvbuf = s->sc.edge_emu_buffer;
756 
757  s->vdsp.emulated_edge_mc(uvbuf, srcU,
758  s->uvlinesize, s->uvlinesize,
759  (width << 2) + 1, (height << 2) + 1,
760  uvsrc_x, uvsrc_y,
761  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
762  srcU = uvbuf;
763  uvbuf += 9*s->uvlinesize;
764 
765  s->vdsp.emulated_edge_mc(uvbuf, srcV,
766  s->uvlinesize, s->uvlinesize,
767  (width << 2) + 1, (height << 2) + 1,
768  uvsrc_x, uvsrc_y,
769  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
770  srcV = uvbuf;
771  }
772  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
773  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
774 }
775 
776 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
777  const int xoff, const int yoff, int mv_off,
778  const int width, const int height, int dir)
779 {
780  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
781  r->rdsp.put_pixels_tab,
782  r->rdsp.put_chroma_pixels_tab);
783 }
784 
785 static void rv4_weight(RV34DecContext *r)
786 {
787  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
788  r->tmp_b_block_y[0],
789  r->tmp_b_block_y[1],
790  r->weight1,
791  r->weight2,
792  r->s.linesize);
793  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
794  r->tmp_b_block_uv[0],
795  r->tmp_b_block_uv[2],
796  r->weight1,
797  r->weight2,
798  r->s.uvlinesize);
799  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
800  r->tmp_b_block_uv[1],
801  r->tmp_b_block_uv[3],
802  r->weight1,
803  r->weight2,
804  r->s.uvlinesize);
805 }
806 
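/* Bi-directional prediction: for RV40 B-blocks with unequal reference
 * distances (r->weight1 != 8192 and not the BIDIR type) both predictions are
 * rendered into the tmp_b_block buffers and blended by rv4_weight() above;
 * otherwise the second prediction is simply averaged onto the first using the
 * avg_ tables. */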
807 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
808 {
809  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
810 
811  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
812  r->rdsp.put_pixels_tab,
813  r->rdsp.put_chroma_pixels_tab);
814  if(!weighted){
815  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
816  r->rdsp.avg_pixels_tab,
817  r->rdsp.avg_chroma_pixels_tab);
818  }else{
819  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
820  r->rdsp.put_pixels_tab,
821  r->rdsp.put_chroma_pixels_tab);
822  rv4_weight(r);
823  }
824 }
825 
826 static void rv34_mc_2mv_skip(RV34DecContext *r)
827 {
828  int i, j;
829  int weighted = !r->rv30 && r->weight1 != 8192;
830 
831  for(j = 0; j < 2; j++)
832  for(i = 0; i < 2; i++){
833  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
834  weighted,
835  r->rdsp.put_pixels_tab,
836  r->rdsp.put_chroma_pixels_tab);
837  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
838  weighted,
839  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
840  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
841  }
842  if(weighted)
843  rv4_weight(r);
844 }
845 
846 /** number of motion vectors in each macroblock type */
847 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
848 
849 /**
850  * Decode motion vector differences
851  * and perform motion vector reconstruction and motion compensation.
852  */
853 static int rv34_decode_mv(RV34DecContext *r, int block_type)
854 {
855  MpegEncContext *s = &r->s;
856  GetBitContext *gb = &s->gb;
857  int i, j, k, l;
858  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
859  int next_bt;
860 
861  memset(r->dmv, 0, sizeof(r->dmv));
862  for(i = 0; i < num_mvs[block_type]; i++){
863  r->dmv[i][0] = get_interleaved_se_golomb(gb);
864  r->dmv[i][1] = get_interleaved_se_golomb(gb);
865  if (r->dmv[i][0] == INVALID_VLC ||
866  r->dmv[i][1] == INVALID_VLC) {
867  r->dmv[i][0] = r->dmv[i][1] = 0;
868  return AVERROR_INVALIDDATA;
869  }
870  }
871  switch(block_type){
872  case RV34_MB_TYPE_INTRA:
873  case RV34_MB_TYPE_INTRA16x16:
874  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
875  return 0;
876  case RV34_MB_SKIP:
877  if(s->pict_type == AV_PICTURE_TYPE_P){
878  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
879  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
880  break;
881  }
882  case RV34_MB_B_DIRECT:
883  //surprisingly, it uses the motion scheme from the next reference frame
884  /* wait for the current mb row to be finished */
885  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
886  ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
887 
888  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
889  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
890  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
891  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
892  }else
893  for(j = 0; j < 2; j++)
894  for(i = 0; i < 2; i++)
895  for(k = 0; k < 2; k++)
896  for(l = 0; l < 2; l++)
897  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
898  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
899  rv34_mc_2mv(r, block_type);
900  else
901  rv34_mc_2mv_skip(r);
902  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
903  break;
904  case RV34_MB_P_16x16:
905  case RV34_MB_P_MIX16x16:
906  rv34_pred_mv(r, block_type, 0, 0);
907  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
908  break;
909  case RV34_MB_B_FORWARD:
910  case RV34_MB_B_BACKWARD:
911  r->dmv[1][0] = r->dmv[0][0];
912  r->dmv[1][1] = r->dmv[0][1];
913  if(r->rv30)
914  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
915  else
916  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
917  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
918  break;
919  case RV34_MB_P_16x8:
920  case RV34_MB_P_8x16:
921  rv34_pred_mv(r, block_type, 0, 0);
922  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
923  if(block_type == RV34_MB_P_16x8){
924  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
925  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
926  }
927  if(block_type == RV34_MB_P_8x16){
928  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
929  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
930  }
931  break;
932  case RV34_MB_B_BIDIR:
933  rv34_pred_mv_b (r, block_type, 0);
934  rv34_pred_mv_b (r, block_type, 1);
935  rv34_mc_2mv (r, block_type);
936  break;
937  case RV34_MB_P_8x8:
938  for(i=0;i< 4;i++){
939  rv34_pred_mv(r, block_type, i, i);
940  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
941  }
942  break;
943  }
944 
945  return 0;
946 }
947 /** @} */ // mv group
948 
949 /**
950  * @name Macroblock reconstruction functions
951  * @{
952  */
953 /** mapping of RV30/40 intra prediction types to standard H.264 types */
954 static const int ittrans[9] = {
957 };
958 
959 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
960 static const int ittrans16[4] = {
961  DC_PRED8x8, VERT_PRED8x8, HOR_PRED8x8, PLANE_PRED8x8,
962 };
963 
964 /**
965  * Perform 4x4 intra prediction.
966  */
967 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
968 {
969  uint8_t *prev = dst - stride + 4;
970  uint32_t topleft;
971 
972  if(!up && !left)
973  itype = DC_128_PRED;
974  else if(!up){
975  if(itype == VERT_PRED) itype = HOR_PRED;
976  if(itype == DC_PRED) itype = LEFT_DC_PRED;
977  }else if(!left){
978  if(itype == HOR_PRED) itype = VERT_PRED;
979  if(itype == DC_PRED) itype = TOP_DC_PRED;
980  if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
981  }
982  if(!down){
983  if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
984  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
985  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
986  }
987  if(!right && up){
988  topleft = dst[-stride + 3] * 0x01010101u;
989  prev = (uint8_t*)&topleft;
990  }
991  r->h.pred4x4[itype](dst, prev, stride);
992 }
993 
994 static inline int adjust_pred16(int itype, int up, int left)
995 {
996  if(!up && !left)
997  itype = DC_128_PRED8x8;
998  else if(!up){
999  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1000  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1001  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1002  }else if(!left){
1003  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1004  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1005  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1006  }
1007  return itype;
1008 }
1009 
1010 static void rv34_process_block(RV34DecContext *r,
1011  uint8_t *pdst, int stride,
1012  int fc, int sc, int q_dc, int q_ac)
1013 {
1014  MpegEncContext *s = &r->s;
1015  int16_t *ptr = s->block[0];
1016  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1017  fc, sc, q_dc, q_ac, q_ac);
1018  if(has_ac){
1019  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1020  }else{
1021  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1022  ptr[0] = 0;
1023  }
1024 }
1025 
1026 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1027 {
1028  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1029  MpegEncContext *s = &r->s;
1030  GetBitContext *gb = &s->gb;
1031  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1032  q_ac = rv34_qscale_tab[s->qscale];
1033  uint8_t *dst = s->dest[0];
1034  int16_t *ptr = s->block[0];
1035  int i, j, itype, has_ac;
1036 
1037  memset(block16, 0, 16 * sizeof(*block16));
1038 
1039  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1040  if(has_ac)
1041  r->rdsp.rv34_inv_transform(block16);
1042  else
1043  r->rdsp.rv34_inv_transform_dc(block16);
1044 
1045  itype = ittrans16[intra_types[0]];
1046  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1047  r->h.pred16x16[itype](dst, s->linesize);
1048 
1049  for(j = 0; j < 4; j++){
1050  for(i = 0; i < 4; i++, cbp >>= 1){
1051  int dc = block16[i + j*4];
1052 
1053  if(cbp & 1){
1054  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1055  }else
1056  has_ac = 0;
1057 
1058  if(has_ac){
1059  ptr[0] = dc;
1060  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1061  }else
1062  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1063  }
1064 
1065  dst += 4*s->linesize;
1066  }
1067 
1068  itype = ittrans16[intra_types[0]];
1069  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1070  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1071 
1072  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1073  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1074 
1075  for(j = 1; j < 3; j++){
1076  dst = s->dest[j];
1077  r->h.pred8x8[itype](dst, s->uvlinesize);
1078  for(i = 0; i < 4; i++, cbp >>= 1){
1079  uint8_t *pdst;
1080  if(!(cbp & 1)) continue;
1081  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1082 
1083  rv34_process_block(r, pdst, s->uvlinesize,
1084  r->chroma_vlc, 1, q_dc, q_ac);
1085  }
1086  }
1087 }
1088 
1089 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1090 {
1091  MpegEncContext *s = &r->s;
1092  uint8_t *dst = s->dest[0];
1093  int avail[6*8] = {0};
1094  int i, j, k;
1095  int idx, q_ac, q_dc;
1096 
1097  // Set neighbour information.
1098  if(r->avail_cache[1])
1099  avail[0] = 1;
1100  if(r->avail_cache[2])
1101  avail[1] = avail[2] = 1;
1102  if(r->avail_cache[3])
1103  avail[3] = avail[4] = 1;
1104  if(r->avail_cache[4])
1105  avail[5] = 1;
1106  if(r->avail_cache[5])
1107  avail[8] = avail[16] = 1;
1108  if(r->avail_cache[9])
1109  avail[24] = avail[32] = 1;
1110 
1111  q_ac = rv34_qscale_tab[s->qscale];
1112  for(j = 0; j < 4; j++){
1113  idx = 9 + j*8;
1114  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1115  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1116  avail[idx] = 1;
1117  if(!(cbp & 1)) continue;
1118 
1119  rv34_process_block(r, dst, s->linesize,
1120  r->luma_vlc, 0, q_ac, q_ac);
1121  }
1122  dst += s->linesize * 4 - 4*4;
1123  intra_types += r->intra_types_stride;
1124  }
1125 
1126  intra_types -= r->intra_types_stride * 4;
1127 
1128  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1129  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1130 
1131  for(k = 0; k < 2; k++){
1132  dst = s->dest[1+k];
1133  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1134 
1135  for(j = 0; j < 2; j++){
1136  int* acache = r->avail_cache + 6 + j*4;
1137  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1138  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1139  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1140  acache[0] = 1;
1141 
1142  if(!(cbp&1)) continue;
1143 
1144  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1145  r->chroma_vlc, 1, q_dc, q_ac);
1146  }
1147 
1148  dst += 4*s->uvlinesize;
1149  }
1150  }
1151 }
1152 
1153 static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
1154 {
1155  int d;
1156  d = motion_val[0][0] - motion_val[-step][0];
1157  if(d < -3 || d > 3)
1158  return 1;
1159  d = motion_val[0][1] - motion_val[-step][1];
1160  if(d < -3 || d > 3)
1161  return 1;
1162  return 0;
1163 }
1164 
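/* Builds a 16-bit mask with one bit per 4x4 luma block of the macroblock,
 * marking edges whose motion vectors differ by more than 3 units in either
 * component; the callers OR it with the coded-block pattern to decide which
 * edges to deblock. */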
1165 static int rv34_set_deblock_coef(RV34DecContext *r)
1166 {
1167  MpegEncContext *s = &r->s;
1168  int hmvmask = 0, vmvmask = 0, i, j;
1169  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1170  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1171  for(j = 0; j < 16; j += 8){
1172  for(i = 0; i < 2; i++){
1173  if(is_mv_diff_gt_3(motion_val + i, 1))
1174  vmvmask |= 0x11 << (j + i*2);
1175  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1176  hmvmask |= 0x03 << (j + i*2);
1177  }
1178  motion_val += s->b8_stride;
1179  }
1180  if(s->first_slice_line)
1181  hmvmask &= ~0x000F;
1182  if(!s->mb_x)
1183  vmvmask &= ~0x1111;
1184  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1185  vmvmask |= (vmvmask & 0x4444) >> 1;
1186  hmvmask |= (hmvmask & 0x0F00) >> 4;
1187  if(s->mb_x)
1188  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1189  if(!s->first_slice_line)
1190  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1191  }
1192  return hmvmask | vmvmask;
1193 }
1194 
1195 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1196 {
1197  MpegEncContext *s = &r->s;
1198  GetBitContext *gb = &s->gb;
1199  uint8_t *dst = s->dest[0];
1200  int16_t *ptr = s->block[0];
1201  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1202  int cbp, cbp2;
1203  int q_dc, q_ac, has_ac;
1204  int i, j;
1205  int dist;
1206 
1207  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1208  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1209  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1210  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1211  if(s->mb_x && dist)
1212  r->avail_cache[5] =
1213  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1214  if(dist >= s->mb_width)
1215  r->avail_cache[2] =
1216  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1217  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1218  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1219  if(s->mb_x && dist > s->mb_width)
1220  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1221 
1222  s->qscale = r->si.quant;
1223  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1224  r->cbp_luma [mb_pos] = cbp;
1225  r->cbp_chroma[mb_pos] = cbp >> 16;
1226  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1227  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1228 
1229  if(cbp == -1)
1230  return -1;
1231 
1232  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1233  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1234  else rv34_output_intra(r, intra_types, cbp);
1235  return 0;
1236  }
1237 
1238  if(r->is16){
1239  // Only for RV34_MB_P_MIX16x16
1240  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1241  memset(block16, 0, 16 * sizeof(*block16));
1242  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1243  q_ac = rv34_qscale_tab[s->qscale];
1244  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1245  r->rdsp.rv34_inv_transform(block16);
1246  else
1247  r->rdsp.rv34_inv_transform_dc(block16);
1248 
1249  q_ac = rv34_qscale_tab[s->qscale];
1250 
1251  for(j = 0; j < 4; j++){
1252  for(i = 0; i < 4; i++, cbp >>= 1){
1253  int dc = block16[i + j*4];
1254 
1255  if(cbp & 1){
1256  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1257  }else
1258  has_ac = 0;
1259 
1260  if(has_ac){
1261  ptr[0] = dc;
1262  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1263  }else
1264  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1265  }
1266 
1267  dst += 4*s->linesize;
1268  }
1269 
1270  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1271  }else{
1272  q_ac = rv34_qscale_tab[s->qscale];
1273 
1274  for(j = 0; j < 4; j++){
1275  for(i = 0; i < 4; i++, cbp >>= 1){
1276  if(!(cbp & 1)) continue;
1277 
1278  rv34_process_block(r, dst + 4*i, s->linesize,
1279  r->luma_vlc, 0, q_ac, q_ac);
1280  }
1281  dst += 4*s->linesize;
1282  }
1283  }
1284 
1285  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1286  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1287 
1288  for(j = 1; j < 3; j++){
1289  dst = s->dest[j];
1290  for(i = 0; i < 4; i++, cbp >>= 1){
1291  uint8_t *pdst;
1292  if(!(cbp & 1)) continue;
1293  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1294 
1295  rv34_process_block(r, pdst, s->uvlinesize,
1296  r->chroma_vlc, 1, q_dc, q_ac);
1297  }
1298  }
1299 
1300  return 0;
1301 }
1302 
1303 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1304 {
1305  MpegEncContext *s = &r->s;
1306  int cbp, dist;
1307  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1308 
1309  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1310  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1311  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1312  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1313  if(s->mb_x && dist)
1314  r->avail_cache[5] =
1315  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1316  if(dist >= s->mb_width)
1317  r->avail_cache[2] =
1318  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1319  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1320  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1321  if(s->mb_x && dist > s->mb_width)
1322  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1323 
1324  s->qscale = r->si.quant;
1325  cbp = rv34_decode_intra_mb_header(r, intra_types);
1326  r->cbp_luma [mb_pos] = cbp;
1327  r->cbp_chroma[mb_pos] = cbp >> 16;
1328  r->deblock_coefs[mb_pos] = 0xFFFF;
1329  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1330 
1331  if(cbp == -1)
1332  return -1;
1333 
1334  if(r->is16){
1335  rv34_output_i16x16(r, intra_types, cbp);
1336  return 0;
1337  }
1338 
1339  rv34_output_intra(r, intra_types, cbp);
1340  return 0;
1341 }
1342 
1343 static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
1344 {
1345  int bits;
1346  if(s->mb_y >= s->mb_height)
1347  return 1;
1348  if(!s->mb_num_left)
1349  return 1;
1350  if(r->s.mb_skip_run > 1)
1351  return 0;
1352  bits = get_bits_left(&s->gb);
1353  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1354  return 1;
1355  return 0;
1356 }
1357 
1358 
1359 static void rv34_decoder_free(RV34DecContext *r)
1360 {
1361  av_freep(&r->intra_types_hist);
1362  r->intra_types = NULL;
1363  av_freep(&r->tmp_b_block_base);
1364  av_freep(&r->mb_type);
1365  av_freep(&r->cbp_luma);
1366  av_freep(&r->cbp_chroma);
1367  av_freep(&r->deblock_coefs);
1368 }
1369 
1370 
1371 static int rv34_decoder_alloc(RV34DecContext *r)
1372 {
1373  r->intra_types_stride = r->s.mb_width * 4 + 4;
1374 
1375  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1376  sizeof(*r->cbp_chroma));
1377  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1378  sizeof(*r->cbp_luma));
1379  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1380  sizeof(*r->deblock_coefs));
1381  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1382  sizeof(*r->intra_types_hist));
1383  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1384  sizeof(*r->mb_type));
1385 
1386  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1387  r->intra_types_hist && r->mb_type)) {
1388  r->s.context_reinit = 1;
1389  rv34_decoder_free(r);
1390  return AVERROR(ENOMEM);
1391  }
1392 
1393  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1394 
1395  return 0;
1396 }
1397 
1398 
1399 static int rv34_decoder_realloc(RV34DecContext *r)
1400 {
1401  rv34_decoder_free(r);
1402  return rv34_decoder_alloc(r);
1403 }
1404 
1405 
1406 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1407 {
1408  MpegEncContext *s = &r->s;
1409  GetBitContext *gb = &s->gb;
1410  int mb_pos, slice_type;
1411  int res;
1412 
1413  init_get_bits(&r->s.gb, buf, buf_size*8);
1414  res = r->parse_slice_header(r, gb, &r->si);
1415  if(res < 0){
1416  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1417  return -1;
1418  }
1419 
1420  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1421  if (slice_type != s->pict_type) {
1422  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1423  return AVERROR_INVALIDDATA;
1424  }
1425  if (s->width != r->si.width || s->height != r->si.height) {
1426  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1427  return AVERROR_INVALIDDATA;
1428  }
1429 
1430  r->si.end = end;
1431  s->qscale = r->si.quant;
1432  s->mb_num_left = r->si.end - r->si.start;
1433  r->s.mb_skip_run = 0;
1434 
1435  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1436  if(r->si.start != mb_pos){
1437  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1438  s->mb_x = r->si.start % s->mb_width;
1439  s->mb_y = r->si.start / s->mb_width;
1440  }
1441  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1442  s->first_slice_line = 1;
1443  s->resync_mb_x = s->mb_x;
1444  s->resync_mb_y = s->mb_y;
1445 
1446  ff_init_block_index(s);
1447  while(!check_slice_end(r, s)) {
1448  ff_update_block_index(s);
1449 
1450  if(r->si.type)
1451  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1452  else
1453  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1454  if(res < 0){
1455  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1456  return -1;
1457  }
1458  if (++s->mb_x == s->mb_width) {
1459  s->mb_x = 0;
1460  s->mb_y++;
1461  ff_init_block_index(s);
1462 
1463  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1464  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1465 
1466  if(r->loop_filter && s->mb_y >= 2)
1467  r->loop_filter(r, s->mb_y - 2);
1468 
1469  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1470  ff_thread_report_progress(&s->current_picture_ptr->tf,
1471  s->mb_y - 2, 0);
1472 
1473  }
1474  if(s->mb_x == s->resync_mb_x)
1475  s->first_slice_line=0;
1476  s->mb_num_left--;
1477  }
1478  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1479 
1480  return s->mb_y == s->mb_height;
1481 }
1482 
1483 /** @} */ // reconstruction group end
1484 
1485 /**
1486  * Initialize decoder.
1487  */
1488 av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
1489 {
1490  static AVOnce init_static_once = AV_ONCE_INIT;
1491  RV34DecContext *r = avctx->priv_data;
1492  MpegEncContext *s = &r->s;
1493  int ret;
1494 
1495  ff_mpv_decode_init(s, avctx);
1496  s->out_format = FMT_H263;
1497 
1498  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1499  avctx->has_b_frames = 1;
1500  s->low_delay = 0;
1501 
1502  ff_mpv_idct_init(s);
1503  if ((ret = ff_mpv_common_init(s)) < 0)
1504  return ret;
1505 
1506  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1507 
1508  if ((ret = rv34_decoder_alloc(r)) < 0) {
1509  ff_mpv_common_end(&r->s);
1510  return ret;
1511  }
1512 
1513  ff_thread_once(&init_static_once, rv34_init_tables);
1514 
1515  return 0;
1516 }
1517 
1518 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1519 {
1520  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1521  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1522  int err;
1523 
1524  if (dst == src || !s1->context_initialized)
1525  return 0;
1526 
1527  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
1528  s->height = s1->height;
1529  s->width = s1->width;
1530  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1531  return err;
1532  if ((err = rv34_decoder_realloc(r)) < 0)
1533  return err;
1534  }
1535 
1536  r->cur_pts = r1->cur_pts;
1537  r->last_pts = r1->last_pts;
1538  r->next_pts = r1->next_pts;
1539 
1540  memset(&r->si, 0, sizeof(r->si));
1541 
1542  // Do not call ff_mpeg_update_thread_context on a partially initialized
1543  // decoder context.
1544  if (!s1->context_initialized)
1545  return 0;
1546 
1547  return ff_mpeg_update_thread_context(dst, src);
1548 }
1549 
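/* When the demuxer does not supply avctx->slice_count, the packet starts with
 * a byte holding (slice count - 1) followed by one 8-byte table entry per
 * slice; the slice offset is the second dword of an entry, read as little or
 * big endian depending on whether the first dword equals 1. */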
1550 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1551 {
1552  if (n < slice_count) {
1553  if(avctx->slice_count) return avctx->slice_offset[n];
1554  else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1555  } else
1556  return buf_size;
1557 }
1558 
1559 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1560 {
1561  RV34DecContext *r = avctx->priv_data;
1562  MpegEncContext *s = &r->s;
1563  int got_picture = 0, ret;
1564 
1565  ff_er_frame_end(&s->er);
1566  ff_mpv_frame_end(s);
1567  s->mb_num_left = 0;
1568 
1569  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1570  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1571 
1572  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1573  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1574  return ret;
1575  ff_print_debug_info(s, s->current_picture_ptr, pict);
1576  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1577  got_picture = 1;
1578  } else if (s->last_picture_ptr) {
1579  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1580  return ret;
1581  ff_print_debug_info(s, s->last_picture_ptr, pict);
1582  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1583  got_picture = 1;
1584  }
1585 
1586  return got_picture;
1587 }
1588 
1589 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1590 {
1591  // attempt to keep aspect during typical resolution switches
1592  if (!sar.num)
1593  sar = (AVRational){1, 1};
1594 
1595  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1596  return sar;
1597 }
1598 
1599 int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
1600  int *got_picture_ptr, AVPacket *avpkt)
1601 {
1602  const uint8_t *buf = avpkt->data;
1603  int buf_size = avpkt->size;
1604  RV34DecContext *r = avctx->priv_data;
1605  MpegEncContext *s = &r->s;
1606  SliceInfo si;
1607  int i, ret;
1608  int slice_count;
1609  const uint8_t *slices_hdr = NULL;
1610  int last = 0;
1611  int faulty_b = 0;
1612  int offset;
1613 
1614  /* no supplementary picture */
1615  if (buf_size == 0) {
1616  /* special case for last picture */
1617  if (s->low_delay==0 && s->next_picture_ptr) {
1618  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1619  return ret;
1620  s->next_picture_ptr = NULL;
1621 
1622  *got_picture_ptr = 1;
1623  }
1624  return 0;
1625  }
1626 
1627  if(!avctx->slice_count){
1628  slice_count = (*buf++) + 1;
1629  slices_hdr = buf + 4;
1630  buf += 8 * slice_count;
1631  buf_size -= 1 + 8 * slice_count;
1632  }else
1633  slice_count = avctx->slice_count;
1634 
1635  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1636  //parse first slice header to check whether this frame can be decoded
1637  if(offset < 0 || offset > buf_size){
1638  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1639  return AVERROR_INVALIDDATA;
1640  }
1641  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1642  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1643  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1644  return AVERROR_INVALIDDATA;
1645  }
1646  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1647  si.type == AV_PICTURE_TYPE_B) {
1648  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1649  "reference data.\n");
1650  faulty_b = 1;
1651  }
1652  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1653  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1654  || avctx->skip_frame >= AVDISCARD_ALL)
1655  return avpkt->size;
1656 
1657  /* first slice */
1658  if (si.start == 0) {
1659  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1660  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1661  s->mb_num_left);
1662  if (!s->context_reinit)
1663  ff_er_frame_end(&s->er);
1664  ff_mpv_frame_end(s);
1665  }
1666 
1667  if (s->width != si.width || s->height != si.height || s->context_reinit) {
1668  int err;
1669 
1670  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1671  si.width, si.height);
1672 
1673  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1674  return AVERROR_INVALIDDATA;
1675 
1676  s->avctx->sample_aspect_ratio = update_sar(
1677  s->width, s->height, s->avctx->sample_aspect_ratio,
1678  si.width, si.height);
1679  s->width = si.width;
1680  s->height = si.height;
1681 
1682  err = ff_set_dimensions(s->avctx, s->width, s->height);
1683  if (err < 0)
1684  return err;
1685  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1686  return err;
1687  if ((err = rv34_decoder_realloc(r)) < 0)
1688  return err;
1689  }
1690  if (faulty_b)
1691  return AVERROR_INVALIDDATA;
1692  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1693  if (ff_mpv_frame_start(s, s->avctx) < 0)
1694  return -1;
1695  ff_mpeg_er_frame_start(s);
1696  if (!r->tmp_b_block_base) {
1697  int i;
1698 
1699  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1700  for (i = 0; i < 2; i++)
1701  r->tmp_b_block_y[i] = r->tmp_b_block_base
1702  + i * 16 * s->linesize;
1703  for (i = 0; i < 4; i++)
1704  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1705  + (i >> 1) * 8 * s->uvlinesize
1706  + (i & 1) * 16;
1707  }
1708  r->cur_pts = si.pts;
1709  if (s->pict_type != AV_PICTURE_TYPE_B) {
1710  r->last_pts = r->next_pts;
1711  r->next_pts = r->cur_pts;
1712  } else {
1713  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1714  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1715  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1716 
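 /* refdist is the distance between the two reference frames and dist0/dist1
  * the distances of this B-frame to them. The MV weights below are
  * dist/refdist in 1.14 fixed point; weight1/weight2 drive the pixel
  * weighting and are scaled down to the 0..32 range (scaled_weight = 1)
  * when both are multiples of 512. */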
1717  if(!refdist){
1718  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1719  r->scaled_weight = 0;
1720  }else{
1721  if (FFMAX(dist0, dist1) > refdist)
1722  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1723 
1724  r->mv_weight1 = (dist0 << 14) / refdist;
1725  r->mv_weight2 = (dist1 << 14) / refdist;
1726  if((r->mv_weight1|r->mv_weight2) & 511){
1727  r->weight1 = r->mv_weight1;
1728  r->weight2 = r->mv_weight2;
1729  r->scaled_weight = 0;
1730  }else{
1731  r->weight1 = r->mv_weight1 >> 9;
1732  r->weight2 = r->mv_weight2 >> 9;
1733  r->scaled_weight = 1;
1734  }
1735  }
1736  }
1737  s->mb_x = s->mb_y = 0;
1738  ff_thread_finish_setup(s->avctx);
1739  } else if (s->context_reinit) {
1740  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
1741  "reinitialize (start MB is %d).\n", si.start);
1742  return AVERROR_INVALIDDATA;
1743  } else if (HAVE_THREADS &&
1744  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1745  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1746  "multithreading mode (start MB is %d).\n", si.start);
1747  return AVERROR_INVALIDDATA;
1748  }
1749 
1750  for(i = 0; i < slice_count; i++){
1751  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1752  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1753  int size;
1754 
1755  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1756  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1757  break;
1758  }
1759  size = offset1 - offset;
1760 
1761  r->si.end = s->mb_width * s->mb_height;
1762  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1763 
1764  if(i+1 < slice_count){
1765  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1766  if (offset2 < offset1 || offset2 > buf_size) {
1767  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1768  break;
1769  }
1770  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1771  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1772  size = offset2 - offset;
1773  }else
1774  r->si.end = si.start;
1775  }
1776  av_assert0 (size >= 0 && size <= buf_size - offset);
1777  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1778  if(last)
1779  break;
1780  }
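/* Annotation (sketch, not in the original listing): each slice's byte size
 * is the gap between consecutive offset-table entries, size = offset[i+1] -
 * offset[i].  When the next slice header parses, its start MB becomes the
 * end marker for the current slice; when it does not, the two byte ranges
 * are decoded as a single slice (size = offset[i+2] - offset[i]). */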
1781 
1782  if (s->current_picture_ptr) {
1783  if (last) {
1784  if(r->loop_filter)
1785  r->loop_filter(r, s->mb_height - 1);
1786 
1787  ret = finish_frame(avctx, pict);
1788  if (ret < 0)
1789  return ret;
1790  *got_picture_ptr = ret;
1791  } else if (HAVE_THREADS &&
1792  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1793  av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
1794  /* always mark the current frame as finished, frame-mt supports
1795  * only complete frames */
1796  ff_er_frame_end(&s->er);
1797  ff_mpv_frame_end(s);
1798  s->mb_num_left = 0;
1799  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1800  return AVERROR_INVALIDDATA;
1801  }
1802  }
1803 
1804  return avpkt->size;
1805 }
1806 
1807 av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
1808 {
1809  RV34DecContext *r = avctx->priv_data;
1810 
1811  ff_mpv_common_end(&r->s);
1812  rv34_decoder_free(r);
1813 
1814  return 0;
1815 }