truemotion2.c
1 /*
2  * Duck/ON2 TrueMotion 2 Decoder
3  * Copyright (c) 2005 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Duck TrueMotion2 decoder.
25  */
26 
27 #include <inttypes.h>
28 
29 #include "avcodec.h"
30 #include "bswapdsp.h"
31 #include "bytestream.h"
32 #include "get_bits.h"
33 #include "internal.h"
34 
35 #define TM2_ESCAPE 0x80000000
36 #define TM2_DELTAS 64
37 
38 /* Huffman-coded streams of different types of blocks */
39 enum TM2_STREAMS {
40  TM2_C_HI = 0,
41  TM2_C_LO,
42  TM2_L_HI,
43  TM2_L_LO,
44  TM2_UPD,
45  TM2_MOT,
46  TM2_TYPE,
47  TM2_NUM_STREAMS
48 };
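/* Editor's note (inferred from the decoder below): each enum value names one of the
 * seven Huffman-coded token streams in a frame. TM2_C_HI/TM2_C_LO and TM2_L_HI/TM2_L_LO
 * carry chroma and luma delta indices, TM2_UPD carries update deltas, TM2_MOT carries
 * motion-vector components and TM2_TYPE carries per-block type codes; TM2_NUM_STREAMS is
 * only a count. Streams up to TM2_MOT index the per-stream delta tables (see GET_TOK()),
 * while TM2_TYPE tokens are used as-is. */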
49 
50 /* Block types */
51 enum TM2_BLOCKS {
52  TM2_HI_RES = 0,
53  TM2_MED_RES,
54  TM2_LOW_RES,
55  TM2_NULL_RES,
56  TM2_UPDATE,
57  TM2_STILL,
58  TM2_MOTION
59 };
60 
61 typedef struct TM2Context {
62  AVCodecContext *avctx;
63  AVFrame *pic;
64 
65  GetBitContext gb;
66  int error;
67  BswapDSPContext bdsp;
68 
69  uint8_t *buffer;
70  int buffer_size;
71 
72  /* TM2 streams */
73  int *tokens[TM2_NUM_STREAMS];
74  int tok_lens[TM2_NUM_STREAMS];
75  int tok_ptrs[TM2_NUM_STREAMS];
76  int deltas[TM2_NUM_STREAMS][TM2_DELTAS];
77  /* for blocks decoding */
78  int D[4];
79  int CD[4];
80  int *last;
81  int *clast;
82 
83  /* data for current and previous frame */
84  int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base;
85  int *Y1, *U1, *V1, *Y2, *U2, *V2;
86  int y_stride, uv_stride;
87  int cur;
88 } TM2Context;
89 
90 /**
91 * Huffman codes for each of the streams
92 */
93 typedef struct TM2Codes {
94  VLC vlc; ///< table for FFmpeg bitstream reader
95  int bits;
96  int *recode; ///< table for converting from code indexes to values
97  int length;
98 } TM2Codes;
99 
100 /**
101 * structure for gathering Huffman codes information
102 */
103 typedef struct TM2Huff {
104  int val_bits; ///< length of literal
105  int max_bits; ///< maximum length of code
106  int min_bits; ///< minimum length of code
107  int nodes; ///< total number of nodes in tree
108  int num; ///< current number filled
109  int max_num; ///< total number of codes
110  int *nums; ///< literals
111  uint32_t *bits; ///< codes
112  int *lens; ///< codelengths
113 } TM2Huff;
114 
115 /**
116  *
117  * @returns the length of the longest code or an AVERROR code
118  */
119 static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
120 {
121  int ret, ret2;
122  if (length > huff->max_bits) {
123  av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
124  huff->max_bits);
125  return AVERROR_INVALIDDATA;
126  }
127 
128  if (!get_bits1(&ctx->gb)) { /* literal */
129  if (length == 0) {
130  length = 1;
131  }
132  if (huff->num >= huff->max_num) {
133  av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
134  return AVERROR_INVALIDDATA;
135  }
136  huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
137  huff->bits[huff->num] = prefix;
138  huff->lens[huff->num] = length;
139  huff->num++;
140  return length;
141  } else { /* non-terminal node */
142  if ((ret2 = tm2_read_tree(ctx, prefix << 1, length + 1, huff)) < 0)
143  return ret2;
144  if ((ret = tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff)) < 0)
145  return ret;
146  }
147  return FFMAX(ret, ret2);
148 }
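/* Editor's note: the tree is stored as a pre-order bitstream walk. A '1' bit marks an
 * internal node (its two subtrees follow, left then right); a '0' bit marks a leaf and
 * is followed by a val_bits-wide literal. For example, the bit sequence
 *     1  0 <A>  1  0 <B>  0 <C>
 * yields the codes A = 0, B = 10, C = 11 (lengths 1, 2, 2), which is exactly what the
 * recursion above records in huff->bits / huff->lens. */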
149 
150 static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
151 {
152  TM2Huff huff;
153  int res = 0;
154 
155  huff.val_bits = get_bits(&ctx->gb, 5);
156  huff.max_bits = get_bits(&ctx->gb, 5);
157  huff.min_bits = get_bits(&ctx->gb, 5);
158  huff.nodes = get_bits(&ctx->gb, 17);
159  huff.num = 0;
160 
161  /* check for correct codes parameters */
162  if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
163  (huff.max_bits < 0) || (huff.max_bits > 25)) {
164  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
165  "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
166  return AVERROR_INVALIDDATA;
167  }
168  if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
169  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
170  "nodes: %i\n", huff.nodes);
171  return AVERROR_INVALIDDATA;
172  }
173  /* one-node tree */
174  if (huff.max_bits == 0)
175  huff.max_bits = 1;
176 
177  /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
178  huff.max_num = (huff.nodes + 1) >> 1;
179  huff.nums = av_calloc(huff.max_num, sizeof(int));
180  huff.bits = av_calloc(huff.max_num, sizeof(uint32_t));
181  huff.lens = av_calloc(huff.max_num, sizeof(int));
182 
183  if (!huff.nums || !huff.bits || !huff.lens) {
184  res = AVERROR(ENOMEM);
185  goto out;
186  }
187 
188  res = tm2_read_tree(ctx, 0, 0, &huff);
189 
190  if (res >= 0 && res != huff.max_bits) {
191  av_log(ctx->avctx, AV_LOG_ERROR, "Got less bits than expected: %i of %i\n",
192  res, huff.max_bits);
193  res = AVERROR_INVALIDDATA;
194  }
195  if (huff.num != huff.max_num) {
196  av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
197  huff.num, huff.max_num);
198  res = AVERROR_INVALIDDATA;
199  }
200 
201  /* convert codes to vlc_table */
202  if (res >= 0) {
203  res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
204  huff.lens, sizeof(int), sizeof(int),
205  huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
206  if (res < 0)
207  av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
208  else {
209  code->bits = huff.max_bits;
210  code->length = huff.max_num;
211  code->recode = huff.nums;
212  huff.nums = NULL;
213  }
214  }
215 
216 out:
217  /* free allocated memory */
218  av_free(huff.nums);
219  av_free(huff.bits);
220  av_free(huff.lens);
221 
222  return res;
223 }
224 
225 static void tm2_free_codes(TM2Codes *code)
226 {
227  av_free(code->recode);
228  if (code->vlc.table)
229  ff_free_vlc(&code->vlc);
230 }
231 
232 static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
233 {
234  int val;
235  val = get_vlc2(gb, code->vlc.table, code->bits, 1);
236  if(val<0)
237  return -1;
238  return code->recode[val];
239 }
240 
241 #define TM2_OLD_HEADER_MAGIC 0x00000100
242 #define TM2_NEW_HEADER_MAGIC 0x00000101
243 
244 static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
245 {
246  uint32_t magic = AV_RL32(buf);
247 
248  switch (magic) {
249  case TM2_OLD_HEADER_MAGIC:
250  avpriv_request_sample(ctx->avctx, "Old TM2 header");
251  return 0;
252  case TM2_NEW_HEADER_MAGIC:
253  return 0;
254  default:
255  av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
256  magic);
257  return AVERROR_INVALIDDATA;
258  }
259 }
260 
261 static int tm2_read_deltas(TM2Context *ctx, int stream_id)
262 {
263  int d, mb;
264  int i, v;
265 
266  d = get_bits(&ctx->gb, 9);
267  mb = get_bits(&ctx->gb, 5);
268 
269  av_assert2(mb < 32);
270  if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
271  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
272  return AVERROR_INVALIDDATA;
273  }
274 
275  for (i = 0; i < d; i++) {
276  v = get_bits_long(&ctx->gb, mb);
277  if (v & (1 << (mb - 1)))
278  ctx->deltas[stream_id][i] = v - (1U << mb);
279  else
280  ctx->deltas[stream_id][i] = v;
281  }
282  for (; i < TM2_DELTAS; i++)
283  ctx->deltas[stream_id][i] = 0;
284 
285  return 0;
286 }
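/* Editor's note: each delta is transmitted as an mb-bit unsigned value and sign-extended
 * by hand above: if its top bit is set, 2^mb is subtracted. E.g. with mb = 3, v = 5
 * (binary 101) has bit 2 set, so the stored delta is 5 - 8 = -3, while v = 2 stays 2. */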
287 
288 static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
289 {
290  int i, ret;
291  int skip = 0;
292  int len, toks, pos;
293  TM2Codes codes;
294  GetByteContext gb;
295 
296  if (buf_size < 4) {
297  av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
298  return AVERROR_INVALIDDATA;
299  }
300 
301  /* get stream length in dwords */
302  bytestream2_init(&gb, buf, buf_size);
303  len = bytestream2_get_be32(&gb);
304 
305  if (len == 0)
306  return 4;
307 
308  if (len >= INT_MAX / 4 - 1 || len < 0 || len * 4 + 4 > buf_size) {
309  av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
310  return AVERROR_INVALIDDATA;
311  }
312  skip = len * 4 + 4;
313 
314  toks = bytestream2_get_be32(&gb);
315  if (toks & 1) {
316  len = bytestream2_get_be32(&gb);
317  if (len == TM2_ESCAPE) {
318  len = bytestream2_get_be32(&gb);
319  }
320  if (len > 0) {
321  pos = bytestream2_tell(&gb);
322  if (skip <= pos)
323  return AVERROR_INVALIDDATA;
324  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
325  if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
326  return ret;
327  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
328  }
329  }
330  /* skip unused fields */
331  len = bytestream2_get_be32(&gb);
332  if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
333  bytestream2_skip(&gb, 8); /* unused by decoder */
334  } else {
335  bytestream2_skip(&gb, 4); /* unused by decoder */
336  }
337 
338  pos = bytestream2_tell(&gb);
339  if (skip <= pos)
340  return AVERROR_INVALIDDATA;
341  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
342  if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
343  return ret;
344  bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
345 
346  toks >>= 1;
347  /* check if we have sane number of tokens */
348  if ((toks < 0) || (toks > 0xFFFFFF)) {
349  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
350  ret = AVERROR_INVALIDDATA;
351  goto end;
352  }
353  ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
354  if (ret < 0) {
355  ctx->tok_lens[stream_id] = 0;
356  goto end;
357  }
358  ctx->tok_lens[stream_id] = toks;
359  len = bytestream2_get_be32(&gb);
360  if (len > 0) {
361  pos = bytestream2_tell(&gb);
362  if (skip <= pos) {
363  ret = AVERROR_INVALIDDATA;
364  goto end;
365  }
366  init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
367  for (i = 0; i < toks; i++) {
368  if (get_bits_left(&ctx->gb) <= 0) {
369  av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
370  ret = AVERROR_INVALIDDATA;
371  goto end;
372  }
373  ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
374  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS || ctx->tokens[stream_id][i]<0) {
375  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
376  ctx->tokens[stream_id][i], stream_id, i);
377  ret = AVERROR_INVALIDDATA;
378  goto end;
379  }
380  }
381  } else {
382  if (len < 0) {
383  ret = AVERROR_INVALIDDATA;
384  goto end;
385  }
386  for (i = 0; i < toks; i++) {
387  ctx->tokens[stream_id][i] = codes.recode[0];
388  if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
389  av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
390  ctx->tokens[stream_id][i], stream_id, i);
391  ret = AVERROR_INVALIDDATA;
392  goto end;
393  }
394  }
395  }
396 
397  ret = skip;
398 
399 end:
400  tm2_free_codes(&codes);
401  return ret;
402 }
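/* Editor's note: rough layout of one stream, as parsed above (length fields are
 * big-endian 32-bit words): [length in dwords] [token count * 2 | delta-table flag]
 * [optional delta table, bit-packed and dword-aligned] [one unused field, possibly
 * escaped] [Huffman tree, bit-packed and dword-aligned] [coded length] [tokens].
 * A zero stream length means "no stream"; a coded length of 0 means every token equals
 * the single code in the table. The function returns the number of bytes consumed. */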
403 
404 static inline int GET_TOK(TM2Context *ctx, int type)
405 {
406  if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
407  av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
408  ctx->error = 1;
409  return 0;
410  }
411  if (type <= TM2_MOT) {
412  if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
413  av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
414  return 0;
415  }
416  return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
417  }
418  return ctx->tokens[type][ctx->tok_ptrs[type]++];
419 }
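/* Editor's note: for the delta-carrying streams (ids up to TM2_MOT) a token is an index
 * into ctx->deltas[type][] and GET_TOK() returns the looked-up delta; for TM2_TYPE the
 * token itself is the block type. On underflow it sets ctx->error and returns 0, which
 * the per-frame loop in tm2_decode_blocks() checks after every block. */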
420 
421 /* blocks decoding routines */
422 
423 /* common Y, U, V pointers initialisation */
424 #define TM2_INIT_POINTERS() \
425  int *last, *clast; \
426  int *Y, *U, *V;\
427  int Ystride, Ustride, Vstride;\
428 \
429  Ystride = ctx->y_stride;\
430  Vstride = ctx->uv_stride;\
431  Ustride = ctx->uv_stride;\
432  Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
433  V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
434  U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
435  last = ctx->last + bx * 4;\
436  clast = ctx->clast + bx * 4;
437 
438 #define TM2_INIT_POINTERS_2() \
439  unsigned *Yo, *Uo, *Vo;\
440  int oYstride, oUstride, oVstride;\
441 \
442  TM2_INIT_POINTERS();\
443  oYstride = Ystride;\
444  oVstride = Vstride;\
445  oUstride = Ustride;\
446  Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
447  Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
448  Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;
449 
450 /* recalculate last and delta values for next blocks */
451 #define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
452  CD[0] = (unsigned)CHR[ 1] - (unsigned)last[1];\
453  CD[1] = (unsigned)CHR[stride + 1] - (unsigned) CHR[1];\
454  last[0] = (int)CHR[stride + 0];\
455  last[1] = (int)CHR[stride + 1];}
456 
457 /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
458 static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
459 {
460  unsigned ct, d;
461  int i, j;
462 
463  for (j = 0; j < 4; j++){
464  ct = ctx->D[j];
465  for (i = 0; i < 4; i++){
466  d = deltas[i + j * 4];
467  ct += d;
468  last[i] += ct;
469  Y[i] = av_clip_uint8(last[i]);
470  }
471  Y += stride;
472  ctx->D[j] = ct;
473  }
474 }
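/* Editor's note: this is a 2-D DPCM reconstruction. last[] carries the running value of
 * the row above for each column and ctx->D[] carries one left-border accumulator per
 * row; every pixel is predicted as "pixel above + accumulated horizontal deltas of this
 * row", and the accumulator is written back so the block to the right continues from it. */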
475 
476 static inline void tm2_high_chroma(int *data, int stride, int *last, unsigned *CD, int *deltas)
477 {
478  int i, j;
479  for (j = 0; j < 2; j++) {
480  for (i = 0; i < 2; i++) {
481  CD[j] += deltas[i + j * 2];
482  last[i] += CD[j];
483  data[i] = last[i];
484  }
485  data += stride;
486  }
487 }
488 
489 static inline void tm2_low_chroma(int *data, int stride, int *clast, unsigned *CD, int *deltas, int bx)
490 {
491  int t;
492  int l;
493  int prev;
494 
495  if (bx > 0)
496  prev = clast[-3];
497  else
498  prev = 0;
499  t = (int)(CD[0] + CD[1]) >> 1;
500  l = (int)(prev - CD[0] - CD[1] + clast[1]) >> 1;
501  CD[1] = CD[0] + CD[1] - t;
502  CD[0] = t;
503  clast[0] = l;
504 
505  tm2_high_chroma(data, stride, clast, CD, deltas);
506 }
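/* Editor's note: in the low-resolution case only one chroma delta covers the whole 2x2
 * block, so the missing context is synthesized first: CD[0]/CD[1] are re-split around
 * their average and clast[0] is re-estimated from the neighbouring block (clast[-3]
 * when bx > 0) before the normal high-resolution path is reused. */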
507 
508 static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
509 {
510  int i;
511  int deltas[16];
512  TM2_INIT_POINTERS();
513 
514  /* hi-res chroma */
515  for (i = 0; i < 4; i++) {
516  deltas[i] = GET_TOK(ctx, TM2_C_HI);
517  deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
518  }
519  tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
520  tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);
521 
522  /* hi-res luma */
523  for (i = 0; i < 16; i++)
524  deltas[i] = GET_TOK(ctx, TM2_L_HI);
525 
526  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
527 }
528 
529 static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
530 {
531  int i;
532  int deltas[16];
533  TM2_INIT_POINTERS();
534 
535  /* low-res chroma */
536  deltas[0] = GET_TOK(ctx, TM2_C_LO);
537  deltas[1] = deltas[2] = deltas[3] = 0;
538  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
539 
540  deltas[0] = GET_TOK(ctx, TM2_C_LO);
541  deltas[1] = deltas[2] = deltas[3] = 0;
542  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
543 
544  /* hi-res luma */
545  for (i = 0; i < 16; i++)
546  deltas[i] = GET_TOK(ctx, TM2_L_HI);
547 
548  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
549 }
550 
551 static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
552 {
553  int i;
554  int t1, t2;
555  int deltas[16];
556  TM2_INIT_POINTERS();
557 
558  /* low-res chroma */
559  deltas[0] = GET_TOK(ctx, TM2_C_LO);
560  deltas[1] = deltas[2] = deltas[3] = 0;
561  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
562 
563  deltas[0] = GET_TOK(ctx, TM2_C_LO);
564  deltas[1] = deltas[2] = deltas[3] = 0;
565  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
566 
567  /* low-res luma */
568  for (i = 0; i < 16; i++)
569  deltas[i] = 0;
570 
571  deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
572  deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
573  deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
574  deltas[10] = GET_TOK(ctx, TM2_L_LO);
575 
576  if (bx > 0)
577  last[0] = (int)((unsigned)last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
578  else
579  last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
580  last[2] = (int)((unsigned)last[1] + last[3]) >> 1;
581 
582  t1 = ctx->D[0] + (unsigned)ctx->D[1];
583  ctx->D[0] = t1 >> 1;
584  ctx->D[1] = t1 - (t1 >> 1);
585  t2 = ctx->D[2] + (unsigned)ctx->D[3];
586  ctx->D[2] = t2 >> 1;
587  ctx->D[3] = t2 - (t2 >> 1);
588 
589  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
590 }
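/* Editor's note: in low resolution only four luma deltas are coded, one per 2x2
 * quadrant of the 4x4 block (positions 0, 2, 8 and 10 of deltas[]); the other entries
 * stay zero, and the top-row predictor last[] and per-row accumulators ctx->D[] are
 * first averaged down to half resolution before tm2_apply_deltas() runs as usual. */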
591 
592 static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
593 {
594  int i;
595  int ct;
596  unsigned left, right;
597  int diff;
598  int deltas[16];
599  TM2_INIT_POINTERS();
600 
601  /* null chroma */
602  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
603  tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);
604 
605  deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
606  tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);
607 
608  /* null luma */
609  for (i = 0; i < 16; i++)
610  deltas[i] = 0;
611 
612  ct = (unsigned)ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];
613 
614  if (bx > 0)
615  left = last[-1] - (unsigned)ct;
616  else
617  left = 0;
618 
619  right = last[3];
620  diff = right - left;
621  last[0] = left + (diff >> 2);
622  last[1] = left + (diff >> 1);
623  last[2] = right - (diff >> 2);
624  last[3] = right;
625  {
626  unsigned tp = left;
627 
628  ctx->D[0] = (tp + (ct >> 2)) - left;
629  left += ctx->D[0];
630  ctx->D[1] = (tp + (ct >> 1)) - left;
631  left += ctx->D[1];
632  ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
633  left += ctx->D[2];
634  ctx->D[3] = (tp + ct) - left;
635  }
636  tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
637 }
638 
639 static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
640 {
641  int i, j;
642  TM2_INIT_POINTERS_2();
643 
644  /* update chroma */
645  for (j = 0; j < 2; j++) {
646  for (i = 0; i < 2; i++){
647  U[i] = Uo[i];
648  V[i] = Vo[i];
649  }
650  U += Ustride; V += Vstride;
651  Uo += oUstride; Vo += oVstride;
652  }
653  U -= Ustride * 2;
654  V -= Vstride * 2;
655  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
656  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
657 
658  /* update deltas */
659  ctx->D[0] = Yo[3] - last[3];
660  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
661  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
662  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
663 
664  for (j = 0; j < 4; j++) {
665  for (i = 0; i < 4; i++) {
666  Y[i] = Yo[i];
667  last[i] = Yo[i];
668  }
669  Y += Ystride;
670  Yo += oYstride;
671  }
672 }
673 
674 static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
675 {
676  int i, j;
677  unsigned d;
678  TM2_INIT_POINTERS_2();
679 
680  /* update chroma */
681  for (j = 0; j < 2; j++) {
682  for (i = 0; i < 2; i++) {
683  U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
684  V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
685  }
686  U += Ustride;
687  V += Vstride;
688  Uo += oUstride;
689  Vo += oVstride;
690  }
691  U -= Ustride * 2;
692  V -= Vstride * 2;
693  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
694  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
695 
696  /* update deltas */
697  ctx->D[0] = Yo[3] - last[3];
698  ctx->D[1] = Yo[3 + oYstride] - Yo[3];
699  ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
700  ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];
701 
702  for (j = 0; j < 4; j++) {
703  d = last[3];
704  for (i = 0; i < 4; i++) {
705  Y[i] = Yo[i] + (unsigned)GET_TOK(ctx, TM2_UPD);
706  last[i] = Y[i];
707  }
708  ctx->D[j] = last[3] - d;
709  Y += Ystride;
710  Yo += oYstride;
711  }
712 }
713 
714 static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
715 {
716  int i, j;
717  int mx, my;
718  TM2_INIT_POINTERS_2();
719 
720  mx = GET_TOK(ctx, TM2_MOT);
721  my = GET_TOK(ctx, TM2_MOT);
722  mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
723  my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
724 
725  if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
726  av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
727  return;
728  }
729 
730  Yo += my * oYstride + mx;
731  Uo += (my >> 1) * oUstride + (mx >> 1);
732  Vo += (my >> 1) * oVstride + (mx >> 1);
733 
734  /* copy chroma */
735  for (j = 0; j < 2; j++) {
736  for (i = 0; i < 2; i++) {
737  U[i] = Uo[i];
738  V[i] = Vo[i];
739  }
740  U += Ustride;
741  V += Vstride;
742  Uo += oUstride;
743  Vo += oVstride;
744  }
745  U -= Ustride * 2;
746  V -= Vstride * 2;
747  TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
748  TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));
749 
750  /* copy luma */
751  for (j = 0; j < 4; j++) {
752  for (i = 0; i < 4; i++) {
753  Y[i] = Yo[i];
754  }
755  Y += Ystride;
756  Yo += oYstride;
757  }
758  /* calculate deltas */
759  Y -= Ystride * 4;
760  ctx->D[0] = (unsigned)Y[3] - last[3];
761  ctx->D[1] = (unsigned)Y[3 + Ystride] - Y[3];
762  ctx->D[2] = (unsigned)Y[3 + Ystride * 2] - Y[3 + Ystride];
763  ctx->D[3] = (unsigned)Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
764  for (i = 0; i < 4; i++)
765  last[i] = Y[i + Ystride * 3];
766 }
767 
768 static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
769 {
770  int i, j;
771  int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
772  int type;
773  int keyframe = 1;
774  int *Y, *U, *V;
775  uint8_t *dst;
776 
777  for (i = 0; i < TM2_NUM_STREAMS; i++)
778  ctx->tok_ptrs[i] = 0;
779 
780  if (ctx->tok_lens[TM2_TYPE]<bw*bh) {
781  av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh);
782  return AVERROR_INVALIDDATA;
783  }
784 
785  memset(ctx->last, 0, 4 * bw * sizeof(int));
786  memset(ctx->clast, 0, 4 * bw * sizeof(int));
787 
788  for (j = 0; j < bh; j++) {
789  memset(ctx->D, 0, 4 * sizeof(int));
790  memset(ctx->CD, 0, 4 * sizeof(int));
791  for (i = 0; i < bw; i++) {
792  type = GET_TOK(ctx, TM2_TYPE);
793  switch(type) {
794  case TM2_HI_RES:
795  tm2_hi_res_block(ctx, p, i, j);
796  break;
797  case TM2_MED_RES:
798  tm2_med_res_block(ctx, p, i, j);
799  break;
800  case TM2_LOW_RES:
801  tm2_low_res_block(ctx, p, i, j);
802  break;
803  case TM2_NULL_RES:
804  tm2_null_res_block(ctx, p, i, j);
805  break;
806  case TM2_UPDATE:
807  tm2_update_block(ctx, p, i, j);
808  keyframe = 0;
809  break;
810  case TM2_STILL:
811  tm2_still_block(ctx, p, i, j);
812  keyframe = 0;
813  break;
814  case TM2_MOTION:
815  tm2_motion_block(ctx, p, i, j);
816  keyframe = 0;
817  break;
818  default:
819  av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
820  }
821  if (ctx->error)
822  return AVERROR_INVALIDDATA;
823  }
824  }
825 
826  /* copy data from our buffer to AVFrame */
827  Y = (ctx->cur?ctx->Y2:ctx->Y1);
828  U = (ctx->cur?ctx->U2:ctx->U1);
829  V = (ctx->cur?ctx->V2:ctx->V1);
830  dst = p->data[0];
831  for (j = 0; j < h; j++) {
832  for (i = 0; i < w; i++) {
833  unsigned y = Y[i], u = U[i >> 1], v = V[i >> 1];
834  dst[3*i+0] = av_clip_uint8(y + v);
835  dst[3*i+1] = av_clip_uint8(y);
836  dst[3*i+2] = av_clip_uint8(y + u);
837  }
838 
839  /* horizontal edge extension */
840  Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
841  Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
842 
843  /* vertical edge extension */
844  if (j == 0) {
845  memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
846  memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
847  memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
848  memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
849  } else if (j == h - 1) {
850  memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
851  memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
852  memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
853  memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
854  }
855 
856  Y += ctx->y_stride;
857  if (j & 1) {
858  /* horizontal edge extension */
859  U[-2] = U[-1] = U[0];
860  V[-2] = V[-1] = V[0];
861  U[cw + 1] = U[cw] = U[cw - 1];
862  V[cw + 1] = V[cw] = V[cw - 1];
863 
864  /* vertical edge extension */
865  if (j == 1) {
866  memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
867  memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
868  memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
869  memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
870  } else if (j == h - 1) {
871  memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
872  memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
873  memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
874  memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
875  }
876 
877  U += ctx->uv_stride;
878  V += ctx->uv_stride;
879  }
880  dst += p->linesize[0];
881  }
882 
883  return keyframe;
884 }
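/* Editor's note: the internal planes are not conventional YUV. The "Y" plane is written
 * straight to the green channel, while the "U" and "V" planes are offsets added to Y to
 * form red and blue (the output is AV_PIX_FMT_BGR24, so dst[0] = B = Y+V, dst[1] = G = Y,
 * dst[2] = R = Y+U). The 4-sample luma / 2-sample chroma borders filled at the end of
 * each row serve as the reference data for the next frame's predictors. */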
885 
886 static const int tm2_stream_order[TM2_NUM_STREAMS] = {
887  TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE
888 };
889 
890 #define TM2_HEADER_SIZE 40
891 
892 static int decode_frame(AVCodecContext *avctx,
893  void *data, int *got_frame,
894  AVPacket *avpkt)
895 {
896  TM2Context * const l = avctx->priv_data;
897  const uint8_t *buf = avpkt->data;
898  int buf_size = avpkt->size & ~3;
899  AVFrame * const p = l->pic;
900  int offset = TM2_HEADER_SIZE;
901  int i, t, ret;
902 
903  l->error = 0;
904 
905  av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
906  if (!l->buffer) {
907  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
908  return AVERROR(ENOMEM);
909  }
910 
911  if ((ret = ff_reget_buffer(avctx, p, 0)) < 0)
912  return ret;
913 
914  l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
915  buf_size >> 2);
916 
917  if ((ret = tm2_read_header(l, l->buffer)) < 0) {
918  return ret;
919  }
920 
921  for (i = 0; i < TM2_NUM_STREAMS; i++) {
922  if (offset >= buf_size) {
923  av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
924  return AVERROR_INVALIDDATA;
925  }
926 
927  t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
928  buf_size - offset);
929  if (t < 0) {
930  int j = tm2_stream_order[i];
931  if (l->tok_lens[j])
932  memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
933  return t;
934  }
935  offset += t;
936  }
937  p->key_frame = tm2_decode_blocks(l, p);
938  if (p->key_frame)
939  p->pict_type = AV_PICTURE_TYPE_I;
940  else
941  p->pict_type = AV_PICTURE_TYPE_P;
942 
943  l->cur = !l->cur;
944  *got_frame = 1;
945  ret = av_frame_ref(data, l->pic);
946 
947  return (ret < 0) ? ret : buf_size;
948 }
949 
950 static av_cold int decode_init(AVCodecContext *avctx)
951 {
952  TM2Context * const l = avctx->priv_data;
953  int i, w = avctx->width, h = avctx->height;
954 
955  if ((avctx->width & 3) || (avctx->height & 3)) {
956  av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
957  return AVERROR(EINVAL);
958  }
959 
960  l->avctx = avctx;
961  avctx->pix_fmt = AV_PIX_FMT_BGR24;
962 
963  l->pic = av_frame_alloc();
964  if (!l->pic)
965  return AVERROR(ENOMEM);
966 
967  ff_bswapdsp_init(&l->bdsp);
968 
969  l->last = av_malloc_array(w >> 2, 4 * sizeof(*l->last) );
970  l->clast = av_malloc_array(w >> 2, 4 * sizeof(*l->clast));
971 
972  for (i = 0; i < TM2_NUM_STREAMS; i++) {
973  l->tokens[i] = NULL;
974  l->tok_lens[i] = 0;
975  }
976 
977  w += 8;
978  h += 8;
979  l->Y1_base = av_calloc(w * h, sizeof(*l->Y1_base));
980  l->Y2_base = av_calloc(w * h, sizeof(*l->Y2_base));
981  l->y_stride = w;
982  w = (w + 1) >> 1;
983  h = (h + 1) >> 1;
984  l->U1_base = av_calloc(w * h, sizeof(*l->U1_base));
985  l->V1_base = av_calloc(w * h, sizeof(*l->V1_base));
986  l->U2_base = av_calloc(w * h, sizeof(*l->U2_base));
987  l->V2_base = av_calloc(w * h, sizeof(*l->V2_base));
988  l->uv_stride = w;
989  l->cur = 0;
990  if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
991  !l->V1_base || !l->U2_base || !l->V2_base ||
992  !l->last || !l->clast) {
993  av_freep(&l->Y1_base);
994  av_freep(&l->Y2_base);
995  av_freep(&l->U1_base);
996  av_freep(&l->U2_base);
997  av_freep(&l->V1_base);
998  av_freep(&l->V2_base);
999  av_freep(&l->last);
1000  av_freep(&l->clast);
1001  av_frame_free(&l->pic);
1002  return AVERROR(ENOMEM);
1003  }
1004  l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
1005  l->Y2 = l->Y2_base + l->y_stride * 4 + 4;
1006  l->U1 = l->U1_base + l->uv_stride * 2 + 2;
1007  l->U2 = l->U2_base + l->uv_stride * 2 + 2;
1008  l->V1 = l->V1_base + l->uv_stride * 2 + 2;
1009  l->V2 = l->V2_base + l->uv_stride * 2 + 2;
1010 
1011  return 0;
1012 }
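/* Editor's note: the planes are allocated 8 samples wider and taller than the picture
 * (4 extra luma / 2 extra chroma samples on every side) and the Y1/Y2, U1/U2, V1/V2
 * pointers start inside that border, so the edge extension done in tm2_decode_blocks()
 * and the out-of-frame reads in tm2_motion_block() stay within the allocation. */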
1013 
1014 static av_cold int decode_end(AVCodecContext *avctx)
1015 {
1016  TM2Context * const l = avctx->priv_data;
1017  int i;
1018 
1019  av_free(l->last);
1020  av_free(l->clast);
1021  for (i = 0; i < TM2_NUM_STREAMS; i++)
1022  av_freep(&l->tokens[i]);
1023  if (l->Y1) {
1024  av_freep(&l->Y1_base);
1025  av_freep(&l->U1_base);
1026  av_freep(&l->V1_base);
1027  av_freep(&l->Y2_base);
1028  av_freep(&l->U2_base);
1029  av_freep(&l->V2_base);
1030  }
1031  av_freep(&l->buffer);
1032  l->buffer_size = 0;
1033 
1034  av_frame_free(&l->pic);
1035 
1036  return 0;
1037 }
1038 
1039 AVCodec ff_truemotion2_decoder = {
1040  .name = "truemotion2",
1041  .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
1042  .type = AVMEDIA_TYPE_VIDEO,
1043  .id = AV_CODEC_ID_TRUEMOTION2,
1044  .priv_data_size = sizeof(TM2Context),
1045  .init = decode_init,
1046  .close = decode_end,
1047  .decode = decode_frame,
1048  .capabilities = AV_CODEC_CAP_DR1,
1049 };