FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 enum CFHDParam {
46  ImageWidth = 20,
54  ChannelWidth = 104,
57 };
58 
59 
60 
61 static av_cold int cfhd_init(AVCodecContext *avctx)
62 {
63  CFHDContext *s = avctx->priv_data;
64 
65  avctx->bits_per_raw_sample = 10;
66  s->avctx = avctx;
67 
68  return ff_cfhd_init_vlcs(s);
69 }
70 
72 {
73  s->subband_num = 0;
74  s->level = 0;
75  s->subband_num_actual = 0;
76 }
77 
79 {
80  s->peak.level = 0;
81  s->peak.offset = 0;
82  memset(&s->peak.base, 0, sizeof(s->peak.base));
83 }
84 
86 {
87  s->coded_width = 0;
88  s->coded_height = 0;
89  s->cropped_height = 0;
90  s->bpc = 10;
91  s->channel_cnt = 4;
92  s->subband_cnt = SUBBAND_COUNT;
93  s->channel_num = 0;
94  s->lowpass_precision = 16;
95  s->quantisation = 1;
96  s->wavelet_depth = 3;
97  s->pshift = 1;
98  s->codebook = 0;
99  s->difference_coding = 0;
100  s->progressive = 0;
103 }
104 
105 /* TODO: merge with VLC tables or use LUT */
/* TODO: merge with VLC tables or use LUT */
/**
 * Undo quantisation and, for codebooks 0 and 1, apply the cubic
 * companding curve to small coefficients.  Coefficients at or above
 * 264, and coefficients from any other codebook, are scaled linearly
 * by the quantiser.
 */
static inline int dequant_and_decompand(int level, int quantisation, int codebook)
{
    if (codebook != 0 && codebook != 1)
        return level * quantisation;

    if (level >= 264)
        return level * quantisation;

    {
        const int64_t mag  = abs(level);
        /* cubic expansion term: 768 * |level|^3 / 255^3 */
        const int64_t cube = (768 * mag * mag * mag) / (255 * 255 * 255);
        /* FFSIGN semantics: strictly positive -> +1, otherwise -1 */
        const int     sign = level > 0 ? 1 : -1;

        return (mag + cube) * sign * quantisation;
    }
}
118 
/**
 * Undo horizontal difference coding in place: each row of 'band'
 * stores deltas, so every element accumulates its left neighbour.
 *
 * @param band   coefficient plane, modified in place
 * @param width  elements per row
 * @param height number of rows
 */
static inline void difference_coding(int16_t *band, int width, int height)
{
    for (int row = 0; row < height; row++) {
        int16_t *line = band + (ptrdiff_t)row * width;
        for (int col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
130 
/* Clamp decoded coefficients against the transmitted peak table: every
 * coefficient whose magnitude exceeds peak->level is replaced by the next
 * 16-bit little-endian value read from the peak byte stream.
 * NOTE(review): one table entry is consumed per out-of-range coefficient —
 * assumes the stream carries enough entries; confirm against the encoder. */
131 static inline void peak_table(int16_t *band, Peak *peak, int length)
132 {
133  int i;
134  for (i = 0; i < length; i++)
135  if (abs(band[i]) > peak->level)
136  band[i] = bytestream2_get_le16(&peak->base);
137 }
138 
139 static inline void process_alpha(int16_t *alpha, int width)
140 {
141  int i, channel;
142  for (i = 0; i < width; i++) {
143  channel = alpha[i];
145  channel <<= 3;
147  channel >>= 16;
148  channel = av_clip_uintp2(channel, 12);
149  alpha[i] = channel;
150  }
151 }
152 
/* Convert the decoded G/RG/BG/GD representation back to an RGGB Bayer
 * mosaic, in place, two rows per iteration.  Pointers r/g1 and g2/b walk
 * the even and odd row of each 2x2 cell; since they are uint16_t*,
 * advancing them by 'linesize' (a byte count) skips exactly two rows.
 * NOTE(review): assumes frame->data[0] holds 16-bit samples with 2048 as
 * the chroma-difference midpoint — confirm against the decoder setup. */
153 static inline void process_bayer(AVFrame *frame)
154 {
155  const int linesize = frame->linesize[0];
156  uint16_t *r = (uint16_t *)frame->data[0];
157  uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
158  uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
159  uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
160  const int mid = 2048;
161 
162  for (int y = 0; y < frame->height >> 1; y++) {
163  for (int x = 0; x < frame->width; x += 2) {
164  int R, G1, G2, B;
165  int g, rg, bg, gd;
166 
/* load the four stored components of this 2x2 cell */
167  g = r[x];
168  rg = g1[x];
169  bg = g2[x];
170  gd = b[x];
171  gd -= mid;
172 
/* reconstruct R/B from their mid-biased differences against G */
173  R = (rg - mid) * 2 + g;
174  G1 = g + gd;
175  G2 = g - gd;
176  B = (bg - mid) * 2 + g;
177 
/* scale 12-bit results to 16-bit and clamp */
178  R = av_clip_uintp2(R * 16, 16);
179  G1 = av_clip_uintp2(G1 * 16, 16);
180  G2 = av_clip_uintp2(G2 * 16, 16);
181  B = av_clip_uintp2(B * 16, 16);
182 
183  r[x] = R;
184  g1[x] = G1;
185  g2[x] = G2;
186  b[x] = B;
187  }
188 
/* uint16_t* + linesize elements == 2 * linesize bytes == two rows */
189  r += linesize;
190  g1 += linesize;
191  g2 += linesize;
192  b += linesize;
193  }
194 }
195 
/* Inverse wavelet reconstruction filter: combines one low band and one
 * high band into 2*len interleaved output samples.  Each input sample i
 * yields two outputs (2*i and 2*i+1) from a 3-tap prediction of the low
 * band plus/minus the high-band sample.  The first and last positions use
 * mirrored boundary taps (11/-4/1 and 5/4/-1) instead of the symmetric
 * interior taps.  When 'clip' is non-zero, outputs are clamped to that
 * many unsigned bits.  All strides are in elements, not bytes. */
196 static inline void filter(int16_t *output, ptrdiff_t out_stride,
197  int16_t *low, ptrdiff_t low_stride,
198  int16_t *high, ptrdiff_t high_stride,
199  int len, int clip)
200 {
201  int16_t tmp;
202  int i;
203 
204  for (i = 0; i < len; i++) {
205  if (i == 0) {
/* leading edge: mirrored prediction from the first three low samples */
206  tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
207  output[(2*i+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
208  if (clip)
209  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
210 
211  tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
212  output[(2*i+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
213  if (clip)
214  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
215  } else if (i == len-1) {
/* trailing edge: same mirrored taps, reflected to the last samples */
216  tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
217  output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
218  if (clip)
219  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
220 
221  tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
222  output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
223  if (clip)
224  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
225  } else {
/* interior: symmetric +/-1 neighbour prediction around the centre tap */
226  tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
227  output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
228  if (clip)
229  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
230 
231  tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
232  output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
233  if (clip)
234  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
235  }
236  }
237 }
238 
/**
 * Reconstruct two interlaced output lines from one low-band and one
 * high-band line: the first field line is (low - high) / 2 and the
 * second, written 'linesize' elements below, is (low + high) / 2,
 * each clipped to 10 bits.  The 'plane' argument is unused.
 */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    int16_t *first_line  = output;
    int16_t *second_line = output + linesize;

    for (int x = 0; x < width; x++) {
        /* intermediates fit in int16_t; int here changes nothing */
        int diff = (low[x] - high[x]) / 2;
        int sum  = (low[x] + high[x]) / 2;

        first_line[x]  = av_clip_uintp2(diff, 10);
        second_line[x] = av_clip_uintp2(sum, 10);
    }
}
/**
 * Horizontal inverse filter, no output clipping: interleaves the low
 * and high bands into 'output' at unit stride (2*width samples out).
 */
static void horiz_filter(int16_t *output, int16_t *low, int16_t *high,
                         int width)
{
    filter(output, 1, low, 1, high, 1, width, /*clip=*/0);
}
256 
/**
 * Horizontal inverse filter with output clamped to 'clip' unsigned bits;
 * used when writing final pixel values for planar formats.
 */
static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high,
                              int width, int clip)
{
    filter(output, 1, low, 1, high, 1, width, clip);
}
262 
/**
 * Bayer variant of the clipped horizontal filter: output stride of 2
 * leaves a gap between samples for the interleaved mosaic components.
 */
static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high,
                                    int width, int clip)
{
    filter(output, 2, low, 1, high, 1, width, clip);
}
268 
/**
 * Vertical inverse filter: same reconstruction as filter(), with
 * caller-supplied element strides for each band and no clipping.
 */
static void vert_filter(int16_t *output, ptrdiff_t out_stride,
                        int16_t *low, ptrdiff_t low_stride,
                        int16_t *high, ptrdiff_t high_stride, int len)
{
    filter(output, out_stride, low, low_stride, high, high_stride, len, /*clip=*/0);
}
275 
277 {
278  int i, j;
279 
280  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
281  av_freep(&s->plane[i].idwt_buf);
282  av_freep(&s->plane[i].idwt_tmp);
283 
284  for (j = 0; j < 9; j++)
285  s->plane[i].subband[j] = NULL;
286 
287  for (j = 0; j < 8; j++)
288  s->plane[i].l_h[j] = NULL;
289  }
290  s->a_height = 0;
291  s->a_width = 0;
292 }
293 
/* Allocate the per-plane IDWT buffers and carve them into the ten subband
 * pointers plus the low/high intermediate buffers, then latch the
 * currently-allocated geometry into a_width/a_height/a_format.
 * Returns 0 or a negative AVERROR. */
294 static int alloc_buffers(AVCodecContext *avctx)
295 {
296  CFHDContext *s = avctx->priv_data;
297  int i, j, ret, planes;
298  int chroma_x_shift, chroma_y_shift;
299  unsigned k;
300 
/* Bayer frames are coded at half size in each dimension */
301  if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
302  s->coded_width *= 2;
303  s->coded_height *= 2;
304  }
305 
306  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
307  return ret;
308  avctx->pix_fmt = s->coded_format;
309 
310  if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
311  &chroma_x_shift,
312  &chroma_y_shift)) < 0)
313  return ret;
314  planes = av_pix_fmt_count_planes(s->coded_format);
/* Bayer is treated internally as four subsampled component planes */
315  if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
316  planes = 4;
317  chroma_x_shift = 1;
318  chroma_y_shift = 1;
319  }
320 
321  for (i = 0; i < planes; i++) {
322  int w8, h8, w4, h4, w2, h2;
323  int width = i ? avctx->width >> chroma_x_shift : avctx->width;
324  int height = i ? avctx->height >> chroma_y_shift : avctx->height;
/* stride rounded so width/8 is itself 8-aligned (three DWT levels) */
325  ptrdiff_t stride = FFALIGN(width / 8, 8) * 8;
326  if (chroma_y_shift)
327  height = FFALIGN(height / 8, 2) * 8;
328  s->plane[i].width = width;
329  s->plane[i].height = height;
330  s->plane[i].stride = stride;
331 
/* band dimensions at 1/8, 1/4 and 1/2 resolution */
332  w8 = FFALIGN(s->plane[i].width / 8, 8);
333  h8 = height / 8;
334  w4 = w8 * 2;
335  h4 = h8 * 2;
336  w2 = w4 * 2;
337  h2 = h4 * 2;
338 
/* idwt_buf is zeroed (coefficients), idwt_tmp is scratch only */
339  s->plane[i].idwt_buf =
340  av_mallocz_array(height * stride, sizeof(*s->plane[i].idwt_buf))
341  s->plane[i].idwt_tmp =
342  av_malloc_array(height * stride, sizeof(*s->plane[i].idwt_tmp));
343  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
344  return AVERROR(ENOMEM);
345 
/* subband layout inside idwt_buf: LL at 0, then the three highpass
 * bands of each level packed after the area of the level below */
346  s->plane[i].subband[0] = s->plane[i].idwt_buf;
347  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
348  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
349  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
350  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
351  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
352  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
353  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
354  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
355  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
356 
/* allocated (maximum) band geometry per DWT level */
357  for (j = 0; j < DWT_LEVELS; j++) {
358  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
359  s->plane[i].band[j][k].a_width = w8 << j;
360  s->plane[i].band[j][k].a_height = h8 << j;
361  }
362  }
363 
364  /* ll2 and ll1 commented out because they are done in-place */
365  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
366  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
367  // s->plane[i].l_h[2] = ll2;
368  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
369  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
370  // s->plane[i].l_h[5] = ll1;
371  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
372  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
373  }
374 
/* remember what geometry these buffers were sized for */
375  s->a_height = s->coded_height;
376  s->a_width = s->coded_width;
377  s->a_format = s->coded_format;
378 
379  return 0;
380 }
381 
382 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
383  AVPacket *avpkt)
384 {
385  CFHDContext *s = avctx->priv_data;
386  GetByteContext gb;
387  ThreadFrame frame = { .f = data };
388  AVFrame *pic = data;
389  int ret = 0, i, j, planes, plane, got_buffer = 0;
390  int16_t *coeff_data;
391 
392  s->coded_format = AV_PIX_FMT_YUV422P10;
394  planes = av_pix_fmt_count_planes(s->coded_format);
395 
396  bytestream2_init(&gb, avpkt->data, avpkt->size);
397 
398  while (bytestream2_get_bytes_left(&gb) > 4) {
399  /* Bit weird but implement the tag parsing as the spec says */
400  uint16_t tagu = bytestream2_get_be16(&gb);
401  int16_t tag = (int16_t)tagu;
402  int8_t tag8 = (int8_t)(tagu >> 8);
403  uint16_t abstag = abs(tag);
404  int8_t abs_tag8 = abs(tag8);
405  uint16_t data = bytestream2_get_be16(&gb);
406  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
407  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
408  } else if (tag == SampleFlags) {
409  av_log(avctx, AV_LOG_DEBUG, "Progressive?%"PRIu16"\n", data);
410  s->progressive = data & 0x0001;
411  } else if (tag == ImageWidth) {
412  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
413  s->coded_width = data;
414  } else if (tag == ImageHeight) {
415  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
416  s->coded_height = data;
417  } else if (tag == 101) {
418  av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
419  if (data < 1 || data > 31) {
420  av_log(avctx, AV_LOG_ERROR, "Bits per component %d is invalid\n", data);
421  ret = AVERROR(EINVAL);
422  break;
423  }
424  s->bpc = data;
425  } else if (tag == ChannelCount) {
426  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
427  s->channel_cnt = data;
428  if (data > 4) {
429  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
431  break;
432  }
433  } else if (tag == SubbandCount) {
434  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
435  if (data != SUBBAND_COUNT) {
436  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
438  break;
439  }
440  } else if (tag == ChannelNumber) {
441  s->channel_num = data;
442  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
443  if (s->channel_num >= planes) {
444  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
445  ret = AVERROR(EINVAL);
446  break;
447  }
449  } else if (tag == SubbandNumber) {
450  if (s->subband_num != 0 && data == 1) // hack
451  s->level++;
452  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
453  s->subband_num = data;
454  if (s->level >= DWT_LEVELS) {
455  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
456  ret = AVERROR(EINVAL);
457  break;
458  }
459  if (s->subband_num > 3) {
460  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
461  ret = AVERROR(EINVAL);
462  break;
463  }
464  } else if (tag == 51) {
465  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
466  s->subband_num_actual = data;
467  if (s->subband_num_actual >= 10) {
468  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
469  ret = AVERROR(EINVAL);
470  break;
471  }
472  } else if (tag == LowpassPrecision)
473  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
474  else if (tag == Quantization) {
475  s->quantisation = data;
476  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
477  } else if (tag == PrescaleShift) {
478  s->prescale_shift[0] = (data >> 0) & 0x7;
479  s->prescale_shift[1] = (data >> 3) & 0x7;
480  s->prescale_shift[2] = (data >> 6) & 0x7;
481  av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
482  } else if (tag == 27) {
483  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
484  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
485  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
486  ret = AVERROR(EINVAL);
487  break;
488  }
489  s->plane[s->channel_num].band[0][0].width = data;
490  s->plane[s->channel_num].band[0][0].stride = data;
491  } else if (tag == 28) {
492  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
493  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
494  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
495  ret = AVERROR(EINVAL);
496  break;
497  }
498  s->plane[s->channel_num].band[0][0].height = data;
499  } else if (tag == 1)
500  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
501  else if (tag == 10) {
502  if (data != 0) {
503  avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
505  break;
506  } else if (data == 1) {
507  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
509  break;
510  }
511  av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
512  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
513  if (abstag == 0x4001)
514  s->peak.level = 0;
515  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
516  bytestream2_skipu(&gb, data * 4);
517  } else if (tag == 23) {
518  av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
519  avpriv_report_missing_feature(avctx, "Skip frame");
521  break;
522  } else if (tag == 2) {
523  av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
524  if (data > bytestream2_get_bytes_left(&gb) / 4) {
525  av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
527  break;
528  }
529  for (i = 0; i < data; i++) {
530  uint16_t tag2 = bytestream2_get_be16(&gb);
531  uint16_t val2 = bytestream2_get_be16(&gb);
532  av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
533  }
534  } else if (tag == 41) {
535  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
536  if (data < 3) {
537  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
538  ret = AVERROR(EINVAL);
539  break;
540  }
541  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
542  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
543  } else if (tag == 42) {
544  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
545  if (data < 3) {
546  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
547  ret = AVERROR(EINVAL);
548  break;
549  }
550  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
551  } else if (tag == 49) {
552  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
553  if (data < 3) {
554  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
555  ret = AVERROR(EINVAL);
556  break;
557  }
558  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
559  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
560  } else if (tag == 50) {
561  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
562  if (data < 3) {
563  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
564  ret = AVERROR(EINVAL);
565  break;
566  }
567  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
568  } else if (tag == 71) {
569  s->codebook = data;
570  av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
571  } else if (tag == 72) {
572  s->codebook = data & 0xf;
573  s->difference_coding = (data >> 4) & 1;
574  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
575  } else if (tag == 70) {
576  av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
577  if (!(data == 10 || data == 12)) {
578  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
579  ret = AVERROR(EINVAL);
580  break;
581  }
582  s->bpc = data;
583  } else if (tag == 84) {
584  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
585  if (data == 1) {
586  s->coded_format = AV_PIX_FMT_YUV422P10;
587  } else if (data == 2) {
588  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
589  } else if (data == 3) {
590  s->coded_format = AV_PIX_FMT_GBRP12;
591  } else if (data == 4) {
592  s->coded_format = AV_PIX_FMT_GBRAP12;
593  } else {
594  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
596  break;
597  }
598  planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
599  } else if (tag == -85) {
600  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
601  s->cropped_height = data;
602  } else if (tag == -75) {
603  s->peak.offset &= ~0xffff;
604  s->peak.offset |= (data & 0xffff);
605  s->peak.base = gb;
606  s->peak.level = 0;
607  } else if (tag == -76) {
608  s->peak.offset &= 0xffff;
609  s->peak.offset |= (data & 0xffffU)<<16;
610  s->peak.base = gb;
611  s->peak.level = 0;
612  } else if (tag == -74 && s->peak.offset) {
613  s->peak.level = data;
614  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
615  s->peak.offset > 4 + bytestream2_get_bytes_left(&s->peak.base)
616  ) {
618  goto end;
619  }
620  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
621  } else
622  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
623 
624  /* Some kind of end of header tag */
625  if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height &&
626  s->coded_format != AV_PIX_FMT_NONE) {
627  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
628  s->a_format != s->coded_format) {
629  free_buffers(s);
630  if ((ret = alloc_buffers(avctx)) < 0) {
631  free_buffers(s);
632  return ret;
633  }
634  }
635  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
636  if (ret < 0)
637  return ret;
638  if (s->cropped_height) {
639  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
640  if (avctx->height < height)
641  return AVERROR_INVALIDDATA;
642  avctx->height = height;
643  }
644  frame.f->width =
645  frame.f->height = 0;
646 
647  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
648  return ret;
649 
650  s->coded_width = 0;
651  s->coded_height = 0;
652  s->coded_format = AV_PIX_FMT_NONE;
653  got_buffer = 1;
654  }
655  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
656 
657  /* Lowpass coefficients */
658  if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) {
659  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
660  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
661  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
662  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
663 
664  if (!got_buffer) {
665  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
666  ret = AVERROR(EINVAL);
667  goto end;
668  }
669 
670  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
671  lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
672  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
673  ret = AVERROR(EINVAL);
674  goto end;
675  }
676 
677  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
678  for (i = 0; i < lowpass_height; i++) {
679  for (j = 0; j < lowpass_width; j++)
680  coeff_data[j] = bytestream2_get_be16u(&gb);
681 
682  coeff_data += lowpass_width;
683  }
684 
685  /* Align to mod-4 position to continue reading tags */
686  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
687 
688  /* Copy last line of coefficients if odd height */
689  if (lowpass_height & 1) {
690  memcpy(&coeff_data[lowpass_height * lowpass_width],
691  &coeff_data[(lowpass_height - 1) * lowpass_width],
692  lowpass_width * sizeof(*coeff_data));
693  }
694 
695  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
696  }
697 
698  if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) {
699  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
700  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
701  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
702  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
703  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
704  int expected;
705  int a_expected = highpass_a_height * highpass_a_width;
706  int level, run, coeff;
707  int count = 0, bytes;
708 
709  if (!got_buffer) {
710  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
711  ret = AVERROR(EINVAL);
712  goto end;
713  }
714 
715  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
716  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
717  ret = AVERROR(EINVAL);
718  goto end;
719  }
720  expected = highpass_height * highpass_stride;
721 
722  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
723 
724  init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8);
725  {
726  OPEN_READER(re, &s->gb);
727  if (!s->codebook) {
728  while (1) {
729  UPDATE_CACHE(re, &s->gb);
730  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
731  VLC_BITS, 3, 1);
732 
733  /* escape */
734  if (level == 64)
735  break;
736 
737  count += run;
738 
739  if (count > expected)
740  break;
741 
742  coeff = dequant_and_decompand(level, s->quantisation, 0);
743  for (i = 0; i < run; i++)
744  *coeff_data++ = coeff;
745  }
746  } else {
747  while (1) {
748  UPDATE_CACHE(re, &s->gb);
749  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
750  VLC_BITS, 3, 1);
751 
752  /* escape */
753  if (level == 255 && run == 2)
754  break;
755 
756  count += run;
757 
758  if (count > expected)
759  break;
760 
761  coeff = dequant_and_decompand(level, s->quantisation, s->codebook);
762  for (i = 0; i < run; i++)
763  *coeff_data++ = coeff;
764  }
765  }
766  CLOSE_READER(re, &s->gb);
767  }
768 
769  if (count > expected) {
770  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
771  ret = AVERROR(EINVAL);
772  goto end;
773  }
774  if (s->peak.level)
775  peak_table(coeff_data - count, &s->peak, count);
776  if (s->difference_coding)
777  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
778 
779  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
780  if (bytes > bytestream2_get_bytes_left(&gb)) {
781  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
782  ret = AVERROR(EINVAL);
783  goto end;
784  } else
785  bytestream2_seek(&gb, bytes, SEEK_CUR);
786 
787  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
788  s->codebook = 0;
789 
790  /* Copy last line of coefficients if odd height */
791  if (highpass_height & 1) {
792  memcpy(&coeff_data[highpass_height * highpass_stride],
793  &coeff_data[(highpass_height - 1) * highpass_stride],
794  highpass_stride * sizeof(*coeff_data));
795  }
796  }
797  }
798 
799  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
800  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
801  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
802  ret = AVERROR(EINVAL);
803  goto end;
804  }
805 
806  if (!got_buffer) {
807  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
808  ret = AVERROR(EINVAL);
809  goto end;
810  }
811 
813  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
814  if (!s->progressive)
815  return AVERROR_INVALIDDATA;
816  planes = 4;
817  }
818 
819  for (plane = 0; plane < planes && !ret; plane++) {
820  /* level 1 */
821  int lowpass_height = s->plane[plane].band[0][0].height;
822  int lowpass_width = s->plane[plane].band[0][0].width;
823  int highpass_stride = s->plane[plane].band[0][1].stride;
824  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
825  ptrdiff_t dst_linesize;
826  int16_t *low, *high, *output, *dst;
827 
828  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
829  act_plane = 0;
830  dst_linesize = pic->linesize[act_plane];
831  } else {
832  dst_linesize = pic->linesize[act_plane] / 2;
833  }
834 
835  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
836  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
837  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
838  ret = AVERROR(EINVAL);
839  goto end;
840  }
841 
842  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
843 
844  low = s->plane[plane].subband[0];
845  high = s->plane[plane].subband[2];
846  output = s->plane[plane].l_h[0];
847  for (i = 0; i < lowpass_width; i++) {
848  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
849  low++;
850  high++;
851  output++;
852  }
853 
854  low = s->plane[plane].subband[1];
855  high = s->plane[plane].subband[3];
856  output = s->plane[plane].l_h[1];
857 
858  for (i = 0; i < lowpass_width; i++) {
859  // note the stride of "low" is highpass_stride
860  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
861  low++;
862  high++;
863  output++;
864  }
865 
866  low = s->plane[plane].l_h[0];
867  high = s->plane[plane].l_h[1];
868  output = s->plane[plane].subband[0];
869  for (i = 0; i < lowpass_height * 2; i++) {
870  horiz_filter(output, low, high, lowpass_width);
871  low += lowpass_width;
872  high += lowpass_width;
873  output += lowpass_width * 2;
874  }
875  if (s->bpc == 12) {
876  output = s->plane[plane].subband[0];
877  for (i = 0; i < lowpass_height * 2; i++) {
878  for (j = 0; j < lowpass_width * 2; j++)
879  output[j] *= 4;
880 
881  output += lowpass_width * 2;
882  }
883  }
884 
885  /* level 2 */
886  lowpass_height = s->plane[plane].band[1][1].height;
887  lowpass_width = s->plane[plane].band[1][1].width;
888  highpass_stride = s->plane[plane].band[1][1].stride;
889 
890  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
891  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
892  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
893  ret = AVERROR(EINVAL);
894  goto end;
895  }
896 
897  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
898 
899  low = s->plane[plane].subband[0];
900  high = s->plane[plane].subband[5];
901  output = s->plane[plane].l_h[3];
902  for (i = 0; i < lowpass_width; i++) {
903  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
904  low++;
905  high++;
906  output++;
907  }
908 
909  low = s->plane[plane].subband[4];
910  high = s->plane[plane].subband[6];
911  output = s->plane[plane].l_h[4];
912  for (i = 0; i < lowpass_width; i++) {
913  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
914  low++;
915  high++;
916  output++;
917  }
918 
919  low = s->plane[plane].l_h[3];
920  high = s->plane[plane].l_h[4];
921  output = s->plane[plane].subband[0];
922  for (i = 0; i < lowpass_height * 2; i++) {
923  horiz_filter(output, low, high, lowpass_width);
924  low += lowpass_width;
925  high += lowpass_width;
926  output += lowpass_width * 2;
927  }
928 
929  output = s->plane[plane].subband[0];
930  for (i = 0; i < lowpass_height * 2; i++) {
931  for (j = 0; j < lowpass_width * 2; j++)
932  output[j] *= 4;
933 
934  output += lowpass_width * 2;
935  }
936 
937  /* level 3 */
938  lowpass_height = s->plane[plane].band[2][1].height;
939  lowpass_width = s->plane[plane].band[2][1].width;
940  highpass_stride = s->plane[plane].band[2][1].stride;
941 
942  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
943  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
944  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
945  ret = AVERROR(EINVAL);
946  goto end;
947  }
948 
949  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
950  if (s->progressive) {
951  low = s->plane[plane].subband[0];
952  high = s->plane[plane].subband[8];
953  output = s->plane[plane].l_h[6];
954  for (i = 0; i < lowpass_width; i++) {
955  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
956  low++;
957  high++;
958  output++;
959  }
960 
961  low = s->plane[plane].subband[7];
962  high = s->plane[plane].subband[9];
963  output = s->plane[plane].l_h[7];
964  for (i = 0; i < lowpass_width; i++) {
965  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
966  low++;
967  high++;
968  output++;
969  }
970 
971  dst = (int16_t *)pic->data[act_plane];
972  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
973  if (plane & 1)
974  dst++;
975  if (plane > 1)
976  dst += pic->linesize[act_plane] >> 1;
977  }
978  low = s->plane[plane].l_h[6];
979  high = s->plane[plane].l_h[7];
980 
981  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
982  (lowpass_height * 2 > avctx->coded_height / 2 ||
983  lowpass_width * 2 > avctx->coded_width / 2 )
984  ) {
986  goto end;
987  }
988 
989  for (i = 0; i < lowpass_height * 2; i++) {
990  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
991  horiz_filter_clip_bayer(dst, low, high, lowpass_width, s->bpc);
992  else
993  horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
994  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
995  process_alpha(dst, lowpass_width * 2);
996  low += lowpass_width;
997  high += lowpass_width;
998  dst += dst_linesize;
999  }
1000  } else {
1001  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
1002  pic->interlaced_frame = 1;
1003  low = s->plane[plane].subband[0];
1004  high = s->plane[plane].subband[7];
1005  output = s->plane[plane].l_h[6];
1006  for (i = 0; i < lowpass_height; i++) {
1007  horiz_filter(output, low, high, lowpass_width);
1008  low += lowpass_width;
1009  high += lowpass_width;
1010  output += lowpass_width * 2;
1011  }
1012 
1013  low = s->plane[plane].subband[8];
1014  high = s->plane[plane].subband[9];
1015  output = s->plane[plane].l_h[7];
1016  for (i = 0; i < lowpass_height; i++) {
1017  horiz_filter(output, low, high, lowpass_width);
1018  low += lowpass_width;
1019  high += lowpass_width;
1020  output += lowpass_width * 2;
1021  }
1022 
1023  dst = (int16_t *)pic->data[act_plane];
1024  low = s->plane[plane].l_h[6];
1025  high = s->plane[plane].l_h[7];
1026  for (i = 0; i < lowpass_height; i++) {
1027  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1028  low += lowpass_width * 2;
1029  high += lowpass_width * 2;
1030  dst += pic->linesize[act_plane];
1031  }
1032  }
1033  }
1034 
1035 
1036  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1037  process_bayer(pic);
1038 end:
1039  if (ret < 0)
1040  return ret;
1041 
1042  *got_frame = 1;
1043  return avpkt->size;
1044 }
1045 
1047 {
1048  CFHDContext *s = avctx->priv_data;
1049 
1050  free_buffers(s);
1051 
1052  ff_free_vlc(&s->vlc_9);
1053  ff_free_vlc(&s->vlc_18);
1054 
1055  return 0;
1056 }
1057 
1059  .name = "cfhd",
1060  .long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
1061  .type = AVMEDIA_TYPE_VIDEO,
1062  .id = AV_CODEC_ID_CFHD,
1063  .priv_data_size = sizeof(CFHDContext),
1064  .init = cfhd_init,
1065  .close = cfhd_close,
1066  .decode = cfhd_decode,
1067  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1069 };
AVCodec
AVCodec.
Definition: codec.h:190
stride
int stride
Definition: mace.c:144
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
Definition: internal.h:40
level
uint8_t level
Definition: svq3.c:210
Peak::level
int level
Definition: cfhd.h:73
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
r
const char * r
Definition: vf_curves.c:114
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
GetByteContext
Definition: bytestream.h:33
horiz_filter
static void horiz_filter(int16_t *output, int16_t *low, int16_t *high, int width)
Definition: cfhd.c:251
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:229
SampleFlags
@ SampleFlags
Definition: cfhd.c:52
R
#define R
Definition: huffyuvdsp.h:34
internal.h
even
Tag MUST be even
Definition: snow.txt:206
AVPacket::data
uint8_t * data
Definition: packet.h:355
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:91
ALPHA_COMPAND_DC_OFFSET
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:40
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
ChannelNumber
@ ChannelNumber
Definition: cfhd.c:51
cfhd_init
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:61
difference_coding
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:119
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
ff_cfhd_init_vlcs
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:276
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2589
CFHDParam
CFHDParam
Definition: cfhd.c:43
init_peak_table_defaults
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:78
cfhd.h
U
#define U(x)
Definition: vp56_arith.h:37
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
VLC_BITS
#define VLC_BITS
Definition: asvdec.c:37
DWT_LEVELS
#define DWT_LEVELS
Definition: cfhd.h:42
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:714
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
peak_table
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:131
cfhd_decode
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:382
process_alpha
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:139
AV_CODEC_ID_CFHD
@ AV_CODEC_ID_CFHD
Definition: codec_id.h:261
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
width
#define width
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:418
ALPHA_COMPAND_GAIN
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:41
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
horiz_filter_clip
static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high, int width, int clip)
Definition: cfhd.c:257
g
const char * g
Definition: vf_curves.c:115
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
process_bayer
static void process_bayer(AVFrame *frame)
Definition: cfhd.c:153
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ImageWidth
@ ImageWidth
Definition: cfhd.c:46
ImageHeight
@ ImageHeight
Definition: cfhd.c:47
get_bits.h
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
ChannelWidth
@ ChannelWidth
Definition: cfhd.c:54
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:209
Peak
Definition: cfhd.h:72
abs
#define abs(x)
Definition: cuda_runtime.h:35
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:398
free_buffers
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:276
SubbandCount
@ SubbandCount
Definition: cfhd.c:45
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
SUBBAND_COUNT
#define SUBBAND_COUNT
Definition: cfhd.h:34
BitsPerComponent
@ BitsPerComponent
Definition: cfhd.c:53
alloc_buffers
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:294
Quantization
@ Quantization
Definition: cfhd.c:50
ff_cfhd_decoder
AVCodec ff_cfhd_decoder
Definition: cfhd.c:1058
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
Peak::base
GetByteContext base
Definition: cfhd.h:75
AVPacket::size
int size
Definition: packet.h:356
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
ChannelHeight
@ ChannelHeight
Definition: cfhd.c:55
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
horiz_filter_clip_bayer
static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high, int width, int clip)
Definition: cfhd.c:263
buffer.h
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
attributes.h
ChannelCount
@ ChannelCount
Definition: cfhd.c:44
planes
static const struct @315 planes[]
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:447
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:48
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:414
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
CFHDContext
Definition: cfhd.h:78
common.h
filter
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:196
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
len
int len
Definition: vorbis_enc_data.h:452
LowpassPrecision
@ LowpassPrecision
Definition: cfhd.c:48
dequant_and_decompand
static int dequant_and_decompand(int level, int quantisation, int codebook)
Definition: cfhd.c:106
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
avcodec.h
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:738
tag
uint32_t tag
Definition: movenc.c:1532
SubbandNumber
@ SubbandNumber
Definition: cfhd.c:49
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
interlaced_vertical_filter
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:239
B
#define B
Definition: huffyuvdsp.h:32
AVCodecContext
main external API structure.
Definition: avcodec.h:526
ThreadFrame
Definition: thread.h:34
init_plane_defaults
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:71
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
vert_filter
static void vert_filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len)
Definition: cfhd.c:269
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
PrescaleShift
@ PrescaleShift
Definition: cfhd.c:56
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:422
clip
static double clip(void *opaque, double val)
Clip value val in the minval - maxval range.
Definition: vf_lut.c:162
cfhd_close
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1046
init_frame_defaults
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:85
channel
channel
Definition: ebur128.h:39
re
float re
Definition: fft.c:82