#define DST_MAX_CHANNELS 6
#define DST_MAX_ELEMENTS (2 * DST_MAX_CHANNELS)

#define DSD_FS44(sample_rate) (sample_rate * 8LL / 44100)

#define DST_SAMPLES_PER_FRAME(sample_rate) (588 * DSD_FS44(sample_rate))
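For orientation, a minimal worked example of what these macros evaluate to, assuming FFmpeg's convention of expressing DSD sample rates in bytes per second (so DSD64 is 2822400 / 8 = 352800); the figures in the comments follow from that assumption only:

#include <stdio.h>

#define DSD_FS44(sample_rate)              (sample_rate * 8LL / 44100)
#define DST_SAMPLES_PER_FRAME(sample_rate) (588 * DSD_FS44(sample_rate))

int main(void)
{
    long long rate = 352800;   /* assumed DSD64 byte rate: 64 * 44100 / 8 */

    /* DSD_FS44() recovers the oversampling factor relative to 44.1 kHz: 64. */
    printf("fs44       = %lld\n", DSD_FS44(rate));

    /* 588 = 44100 / 75, so a frame spans 1/75 s: 588 * 64 = 37632 one-bit
     * samples per channel, i.e. 37632 / 8 = 4704 output bytes per channel. */
    printf("bits/frame = %lld\n", DST_SAMPLES_PER_FRAME(rate));
    return 0;
}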
/* decode_init(): fill each channel's DSD state with 0x69, the DSD silence pattern */
memset(s->dsdctx[i].buf, 0x69, sizeof(s->dsdctx[i].buf));
static void read_uncoded_coeff(GetBitContext *gb, int *dst, unsigned int elements,
                               int coeff_bits, int is_signed, int offset)
static int read_table(GetBitContext *gb, Table *t, const int8_t code_pred_coeff[3][3],
                      int length_bits, int coeff_bits, int is_signed, int offset)
{
    unsigned int i, j, k;
    ...
            int method = get_bits(gb, 2), lsb_size;
            ...
            /* predict each remaining coefficient from the previous (method + 1) ones */
            for (j = method + 1; j < t->length[i]; j++) {
                ...
                for (k = 0; k < method + 1; k++)
                    x += code_pred_coeff[method][k] * (unsigned)t->coeff[i][j - k - 1];
                ...
                /* reject values outside the coeff_bits range */
                if (c < offset || c >= offset + (1 << coeff_bits))
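The loop above reconstructs each coefficient as a fixed-weight prediction from the previous method + 1 coefficients plus a coded residual, then range-checks the result. A generic, self-contained sketch of that scheme follows; the weights, the fixed-point scale and the rounding are placeholders for illustration, not the values dst.c uses:

#include <stdint.h>

/* Sketch only: "weighted history + residual" reconstruction of one coefficient. */
static int reconstruct_coeff(const int *coeff, int j,
                             const int8_t *weights, int order, int residual)
{
    int x = 0;

    for (int k = 0; k < order; k++)
        x += weights[k] * coeff[j - k - 1];   /* weighted sum of past coefficients */

    return residual - (x >> 3);               /* remove the prediction (placeholder scale) */
}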
/* ac_get(): split the arithmetic coder's interval according to probability p */
unsigned int k = (ac->a >> 8) | ((ac->a >> 7) & 1);
unsigned int q = k * p;
unsigned int a_q = ac->a - q;
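Only this sub-interval computation of ac_get() appears in the listing. For context, here is a self-contained sketch of how a binary arithmetic decoder typically consumes k, q and a_q; the struct layout, the renormalization threshold and the bit-reader callback are assumptions, not necessarily what dst.c does:

typedef struct {
    unsigned int a;   /* current interval size */
    unsigned int c;   /* code value within the interval */
} ArithCoderSketch;

/* Decode one binary event e with probability p (sketch only). */
static void ac_get_sketch(ArithCoderSketch *ac, int p, int *e,
                          int (*read_bit)(void *ctx), void *ctx)
{
    unsigned int k   = (ac->a >> 8) | ((ac->a >> 7) & 1);
    unsigned int q   = k * p;          /* probability-weighted sub-interval */
    unsigned int a_q = ac->a - q;      /* complementary sub-interval */

    *e = ac->c < a_q;                  /* which sub-interval the code value falls in */
    if (*e) {
        ac->a = a_q;
    } else {
        ac->a  = q;
        ac->c -= a_q;
    }

    if (ac->a < (1 << 15)) {           /* renormalize: shift in one more input bit */
        ac->a = ac->a << 1;
        ac->c = (ac->c << 1) | read_bit(ctx);
    }
}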
/* build_filter(): for every group of 8 filter taps, precompute the signed sum
 * of its coefficients for each possible 8-bit status byte k */
for (j = 0; j < 16; j++) {
    int total = av_clip(length - j * 8, 0, 8);
    ...
    for (k = 0; k < 256; k++) {
        ...
        for (l = 0; l < total; l++)
            v += (((k >> l) & 1) * 2 - 1) * fsets->coeff[i][j * 8 + l];
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    ...
    unsigned i, ch, same_map, dst_x_bit;
    ...
    if (avpkt->size <= 1)
    ...
    frame->nb_samples = samples_per_frame / 8;
    ...
    /* the packed DSD bits and the final float PCM share frame->data[0] */
    dsd = frame->data[0];
    pcm = (float *)frame->data[0];
    ...
    s->probs.elements = s->fsets.elements;
    memcpy(map_ch_to_pelem, map_ch_to_felem, sizeof(map_ch_to_felem));
    memset(s->status, 0xAA, sizeof(s->status));
    ...
    for (i = 0; i < samples_per_frame; i++) {
        ...
            const unsigned felem = map_ch_to_felem[ch];
            int16_t (*filter)[256] = s->filter[felem];
            uint8_t *status = s->status[ch];
            int prob, residual, v;

#define F(x) filter[(x)][status[(x)]]
            /* 128-tap prediction filter evaluated as 16 byte-indexed table lookups */
            const int16_t predict = F( 0) + F( 1) + F( 2) + F( 3) +
                                    F( 4) + F( 5) + F( 6) + F( 7) +
                                    F( 8) + F( 9) + F(10) + F(11) +
                                    F(12) + F(13) + F(14) + F(15);
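Here status[] holds the channel's recent output bits as 16 bytes, and filter[][256] is the table precomputed by build_filter() above, so the 16 lookups reproduce a 128-tap FIR over ±1 inputs. A small self-contained check of that equivalence, using made-up coefficients and history (nothing below comes from a real stream):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int     coeff[128];
    int16_t filter[16][256];
    uint8_t status[16];

    srand(1);
    for (int t = 0; t < 128; t++)
        coeff[t] = rand() % 201 - 100;      /* arbitrary coefficients in [-100, 100] */
    for (int x = 0; x < 16; x++)
        status[x] = rand() & 0xFF;          /* arbitrary 8-bit history per tap group */

    /* Precompute, as build_filter() does: filter[j][k] = sum of +/-coeff[j*8+l]
     * with the sign taken from bit l of k. */
    for (int j = 0; j < 16; j++)
        for (int k = 0; k < 256; k++) {
            int v = 0;
            for (int l = 0; l < 8; l++)
                v += (((k >> l) & 1) * 2 - 1) * coeff[j * 8 + l];
            filter[j][k] = v;
        }

    /* LUT path: 16 table lookups, as in the F() sum above. */
    int lut = 0;
    for (int x = 0; x < 16; x++)
        lut += filter[x][status[x]];

    /* Naive path: 128 multiply-accumulates over the +/-1 history bits. */
    int naive = 0;
    for (int t = 0; t < 128; t++)
        naive += (((status[t / 8] >> (t % 8)) & 1) * 2 - 1) * coeff[t];

    printf("lut = %d, naive = %d (%s)\n", lut, naive, lut == naive ? "match" : "mismatch");
    return 0;
}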
            ...
            if (!half_prob[ch] || i >= s->fsets.length[felem]) {
                unsigned pelem = map_ch_to_pelem[ch];
                ...
            /* reconstruct the DSD bit and pack it MSB-first into the channel's byte */
            v = ((predict >> 15) ^ residual) & 1;
            dsd[((i >> 3) * channels + ch) << 2] |= v << (7 - (i & 0x7));
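The last line interleaves the reconstructed bits one byte per channel per 8 samples, with a 4-byte stride; since dsd and pcm alias the same frame->data[0] above, each packed DSD byte presumably sits in the slot of the float sample it is later converted into (compare the ff_dsd2pcm_translate() prototype among the cross references below). A toy illustration of the indexing alone, with made-up values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* 2 channels, 16 one-bit samples each; bit i of channel ch lands in byte
     * ((i >> 3) * channels + ch) << 2, MSB first.  The 4-byte stride matches
     * the size of one float output sample. */
    enum { CHANNELS = 2, BITS = 16 };
    uint8_t dsd[(BITS / 8) * CHANNELS * 4];

    memset(dsd, 0, sizeof(dsd));

    for (unsigned ch = 0; ch < CHANNELS; ch++)
        for (unsigned i = 0; i < BITS; i++) {
            unsigned v = (i ^ ch) & 1;   /* arbitrary bit pattern */
            dsd[((i >> 3) * CHANNELS + ch) << 2] |= v << (7 - (i & 0x7));
        }

    for (unsigned n = 0; n < sizeof(dsd); n += 4)
        printf("slot %u: 0x%02X\n", n / 4, dsd[n]);
    return 0;
}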
static av_always_inline void ac_get(ArithCoder *ac, GetBitContext *gb, int p, int *e)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static int get_bits_left(GetBitContext *gb)
uint8_t status[DST_MAX_CHANNELS][16]
int sample_rate
samples per second
static enum AVSampleFormat sample_fmts[]
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
AVFrame
This structure describes decoded (raw) audio or video data.
const uint8_t ff_reverse[256]
static int read_table(GetBitContext *gb, Table *t, const int8_t code_pred_coeff[3][3], int length_bits, int coeff_bits, int is_signed, int offset)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void ac_init(ArithCoder *ac, GetBitContext *gb)
static int read_map(GetBitContext *gb, Table *t, unsigned int map[DST_MAX_CHANNELS], int channels)
static int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int esc_len)
read unsigned golomb rice code (jpegls).
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static av_cold int decode_init(AVCodecContext *avctx)
static int get_sbits(GetBitContext *s, int n)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static unsigned int get_bits1(GetBitContext *s)
int16_t filter[DST_MAX_ELEMENTS][16][256]
static const int8_t fsets_code_pred_coeff[3][3]
av_cold void ff_init_dsd_data(void)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
static const int8_t probs_code_pred_coeff[3][3]
static void skip_bits1(GetBitContext *s)
DSDContext dsdctx[DST_MAX_CHANNELS]
int channels
number of audio channels
#define DECLARE_ALIGNED(n, t, v)
static uint8_t prob_dst_x_bit(int c)
static av_always_inline int get_sr_golomb_dst(GetBitContext *gb, unsigned int k)
AVSampleFormat
Audio sample formats.
const char * name
Name of the codec implementation.
static void read_uncoded_coeff(GetBitContext *gb, int *dst, unsigned int elements, int coeff_bits, int is_signed, int offset)
void ff_dsd2pcm_translate(DSDContext *s, size_t samples, int lsbf, const uint8_t *src, ptrdiff_t src_stride, float *dst, ptrdiff_t dst_stride)
static void build_filter(int16_t table[DST_MAX_ELEMENTS][16][256], const Table *fsets)
unsigned int length[DST_MAX_ELEMENTS]
AVCodecContext
main external API structure.
const AVCodec ff_dst_decoder
#define avpriv_request_sample(...)
AVPacket
This structure stores compressed data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int coeff[DST_MAX_ELEMENTS][128]
#define DST_SAMPLES_PER_FRAME(sample_rate)