Go to the documentation of this file.
30 static const int16_t
steps[16] = {
31 4084, 18, 41, 64, 112, 198, 355, 1122,
32 1122, 355, 198, 112, 64, 41, 18, 4084,
35 static const int16_t
diffs[16] = {
36 2048, 4, 135, 213, 273, 323, 373, 425,
37 425, 373, 323, 273, 213, 135, 4, 2048,
76 #define WEIGHTSBITS 26
85 for (
int i = 0;
i < 6;
i++) {
86 const int sign =
FFSIGN(
c->diffs_tab[
i]);
87 c->weights_tab[
i] = (
c->weights_tab[
i] * 255LL) / 256;
88 c->weights_tab[
i] += (1LL << (
WEIGHTSBITS + 1)) * sign * isign;
91 memmove(&
c->diffs_tab[1], &
c->diffs_tab[0], 5 *
sizeof(
int32_t));
94 c->pred =
c->new_pred;
97 for (
int i = 0;
i < 6;
i++)
102 c->pred =
c->pred * 9 / 10;
110 uint32_t
step, newstep;
113 diff_sign = nibble >> 3;
116 newstep =
step & 0xfff;
117 if (newstep >> 11 == 0)
118 adiff = (((
step & 0x7f) + 0x80) * 128) >>
119 (14 - (newstep >> 7));
120 delta = diff_sign ? -adiff : adiff;
123 nibble =
steps[nibble];
124 newstep = nibble * 32 -
c->last_step & 0x1ffff;
125 newstep = ((newstep >> 5) + (newstep & 0x10000 ? 0x1000 : 0) +
c->last_step) & 0x1fff;
126 c->last_step =
av_clip(newstep, 544, 5120);
145 hdr = bytestream2_peek_be32(gb);
146 if (hdr ==
s->marker) {
148 }
else if ((hdr >> 16) ==
s->marker) {
157 for (n = 0; n < 29; n++) {
158 int nibble = bytestream2_get_byte(gb);
165 if (n == 29 && bytestream2_get_byte(gb) != 0x55)
184 AV_CODEC_CAP_SUBFRAMES |
int sample_rate
samples per second
static const int16_t diffs[16]
This structure describes decoded (raw) audio or video data.
trying all byte sequences one megabyte in length and selecting the best-looking sequence will yield an astronomical number of cases to try. But first, a word about quality, which is also called distortion. Distortion can be quantified by almost any quality measurement one chooses; generally the sum of squared differences is used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step:
int nb_channels
Number of channels in this layout.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
AVCodec p
The public AVCodec.
AVChannelLayout ch_layout
Audio channel layout.
#define FFDIFFSIGN(x, y)
Comparator.
#define FF_CODEC_DECODE_CB(func)
#define CODEC_LONG_NAME(str)
static int16_t decode(ChannelContext *c, unsigned nibble)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c[text truncated in extraction]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
const FFCodec ff_misc4_decoder
enum AVSampleFormat sample_fmt
audio sample format
static int64_t prediction(int delta, ChannelContext *c)
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
#define i(width, name, range_min, range_max)
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Name of the codec implementation.
static const float pred[4]
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
static const int16_t steps[16]
main external API structure.
Filter: the word “frame” indicates either a video frame or a group of audio samples.
This structure stores compressed data.
static av_cold int misc4_init(AVCodecContext *avctx)
static int misc4_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *pkt)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.