FFmpeg
af_axcorrelate.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
29 
30 typedef struct AudioXCorrelateContext {
31  const AVClass *class;
32 
33  int size;
34  int algo;
36 
42  int used;
43  int eof;
44 
47 
/**
 * Declare mean_sum_<suffix>(): plain sum of the first @size elements of
 * @in. The caller divides by size to obtain the window mean.
 */
#define MEAN_SUM(suffix, type, zero)            \
static type mean_sum_##suffix(const type *in,   \
                              int size)         \
{                                               \
    type total = zero;                          \
                                                \
    for (int n = 0; n < size; n++)              \
        total += in[n];                         \
                                                \
    return total;                               \
}

MEAN_SUM(f, float, 0.f)
MEAN_SUM(d, double, 0.0)
62 
/**
 * Declare square_sum_<suffix>(): dot product of the first @size elements
 * of @x and @y. Called with x==y it yields the sum of squares.
 */
#define SQUARE_SUM(suffix, type, zero)          \
static type square_sum_##suffix(const type *x,  \
                                const type *y,  \
                                int size)       \
{                                               \
    type acc = zero;                            \
                                                \
    for (int n = 0; n < size; n++)              \
        acc += x[n] * y[n];                     \
                                                \
    return acc;                                 \
}

SQUARE_SUM(f, float, 0.f)
SQUARE_SUM(d, double, 0.0)
78 
79 #define XCORRELATE(suffix, type, zero, small, sqrtfun)\
80 static type xcorrelate_##suffix(const type *x, \
81  const type *y, \
82  type sumx, \
83  type sumy, int size) \
84 { \
85  const type xm = sumx / size, ym = sumy / size; \
86  type num = zero, den, den0 = zero, den1 = zero; \
87  \
88  for (int i = 0; i < size; i++) { \
89  type xd = x[i] - xm; \
90  type yd = y[i] - ym; \
91  \
92  num += xd * yd; \
93  den0 += xd * xd; \
94  den1 += yd * yd; \
95  } \
96  \
97  num /= size; \
98  den = sqrtfun((den0 * den1) / size / size); \
99  \
100  return den <= small ? zero : num / den; \
101 }
102 
103 XCORRELATE(f, float, 0.f, 1e-6f, sqrtf)
104 XCORRELATE(d, double, 0.0, 1e-9, sqrt)
105 
/**
 * Declare xcorrelate_slow_##suffix(): per-channel sliding-window Pearson
 * correlation. Only the plain sums of x and y are maintained incrementally;
 * every output sample re-runs the full O(size) xcorrelate_##suffix() scan,
 * hence "slow" (but numerically the most straightforward variant).
 * Returns 1, which the caller stores in s->used so the sums are primed
 * only once.
 */
#define XCORRELATE_SLOW(suffix, type)                                     \
static int xcorrelate_slow_##suffix(AVFilterContext *ctx,                 \
                                    AVFrame *out, int available)          \
{                                                                         \
    AudioXCorrelateContext *s = ctx->priv;                                \
    const int size = s->size;                                             \
    int used;                                                             \
                                                                          \
    for (int ch = 0; ch < out->ch_layout.nb_channels; ch++) {             \
        const type *x = (const type *)s->cache[0]->extended_data[ch];     \
        const type *y = (const type *)s->cache[1]->extended_data[ch];     \
        type *sumx = (type *)s->mean_sum[0]->extended_data[ch];           \
        type *sumy = (type *)s->mean_sum[1]->extended_data[ch];           \
        type *dst = (type *)out->extended_data[ch];                       \
                                                                          \
        used = s->used;                                                   \
        if (!used) { /* prime the window sums on the first call */        \
            sumx[0] = mean_sum_##suffix(x, size);                         \
            sumy[0] = mean_sum_##suffix(y, size);                         \
            used = 1;                                                     \
        }                                                                 \
                                                                          \
        for (int n = 0; n < out->nb_samples; n++) {                       \
            const int idx = n + size; /* newest sample entering window */ \
                                                                          \
            dst[n] = xcorrelate_##suffix(x + n, y + n,                    \
                                         sumx[0], sumy[0],                \
                                         size);                           \
                                                                          \
            /* slide window: drop oldest sample, add newest */            \
            sumx[0] -= x[n];                                              \
            sumx[0] += x[idx];                                            \
            sumy[0] -= y[n];                                              \
            sumy[0] += y[idx];                                            \
        }                                                                 \
    }                                                                     \
                                                                          \
    return used;                                                          \
}

XCORRELATE_SLOW(f, float)
XCORRELATE_SLOW(d, double)
147 
/* Clamp a correlation value to its mathematical range [-1, 1];
 * guards against floating-point drift of the incremental sums. */
#define clipf(x) (av_clipf(x, -1.f, 1.f))
#define clipd(x) (av_clipd(x, -1.0, 1.0))

/**
 * Declare xcorrelate_fast_##suffix(): O(1)-per-sample sliding-window
 * correlation that maintains running sums of x*y, x*x and y*y only.
 * Unlike the "slow"/"best" variants the window mean is NOT subtracted,
 * so this matches Pearson correlation only for zero-mean input.
 * Returns 1, stored by the caller in s->used so priming happens once.
 */
#define XCORRELATE_FAST(suffix, type, zero, small, sqrtfun, CLIP)        \
static int xcorrelate_fast_##suffix(AVFilterContext *ctx, AVFrame *out, \
                                    int available)                      \
{                                                                        \
    AudioXCorrelateContext *s = ctx->priv;                               \
    const int size = s->size;                                            \
    int used;                                                            \
                                                                         \
    for (int ch = 0; ch < out->ch_layout.nb_channels; ch++) {            \
        const type *x = (const type *)s->cache[0]->extended_data[ch];    \
        const type *y = (const type *)s->cache[1]->extended_data[ch];    \
        type *num_sum = (type *)s->num_sum->extended_data[ch];           \
        type *den_sumx = (type *)s->den_sum[0]->extended_data[ch];       \
        type *den_sumy = (type *)s->den_sum[1]->extended_data[ch];       \
        type *dst = (type *)out->extended_data[ch];                      \
                                                                         \
        used = s->used;                                                  \
        if (!used) { /* prime the running sums on the first window */    \
            num_sum[0] = square_sum_##suffix(x, y, size);                \
            den_sumx[0] = square_sum_##suffix(x, x, size);               \
            den_sumy[0] = square_sum_##suffix(y, y, size);               \
            used = 1;                                                    \
        }                                                                \
                                                                         \
        for (int n = 0; n < out->nb_samples; n++) {                      \
            const int idx = n + size;                                    \
            type num, den;                                               \
                                                                         \
            num = num_sum[0] / size;                                     \
            den = sqrtfun((den_sumx[0] * den_sumy[0]) / size / size);    \
                                                                         \
            dst[n] = den <= small ? zero : CLIP(num / den);              \
                                                                         \
            /* slide window: drop oldest product, add newest */          \
            num_sum[0] -= x[n] * y[n];                                   \
            num_sum[0] += x[idx] * y[idx];                               \
            den_sumx[0] -= x[n] * x[n];                                  \
            den_sumx[0] += x[idx] * x[idx];                              \
            /* square sums can never be negative; undo FP drift */       \
            den_sumx[0] = FFMAX(den_sumx[0], zero);                      \
            den_sumy[0] -= y[n] * y[n];                                  \
            den_sumy[0] += y[idx] * y[idx];                              \
            den_sumy[0] = FFMAX(den_sumy[0], zero);                      \
        }                                                                \
    }                                                                    \
                                                                         \
    return used;                                                         \
}

XCORRELATE_FAST(f, float, 0.f, 1e-6f, sqrtf, clipf)
XCORRELATE_FAST(d, double, 0.0, 1e-9, sqrt, clipd)
200 
/**
 * Declare xcorrelate_best_##suffix(): O(1)-per-sample sliding-window
 * Pearson correlation. Maintains running sums of x, y, x*y, x*x and y*y,
 * so the window mean is removed (like "slow") without the per-sample
 * O(size) rescan (like "fast"). Output is clamped to [-1, 1].
 * Returns 1, stored by the caller in s->used so priming happens once.
 */
#define XCORRELATE_BEST(suffix, type, zero, small, sqrtfun, FMAX, CLIP) \
static int xcorrelate_best_##suffix(AVFilterContext *ctx, AVFrame *out, \
                                    int available)                      \
{                                                                        \
    AudioXCorrelateContext *s = ctx->priv;                               \
    const int size = s->size;                                            \
    int used;                                                            \
                                                                         \
    for (int ch = 0; ch < out->ch_layout.nb_channels; ch++) {            \
        const type *x = (const type *)s->cache[0]->extended_data[ch];    \
        const type *y = (const type *)s->cache[1]->extended_data[ch];    \
        type *mean_sumx = (type *)s->mean_sum[0]->extended_data[ch];     \
        type *mean_sumy = (type *)s->mean_sum[1]->extended_data[ch];     \
        type *num_sum = (type *)s->num_sum->extended_data[ch];           \
        type *den_sumx = (type *)s->den_sum[0]->extended_data[ch];       \
        type *den_sumy = (type *)s->den_sum[1]->extended_data[ch];       \
        type *dst = (type *)out->extended_data[ch];                      \
                                                                         \
        used = s->used;                                                  \
        if (!used) { /* prime all running sums on the first window */    \
            num_sum[0] = square_sum_##suffix(x, y, size);                \
            den_sumx[0] = square_sum_##suffix(x, x, size);               \
            den_sumy[0] = square_sum_##suffix(y, y, size);               \
            mean_sumx[0] = mean_sum_##suffix(x, size);                   \
            mean_sumy[0] = mean_sum_##suffix(y, size);                   \
            used = 1;                                                    \
        }                                                                \
                                                                         \
        for (int n = 0; n < out->nb_samples; n++) {                      \
            const int idx = n + size;                                    \
            type num, den, xm, ym;                                       \
                                                                         \
            xm = mean_sumx[0] / size;                                    \
            ym = mean_sumy[0] / size;                                    \
            /* covariance and variances via sum(xy) - n*mean products */ \
            num = num_sum[0] - size * xm * ym;                           \
            den = sqrtfun(FMAX(den_sumx[0] - size * xm * xm, zero)) *    \
                  sqrtfun(FMAX(den_sumy[0] - size * ym * ym, zero));     \
                                                                         \
            dst[n] = den <= small ? zero : CLIP(num / den);              \
                                                                         \
            /* slide window: drop oldest sample, add newest */           \
            mean_sumx[0]-= x[n];                                         \
            mean_sumx[0]+= x[idx];                                       \
            mean_sumy[0]-= y[n];                                         \
            mean_sumy[0]+= y[idx];                                       \
            num_sum[0] -= x[n] * y[n];                                   \
            num_sum[0] += x[idx] * y[idx];                               \
            den_sumx[0] -= x[n] * x[n];                                  \
            den_sumx[0] += x[idx] * x[idx];                              \
            /* square sums can never be negative; undo FP drift */       \
            den_sumx[0] = FMAX(den_sumx[0], zero);                       \
            den_sumy[0] -= y[n] * y[n];                                  \
            den_sumy[0] += y[idx] * y[idx];                              \
            den_sumy[0] = FMAX(den_sumy[0], zero);                       \
        }                                                                \
    }                                                                    \
                                                                         \
    return used;                                                         \
}

XCORRELATE_BEST(f, float, 0.f, 1e-6f, sqrtf, fmaxf, clipf)
XCORRELATE_BEST(d, double, 0.0, 1e-9, sqrt, fmax, clipd)
261 
263 {
264  AudioXCorrelateContext *s = ctx->priv;
265  AVFilterLink *outlink = ctx->outputs[0];
266  AVFrame *frame = NULL;
267  int ret, status;
268  int available;
269  int64_t pts;
270 
272 
273  for (int i = 0; i < 2 && !s->eof; i++) {
274  ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
275  if (ret > 0) {
276  if (s->pts == AV_NOPTS_VALUE)
277  s->pts = frame->pts;
278  ret = av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
279  frame->nb_samples);
281  if (ret < 0)
282  return ret;
283  }
284  }
285 
286  available = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
287  if (available > s->size) {
288  const int out_samples = available - s->size;
289  AVFrame *out;
290 
291  if (!s->cache[0] || s->cache[0]->nb_samples < available) {
292  av_frame_free(&s->cache[0]);
293  s->cache[0] = ff_get_audio_buffer(outlink, available);
294  if (!s->cache[0])
295  return AVERROR(ENOMEM);
296  }
297 
298  if (!s->cache[1] || s->cache[1]->nb_samples < available) {
299  av_frame_free(&s->cache[1]);
300  s->cache[1] = ff_get_audio_buffer(outlink, available);
301  if (!s->cache[1])
302  return AVERROR(ENOMEM);
303  }
304 
305  ret = av_audio_fifo_peek(s->fifo[0], (void **)s->cache[0]->extended_data, available);
306  if (ret < 0)
307  return ret;
308 
309  ret = av_audio_fifo_peek(s->fifo[1], (void **)s->cache[1]->extended_data, available);
310  if (ret < 0)
311  return ret;
312 
313  out = ff_get_audio_buffer(outlink, out_samples);
314  if (!out)
315  return AVERROR(ENOMEM);
316 
317  s->used = s->xcorrelate(ctx, out, available);
318 
319  out->pts = s->pts;
320  s->pts += out_samples;
321 
322  av_audio_fifo_drain(s->fifo[0], out_samples);
323  av_audio_fifo_drain(s->fifo[1], out_samples);
324 
325  return ff_filter_frame(outlink, out);
326  }
327 
328  for (int i = 0; i < 2 && !s->eof; i++) {
329  if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
330  AVFrame *silence = ff_get_audio_buffer(outlink, s->size);
331 
332  s->eof = 1;
333  if (!silence)
334  return AVERROR(ENOMEM);
335 
336  av_audio_fifo_write(s->fifo[0], (void **)silence->extended_data,
337  silence->nb_samples);
338 
339  av_audio_fifo_write(s->fifo[1], (void **)silence->extended_data,
340  silence->nb_samples);
341 
342  av_frame_free(&silence);
343  }
344  }
345 
346  if (s->eof &&
347  (av_audio_fifo_size(s->fifo[0]) <= s->size ||
348  av_audio_fifo_size(s->fifo[1]) <= s->size)) {
349  ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
350  return 0;
351  }
352 
353  if ((av_audio_fifo_size(s->fifo[0]) > s->size &&
354  av_audio_fifo_size(s->fifo[1]) > s->size) || s->eof) {
356  return 0;
357  }
358 
359  if (ff_outlink_frame_wanted(outlink) && !s->eof) {
360  for (int i = 0; i < 2; i++) {
361  if (av_audio_fifo_size(s->fifo[i]) > s->size)
362  continue;
363  ff_inlink_request_frame(ctx->inputs[i]);
364  return 0;
365  }
366  }
367 
368  return FFERROR_NOT_READY;
369 }
370 
371 static int config_output(AVFilterLink *outlink)
372 {
373  AVFilterContext *ctx = outlink->src;
374  AudioXCorrelateContext *s = ctx->priv;
375 
376  s->pts = AV_NOPTS_VALUE;
377 
378  s->fifo[0] = av_audio_fifo_alloc(outlink->format, outlink->ch_layout.nb_channels, s->size);
379  s->fifo[1] = av_audio_fifo_alloc(outlink->format, outlink->ch_layout.nb_channels, s->size);
380  if (!s->fifo[0] || !s->fifo[1])
381  return AVERROR(ENOMEM);
382 
383  s->mean_sum[0] = ff_get_audio_buffer(outlink, 1);
384  s->mean_sum[1] = ff_get_audio_buffer(outlink, 1);
385  s->num_sum = ff_get_audio_buffer(outlink, 1);
386  s->den_sum[0] = ff_get_audio_buffer(outlink, 1);
387  s->den_sum[1] = ff_get_audio_buffer(outlink, 1);
388  if (!s->mean_sum[0] || !s->mean_sum[1] || !s->num_sum ||
389  !s->den_sum[0] || !s->den_sum[1])
390  return AVERROR(ENOMEM);
391 
392  switch (s->algo) {
393  case 0: s->xcorrelate = xcorrelate_slow_f; break;
394  case 1: s->xcorrelate = xcorrelate_fast_f; break;
395  case 2: s->xcorrelate = xcorrelate_best_f; break;
396  }
397 
398  if (outlink->format == AV_SAMPLE_FMT_DBLP) {
399  switch (s->algo) {
400  case 0: s->xcorrelate = xcorrelate_slow_d; break;
401  case 1: s->xcorrelate = xcorrelate_fast_d; break;
402  case 2: s->xcorrelate = xcorrelate_best_d; break;
403  }
404  }
405 
406  return 0;
407 }
408 
410 {
411  AudioXCorrelateContext *s = ctx->priv;
412 
413  av_audio_fifo_free(s->fifo[0]);
414  av_audio_fifo_free(s->fifo[1]);
415  av_frame_free(&s->cache[0]);
416  av_frame_free(&s->cache[1]);
417  av_frame_free(&s->mean_sum[0]);
418  av_frame_free(&s->mean_sum[1]);
419  av_frame_free(&s->num_sum);
420  av_frame_free(&s->den_sum[0]);
421  av_frame_free(&s->den_sum[1]);
422 }
423 
/* The two audio streams to be cross-correlated against each other. */
static const AVFilterPad inputs[] = {
    {
        .name = "axcorrelate0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "axcorrelate1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};
434 
/* Single output carrying one correlation sample per input sample. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};
442 
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioXCorrelateContext, x)

/* User options: analysis window length and algorithm selection. */
static const AVOption axcorrelate_options[] = {
    { "size", "set the segment size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=256}, 2, 131072, AF },
    { "algo", "set the algorithm", OFFSET(algo), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, AF, .unit = "algo" },
    { "slow", "slow algorithm", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "algo" },
    { "fast", "fast algorithm", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "algo" },
    { "best", "best algorithm", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "algo" },
    { NULL }
};
454 
455 AVFILTER_DEFINE_CLASS(axcorrelate);
456 
458  .name = "axcorrelate",
459  .description = NULL_IF_CONFIG_SMALL("Cross-correlate two audio streams."),
460  .priv_size = sizeof(AudioXCorrelateContext),
461  .priv_class = &axcorrelate_class,
462  .activate = activate,
463  .uninit = uninit,
467 };
av_audio_fifo_free
void av_audio_fifo_free(AVAudioFifo *af)
Free an AVAudioFifo.
Definition: audio_fifo.c:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:98
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AudioXCorrelateContext::eof
int eof
Definition: af_axcorrelate.c:43
out
FILE * out
Definition: movenc.c:55
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1062
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
XCORRELATE_FAST
#define XCORRELATE_FAST(suffix, type, zero, small, sqrtfun, CLIP)
Definition: af_axcorrelate.c:151
av_audio_fifo_write
int av_audio_fifo_write(AVAudioFifo *af, void *const *data, int nb_samples)
Write data to an AVAudioFifo.
Definition: audio_fifo.c:119
clipd
#define clipd(x)
Definition: af_axcorrelate.c:149
int64_t
long long int64_t
Definition: coverity.c:34
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(axcorrelate)
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:262
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AVOption
AVOption.
Definition: opt.h:429
AudioXCorrelateContext::fifo
AVAudioFifo * fifo[2]
Definition: af_axcorrelate.c:37
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:205
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:327
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1491
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:447
AVAudioFifo
Context for an Audio FIFO Buffer.
Definition: audio_fifo.c:37
av_audio_fifo_drain
int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples)
Drain data from an AVAudioFifo.
Definition: audio_fifo.c:195
XCORRELATE
#define XCORRELATE(suffix, type, zero, small, sqrtfun)
Definition: af_axcorrelate.c:79
pts
static int64_t pts
Definition: transcode_aac.c:644
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
SQUARE_SUM
#define SQUARE_SUM(suffix, type, zero)
Definition: af_axcorrelate.c:63
av_cold
#define av_cold
Definition: attributes.h:90
FILTER_SAMPLEFMTS
#define FILTER_SAMPLEFMTS(...)
Definition: filters.h:250
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:424
OFFSET
#define OFFSET(x)
Definition: af_axcorrelate.c:444
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1594
AudioXCorrelateContext::num_sum
AVFrame * num_sum
Definition: af_axcorrelate.c:40
s
#define s(width, name)
Definition: cbs_vp9.c:198
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_axcorrelate.c:371
AudioXCorrelateContext::used
int used
Definition: af_axcorrelate.c:42
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
filters.h
AudioXCorrelateContext
Definition: af_axcorrelate.c:30
ctx
AVFormatContext * ctx
Definition: movenc.c:49
ff_af_axcorrelate
const AVFilter ff_af_axcorrelate
Definition: af_axcorrelate.c:457
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:263
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
NULL
#define NULL
Definition: coverity.c:32
AudioXCorrelateContext::xcorrelate
int(* xcorrelate)(AVFilterContext *ctx, AVFrame *out, int available)
Definition: af_axcorrelate.c:45
av_audio_fifo_alloc
AVAudioFifo * av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, int nb_samples)
Allocate an AVAudioFifo.
Definition: audio_fifo.c:62
AudioXCorrelateContext::mean_sum
AVFrame * mean_sum[2]
Definition: af_axcorrelate.c:39
sqrtf
static __device__ float sqrtf(float a)
Definition: cuda_runtime.h:184
AudioXCorrelateContext::algo
int algo
Definition: af_axcorrelate.c:34
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1438
f
f
Definition: af_crystalizer.c:122
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
fmaxf
float fmaxf(float, float)
AF
#define AF
Definition: af_axcorrelate.c:443
XCORRELATE_SLOW
#define XCORRELATE_SLOW(suffix, type)
Definition: af_axcorrelate.c:106
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AudioXCorrelateContext::size
int size
Definition: af_axcorrelate.c:33
av_audio_fifo_peek
int av_audio_fifo_peek(const AVAudioFifo *af, void *const *data, int nb_samples)
Peek data from an AVAudioFifo.
Definition: audio_fifo.c:145
av_audio_fifo_size
int av_audio_fifo_size(AVAudioFifo *af)
Get the current number of samples in the AVAudioFifo available for reading.
Definition: audio_fifo.c:222
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_axcorrelate.c:409
AudioXCorrelateContext::cache
AVFrame * cache[2]
Definition: af_axcorrelate.c:38
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:469
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
algo
Definition: dct.c:56
AudioXCorrelateContext::den_sum
AVFrame * den_sum[2]
Definition: af_axcorrelate.c:41
available
if no frame is available
Definition: filter_design.txt:166
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:450
common.h
axcorrelate_options
static const AVOption axcorrelate_options[]
Definition: af_axcorrelate.c:446
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
audio_fifo.h
inputs
static const AVFilterPad inputs[]
Definition: af_axcorrelate.c:424
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
clipf
#define clipf(x)
Definition: af_axcorrelate.c:148
AVFilter
Filter definition.
Definition: avfilter.h:201
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
activate
static int activate(AVFilterContext *ctx)
Definition: af_axcorrelate.c:262
fmax
double fmax(double, double)
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
channel_layout.h
AudioXCorrelateContext::pts
int64_t pts
Definition: af_axcorrelate.c:35
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
audio.h
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
MEAN_SUM
#define MEAN_SUM(suffix, type, zero)
Definition: af_axcorrelate.c:48
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:239
XCORRELATE_BEST
#define XCORRELATE_BEST(suffix, type, zero, small, sqrtfun, FMAX, CLIP)
Definition: af_axcorrelate.c:201
outputs
static const AVFilterPad outputs[]
Definition: af_axcorrelate.c:435