vf_convolution.c
1 /*
2  * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avstring.h"
23 #include "libavutil/imgutils.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/pixdesc.h"
26 #include "avfilter.h"
27 #include "formats.h"
28 #include "internal.h"
29 #include "video.h"
30 
31 typedef struct ConvolutionContext {
32  const AVClass *class;
33 
34  char *matrix_str[4];
35  float rdiv[4];
36  float bias[4];
37  float scale;
38  float delta;
39  int planes;
40 
41  int size[4];
42  int depth;
43  int bpc;
44  int bstride;
45  uint8_t *buffer;
46  uint8_t **bptrs;
47  int nb_planes;
48  int nb_threads;
49  int planewidth[4];
50  int planeheight[4];
51  int matrix[4][49];
52  int matrix_length[4];
53  int copy[4];
54 
55  int (*filter[4])(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
56 } ConvolutionContext;
57 
58 #define OFFSET(x) offsetof(ConvolutionContext, x)
59 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
60 
61 static const AVOption convolution_options[] = {
62  { "0m", "set matrix for 1st plane", OFFSET(matrix_str[0]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
63  { "1m", "set matrix for 2nd plane", OFFSET(matrix_str[1]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
64  { "2m", "set matrix for 3rd plane", OFFSET(matrix_str[2]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
65  { "3m", "set matrix for 4th plane", OFFSET(matrix_str[3]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
66  { "0rdiv", "set rdiv for 1st plane", OFFSET(rdiv[0]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
67  { "1rdiv", "set rdiv for 2nd plane", OFFSET(rdiv[1]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
68  { "2rdiv", "set rdiv for 3rd plane", OFFSET(rdiv[2]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
69  { "3rdiv", "set rdiv for 4th plane", OFFSET(rdiv[3]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
70  { "0bias", "set bias for 1st plane", OFFSET(bias[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
71  { "1bias", "set bias for 2nd plane", OFFSET(bias[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
72  { "2bias", "set bias for 3rd plane", OFFSET(bias[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
73  { "3bias", "set bias for 4th plane", OFFSET(bias[3]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
74  { NULL }
75 };
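/* Usage note: "Nm" takes a space-separated list of 9, 25 or 49 integers
 * (interpreted as a 3x3, 5x5 or 7x7 kernel for plane N), "Nrdiv" multiplies
 * the weighted sum and "Nbias" is added to the result.  A rough, illustrative
 * invocation (kernel values chosen only as an example) might be:
 *
 *   ffmpeg -i in.mp4 -vf "convolution=0m=0 -1 0 -1 5 -1 0 -1 0" out.mp4
 *
 * which applies a sharpen-like 3x3 kernel to the first plane while the other
 * planes keep the identity default above and are copied through unchanged.
 */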
76 
77 AVFILTER_DEFINE_CLASS(convolution);
78 
79 static const int same3x3[9] = {0, 0, 0,
80  0, 1, 0,
81  0, 0, 0};
82 
83 static const int same5x5[25] = {0, 0, 0, 0, 0,
84  0, 0, 0, 0, 0,
85  0, 0, 1, 0, 0,
86  0, 0, 0, 0, 0,
87  0, 0, 0, 0, 0};
88 
89 static const int same7x7[49] = {0, 0, 0, 0, 0, 0, 0,
90  0, 0, 0, 0, 0, 0, 0,
91  0, 0, 0, 0, 0, 0, 0,
92  0, 0, 0, 1, 0, 0, 0,
93  0, 0, 0, 0, 0, 0, 0,
94  0, 0, 0, 0, 0, 0, 0,
95  0, 0, 0, 0, 0, 0, 0};
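/* These "identity" kernels match the default option strings above; init()
 * compares each parsed matrix against them and, when they are equal (and the
 * plane's rdiv is 1 with zero bias), marks that plane for a plain copy
 * instead of running a convolution over it.
 */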
96 
97 static int query_formats(AVFilterContext *ctx)
98 {
99  static const enum AVPixelFormat pix_fmts[] = {
        /* 8- to 16-bit planar YUV(A), GBR(A)P and grayscale formats */
117  AV_PIX_FMT_NONE
118  };
119 
120  return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
121 }
122 
123 static inline void line_copy8(uint8_t *line, const uint8_t *srcp, int width, int mergin)
124 {
125  int i;
126 
127  memcpy(line, srcp, width);
128 
129  for (i = mergin; i > 0; i--) {
130  line[-i] = line[i];
131  line[width - 1 + i] = line[width - 1 - i];
132  }
133 }
134 
135 static inline void line_copy16(uint16_t *line, const uint16_t *srcp, int width, int mergin)
136 {
137  int i;
138 
139  memcpy(line, srcp, width * 2);
140 
141  for (i = mergin; i > 0; i--) {
142  line[-i] = line[i];
143  line[width - 1 + i] = line[width - 1 - i];
144  }
145 }
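/* Both copy helpers duplicate one source row into a padded scratch line and
 * mirror "mergin" pixels across each edge so the kernel can read outside the
 * frame.  A small worked example with width = 6 and mergin = 2:
 *
 *   line[-1] = line[1];   line[-2] = line[2];
 *   line[6]  = line[4];   line[7]  = line[3];
 */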
146 
147 typedef struct ThreadData {
148  AVFrame *in, *out;
149  int plane;
150 } ThreadData;
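/* The hard-coded taps in the edge operators below correspond to the usual
 * kernel pairs; suma and sumb are the two directional responses and the
 * output is sqrt(suma^2 + sumb^2) * scale + delta, clipped to the plane's
 * value range:
 *
 *   Prewitt:  [-1 -1 -1]        [-1  0  1]
 *             [ 0  0  0]  and   [-1  0  1]
 *             [ 1  1  1]        [-1  0  1]
 *
 *   Sobel:    [-1 -2 -1]        [-1  0  1]
 *             [ 0  0  0]  and   [-2  0  2]
 *             [ 1  2  1]        [-1  0  1]
 *
 *   Roberts:  [ 1  0]           [ 0  1]
 *             [ 0 -1]    and    [-1  0]
 *
 * Each slice keeps three padded line buffers p0/p1/p2 that are rotated as
 * the loop advances one row at a time.
 */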
151 
152 static int filter16_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
153 {
154  ConvolutionContext *s = ctx->priv;
155  ThreadData *td = arg;
156  AVFrame *in = td->in;
157  AVFrame *out = td->out;
158  const int plane = td->plane;
159  const int peak = (1 << s->depth) - 1;
160  const int stride = in->linesize[plane] / 2;
161  const int bstride = s->bstride;
162  const int height = s->planeheight[plane];
163  const int width = s->planewidth[plane];
164  const int slice_start = (height * jobnr) / nb_jobs;
165  const int slice_end = (height * (jobnr+1)) / nb_jobs;
166  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
167  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
168  const float scale = s->scale;
169  const float delta = s->delta;
170  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
171  uint16_t *p1 = p0 + bstride;
172  uint16_t *p2 = p1 + bstride;
173  uint16_t *orig = p0, *end = p2;
174  int y, x;
175 
176  line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
177  line_copy16(p1, src, width, 1);
178 
179  for (y = slice_start; y < slice_end; y++) {
180  src += stride * (y < height - 1 ? 1 : -1);
181  line_copy16(p2, src, width, 1);
182 
183  for (x = 0; x < width; x++) {
184  int suma = p0[x - 1] * -1 +
185  p0[x] * -1 +
186  p0[x + 1] * -1 +
187  p2[x - 1] * 1 +
188  p2[x] * 1 +
189  p2[x + 1] * 1;
190  int sumb = p0[x - 1] * -1 +
191  p0[x + 1] * 1 +
192  p1[x - 1] * -1 +
193  p1[x + 1] * 1 +
194  p2[x - 1] * -1 +
195  p2[x + 1] * 1;
196 
197  dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
198  }
199 
200  p0 = p1;
201  p1 = p2;
202  p2 = (p2 == end) ? orig: p2 + bstride;
203  dst += out->linesize[plane] / 2;
204  }
205 
206  return 0;
207 }
208 
209 static int filter16_roberts(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
210 {
211  ConvolutionContext *s = ctx->priv;
212  ThreadData *td = arg;
213  AVFrame *in = td->in;
214  AVFrame *out = td->out;
215  const int plane = td->plane;
216  const int peak = (1 << s->depth) - 1;
217  const int stride = in->linesize[plane] / 2;
218  const int bstride = s->bstride;
219  const int height = s->planeheight[plane];
220  const int width = s->planewidth[plane];
221  const int slice_start = (height * jobnr) / nb_jobs;
222  const int slice_end = (height * (jobnr+1)) / nb_jobs;
223  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
224  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
225  const float scale = s->scale;
226  const float delta = s->delta;
227  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
228  uint16_t *p1 = p0 + bstride;
229  uint16_t *p2 = p1 + bstride;
230  uint16_t *orig = p0, *end = p2;
231  int y, x;
232 
233  line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
234  line_copy16(p1, src, width, 1);
235 
236  for (y = slice_start; y < slice_end; y++) {
237  src += stride * (y < height - 1 ? 1 : -1);
238  line_copy16(p2, src, width, 1);
239 
240  for (x = 0; x < width; x++) {
241  int suma = p0[x - 1] * 1 +
242  p1[x ] * -1;
243  int sumb = p0[x ] * 1 +
244  p1[x - 1] * -1;
245 
246  dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
247  }
248 
249  p0 = p1;
250  p1 = p2;
251  p2 = (p2 == end) ? orig: p2 + bstride;
252  dst += out->linesize[plane] / 2;
253  }
254 
255  return 0;
256 }
257 
258 static int filter16_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
259 {
260  ConvolutionContext *s = ctx->priv;
261  ThreadData *td = arg;
262  AVFrame *in = td->in;
263  AVFrame *out = td->out;
264  const int plane = td->plane;
265  const int peak = (1 << s->depth) - 1;
266  const int stride = in->linesize[plane] / 2;
267  const int bstride = s->bstride;
268  const int height = s->planeheight[plane];
269  const int width = s->planewidth[plane];
270  const int slice_start = (height * jobnr) / nb_jobs;
271  const int slice_end = (height * (jobnr+1)) / nb_jobs;
272  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
273  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
274  const float scale = s->scale;
275  const float delta = s->delta;
276  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
277  uint16_t *p1 = p0 + bstride;
278  uint16_t *p2 = p1 + bstride;
279  uint16_t *orig = p0, *end = p2;
280  int y, x;
281 
282  line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
283  line_copy16(p1, src, width, 1);
284 
285  for (y = slice_start; y < slice_end; y++) {
286  src += stride * (y < height - 1 ? 1 : -1);
287  line_copy16(p2, src, width, 1);
288 
289  for (x = 0; x < width; x++) {
290  int suma = p0[x - 1] * -1 +
291  p0[x] * -2 +
292  p0[x + 1] * -1 +
293  p2[x - 1] * 1 +
294  p2[x] * 2 +
295  p2[x + 1] * 1;
296  int sumb = p0[x - 1] * -1 +
297  p0[x + 1] * 1 +
298  p1[x - 1] * -2 +
299  p1[x + 1] * 2 +
300  p2[x - 1] * -1 +
301  p2[x + 1] * 1;
302 
303  dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
304  }
305 
306  p0 = p1;
307  p1 = p2;
308  p2 = (p2 == end) ? orig: p2 + bstride;
309  dst += out->linesize[plane] / 2;
310  }
311 
312  return 0;
313 }
314 
315 static int filter_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
316 {
317  ConvolutionContext *s = ctx->priv;
318  ThreadData *td = arg;
319  AVFrame *in = td->in;
320  AVFrame *out = td->out;
321  const int plane = td->plane;
322  const int stride = in->linesize[plane];
323  const int bstride = s->bstride;
324  const int height = s->planeheight[plane];
325  const int width = s->planewidth[plane];
326  const int slice_start = (height * jobnr) / nb_jobs;
327  const int slice_end = (height * (jobnr+1)) / nb_jobs;
328  const uint8_t *src = in->data[plane] + slice_start * stride;
329  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
330  const float scale = s->scale;
331  const float delta = s->delta;
332  uint8_t *p0 = s->bptrs[jobnr] + 16;
333  uint8_t *p1 = p0 + bstride;
334  uint8_t *p2 = p1 + bstride;
335  uint8_t *orig = p0, *end = p2;
336  int y, x;
337 
338  line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
339  line_copy8(p1, src, width, 1);
340 
341  for (y = slice_start; y < slice_end; y++) {
342  src += stride * (y < height - 1 ? 1 : -1);
343  line_copy8(p2, src, width, 1);
344 
345  for (x = 0; x < width; x++) {
346  int suma = p0[x - 1] * -1 +
347  p0[x] * -1 +
348  p0[x + 1] * -1 +
349  p2[x - 1] * 1 +
350  p2[x] * 1 +
351  p2[x + 1] * 1;
352  int sumb = p0[x - 1] * -1 +
353  p0[x + 1] * 1 +
354  p1[x - 1] * -1 +
355  p1[x + 1] * 1 +
356  p2[x - 1] * -1 +
357  p2[x + 1] * 1;
358 
359  dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
360  }
361 
362  p0 = p1;
363  p1 = p2;
364  p2 = (p2 == end) ? orig: p2 + bstride;
365  dst += out->linesize[plane];
366  }
367 
368  return 0;
369 }
370 
371 static int filter_roberts(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
372 {
373  ConvolutionContext *s = ctx->priv;
374  ThreadData *td = arg;
375  AVFrame *in = td->in;
376  AVFrame *out = td->out;
377  const int plane = td->plane;
378  const int stride = in->linesize[plane];
379  const int bstride = s->bstride;
380  const int height = s->planeheight[plane];
381  const int width = s->planewidth[plane];
382  const int slice_start = (height * jobnr) / nb_jobs;
383  const int slice_end = (height * (jobnr+1)) / nb_jobs;
384  const uint8_t *src = in->data[plane] + slice_start * stride;
385  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
386  const float scale = s->scale;
387  const float delta = s->delta;
388  uint8_t *p0 = s->bptrs[jobnr] + 16;
389  uint8_t *p1 = p0 + bstride;
390  uint8_t *p2 = p1 + bstride;
391  uint8_t *orig = p0, *end = p2;
392  int y, x;
393 
394  line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
395  line_copy8(p1, src, width, 1);
396 
397  for (y = slice_start; y < slice_end; y++) {
398  src += stride * (y < height - 1 ? 1 : -1);
399  line_copy8(p2, src, width, 1);
400 
401  for (x = 0; x < width; x++) {
402  int suma = p0[x - 1] * 1 +
403  p1[x ] * -1;
404  int sumb = p0[x ] * 1 +
405  p1[x - 1] * -1;
406 
407  dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
408  }
409 
410  p0 = p1;
411  p1 = p2;
412  p2 = (p2 == end) ? orig: p2 + bstride;
413  dst += out->linesize[plane];
414  }
415 
416  return 0;
417 }
418 
419 static int filter_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
420 {
421  ConvolutionContext *s = ctx->priv;
422  ThreadData *td = arg;
423  AVFrame *in = td->in;
424  AVFrame *out = td->out;
425  const int plane = td->plane;
426  const int stride = in->linesize[plane];
427  const int bstride = s->bstride;
428  const int height = s->planeheight[plane];
429  const int width = s->planewidth[plane];
430  const int slice_start = (height * jobnr) / nb_jobs;
431  const int slice_end = (height * (jobnr+1)) / nb_jobs;
432  const uint8_t *src = in->data[plane] + slice_start * stride;
433  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
434  const float scale = s->scale;
435  const float delta = s->delta;
436  uint8_t *p0 = s->bptrs[jobnr] + 16;
437  uint8_t *p1 = p0 + bstride;
438  uint8_t *p2 = p1 + bstride;
439  uint8_t *orig = p0, *end = p2;
440  int y, x;
441 
442  line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
443  line_copy8(p1, src, width, 1);
444 
445  for (y = slice_start; y < slice_end; y++) {
446  src += stride * (y < height - 1 ? 1 : -1);
447  line_copy8(p2, src, width, 1);
448 
449  for (x = 0; x < width; x++) {
450  int suma = p0[x - 1] * -1 +
451  p0[x] * -2 +
452  p0[x + 1] * -1 +
453  p2[x - 1] * 1 +
454  p2[x] * 2 +
455  p2[x + 1] * 1;
456  int sumb = p0[x - 1] * -1 +
457  p0[x + 1] * 1 +
458  p1[x - 1] * -2 +
459  p1[x + 1] * 2 +
460  p2[x - 1] * -1 +
461  p2[x + 1] * 1;
462 
463  dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
464  }
465 
466  p0 = p1;
467  p1 = p2;
468  p2 = (p2 == end) ? orig: p2 + bstride;
469  dst += out->linesize[plane];
470  }
471 
472  return 0;
473 }
474 
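/* The generic NxN filters below compute a plain weighted sum over the
 * neighbourhood, then scale, bias, round and clip it.  For example, with the
 * 3x3 matrix "1 1 1 1 1 1 1 1 1", rdiv = 1/9 and bias = 0, a pixel whose
 * nine neighbours sum to 1026 becomes (int)(1026 / 9.0 + 0 + 0.5) = 114,
 * i.e. a simple box blur.
 */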
475 static int filter16_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
476 {
477  ConvolutionContext *s = ctx->priv;
478  ThreadData *td = arg;
479  AVFrame *in = td->in;
480  AVFrame *out = td->out;
481  const int plane = td->plane;
482  const int peak = (1 << s->depth) - 1;
483  const int stride = in->linesize[plane] / 2;
484  const int bstride = s->bstride;
485  const int height = s->planeheight[plane];
486  const int width = s->planewidth[plane];
487  const int slice_start = (height * jobnr) / nb_jobs;
488  const int slice_end = (height * (jobnr+1)) / nb_jobs;
489  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
490  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
491  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
492  uint16_t *p1 = p0 + bstride;
493  uint16_t *p2 = p1 + bstride;
494  uint16_t *orig = p0, *end = p2;
495  const int *matrix = s->matrix[plane];
496  const float rdiv = s->rdiv[plane];
497  const float bias = s->bias[plane];
498  int y, x;
499 
500  line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
501  line_copy16(p1, src, width, 1);
502 
503  for (y = slice_start; y < slice_end; y++) {
504  src += stride * (y < height - 1 ? 1 : -1);
505  line_copy16(p2, src, width, 1);
506 
507  for (x = 0; x < width; x++) {
508  int sum = p0[x - 1] * matrix[0] +
509  p0[x] * matrix[1] +
510  p0[x + 1] * matrix[2] +
511  p1[x - 1] * matrix[3] +
512  p1[x] * matrix[4] +
513  p1[x + 1] * matrix[5] +
514  p2[x - 1] * matrix[6] +
515  p2[x] * matrix[7] +
516  p2[x + 1] * matrix[8];
517  sum = (int)(sum * rdiv + bias + 0.5f);
518  dst[x] = av_clip(sum, 0, peak);
519  }
520 
521  p0 = p1;
522  p1 = p2;
523  p2 = (p2 == end) ? orig: p2 + bstride;
524  dst += out->linesize[plane] / 2;
525  }
526 
527  return 0;
528 }
529 
530 static int filter16_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
531 {
532  ConvolutionContext *s = ctx->priv;
533  ThreadData *td = arg;
534  AVFrame *in = td->in;
535  AVFrame *out = td->out;
536  const int plane = td->plane;
537  const int peak = (1 << s->depth) - 1;
538  const int stride = in->linesize[plane] / 2;
539  const int bstride = s->bstride;
540  const int height = s->planeheight[plane];
541  const int width = s->planewidth[plane];
542  const int slice_start = (height * jobnr) / nb_jobs;
543  const int slice_end = (height * (jobnr+1)) / nb_jobs;
544  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
545  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
546  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;
547  uint16_t *p1 = p0 + bstride;
548  uint16_t *p2 = p1 + bstride;
549  uint16_t *p3 = p2 + bstride;
550  uint16_t *p4 = p3 + bstride;
551  uint16_t *orig = p0, *end = p4;
552  const int *matrix = s->matrix[plane];
553  float rdiv = s->rdiv[plane];
554  float bias = s->bias[plane];
555  int y, x, i;
556 
557  line_copy16(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
558  line_copy16(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
559  line_copy16(p2, src, width, 2);
560  src += stride;
561  line_copy16(p3, src, width, 2);
562 
563  for (y = slice_start; y < slice_end; y++) {
564  uint16_t *array[] = {
565  p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
566  p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
567  p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
568  p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
569  p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
570  };
571 
572  src += stride * (y < height - 2 ? 1 : -1);
573  line_copy16(p4, src, width, 2);
574 
575  for (x = 0; x < width; x++) {
576  int sum = 0;
577 
578  for (i = 0; i < 25; i++) {
579  sum += *(array[i] + x) * matrix[i];
580  }
581  sum = (int)(sum * rdiv + bias + 0.5f);
582  dst[x] = av_clip(sum, 0, peak);
583  }
584 
585  p0 = p1;
586  p1 = p2;
587  p2 = p3;
588  p3 = p4;
589  p4 = (p4 == end) ? orig: p4 + bstride;
590  dst += out->linesize[plane] / 2;
591  }
592 
593  return 0;
594 }
595 
596 static int filter16_7x7(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
597 {
598  ConvolutionContext *s = ctx->priv;
599  ThreadData *td = arg;
600  AVFrame *in = td->in;
601  AVFrame *out = td->out;
602  const int plane = td->plane;
603  const int peak = (1 << s->depth) - 1;
604  const int stride = in->linesize[plane] / 2;
605  const int bstride = s->bstride;
606  const int height = s->planeheight[plane];
607  const int width = s->planewidth[plane];
608  const int slice_start = (height * jobnr) / nb_jobs;
609  const int slice_end = (height * (jobnr+1)) / nb_jobs;
610  const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
611  uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
612  uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 32;
613  uint16_t *p1 = p0 + bstride;
614  uint16_t *p2 = p1 + bstride;
615  uint16_t *p3 = p2 + bstride;
616  uint16_t *p4 = p3 + bstride;
617  uint16_t *p5 = p4 + bstride;
618  uint16_t *p6 = p5 + bstride;
619  uint16_t *orig = p0, *end = p6;
620  const int *matrix = s->matrix[plane];
621  float rdiv = s->rdiv[plane];
622  float bias = s->bias[plane];
623  int y, x, i;
624 
625  line_copy16(p0, src + 3 * stride * (slice_start < 3 ? 1 : -1), width, 3);
626  line_copy16(p1, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 3);
627  line_copy16(p2, src + stride * (slice_start == 0 ? 1 : -1), width, 3);
628  line_copy16(p3, src, width, 3);
629  src += stride;
630  line_copy16(p4, src, width, 3);
631  src += stride;
632  line_copy16(p5, src, width, 3);
633 
634  for (y = slice_start; y < slice_end; y++) {
635  uint16_t *array[] = {
636  p0 - 3, p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2, p0 + 3,
637  p1 - 3, p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2, p1 + 3,
638  p2 - 3, p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2, p2 + 3,
639  p3 - 3, p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2, p3 + 3,
640  p4 - 3, p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2, p4 + 3,
641  p5 - 3, p5 - 2, p5 - 1, p5, p5 + 1, p5 + 2, p5 + 3,
642  p6 - 3, p6 - 2, p6 - 1, p6, p6 + 1, p6 + 2, p6 + 3,
643  };
644 
645  src += stride * (y < height - 3 ? 1 : -1);
646  line_copy16(p6, src, width, 3);
647 
648  for (x = 0; x < width; x++) {
649  int sum = 0;
650 
651  for (i = 0; i < 49; i++) {
652  sum += *(array[i] + x) * matrix[i];
653  }
654  sum = (int)(sum * rdiv + bias + 0.5f);
655  dst[x] = av_clip(sum, 0, peak);
656  }
657 
658  p0 = p1;
659  p1 = p2;
660  p2 = p3;
661  p3 = p4;
662  p4 = p5;
663  p5 = p6;
664  p6 = (p6 == end) ? orig: p6 + bstride;
665  dst += out->linesize[plane] / 2;
666  }
667 
668  return 0;
669 }
670 
671 static int filter_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
672 {
673  ConvolutionContext *s = ctx->priv;
674  ThreadData *td = arg;
675  AVFrame *in = td->in;
676  AVFrame *out = td->out;
677  const int plane = td->plane;
678  const int stride = in->linesize[plane];
679  const int bstride = s->bstride;
680  const int height = s->planeheight[plane];
681  const int width = s->planewidth[plane];
682  const int slice_start = (height * jobnr) / nb_jobs;
683  const int slice_end = (height * (jobnr+1)) / nb_jobs;
684  const uint8_t *src = in->data[plane] + slice_start * stride;
685  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
686  uint8_t *p0 = s->bptrs[jobnr] + 16;
687  uint8_t *p1 = p0 + bstride;
688  uint8_t *p2 = p1 + bstride;
689  uint8_t *orig = p0, *end = p2;
690  const int *matrix = s->matrix[plane];
691  const float rdiv = s->rdiv[plane];
692  const float bias = s->bias[plane];
693  int y, x;
694 
695  line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
696  line_copy8(p1, src, width, 1);
697 
698  for (y = slice_start; y < slice_end; y++) {
699  src += stride * (y < height - 1 ? 1 : -1);
700  line_copy8(p2, src, width, 1);
701 
702  for (x = 0; x < width; x++) {
703  int sum = p0[x - 1] * matrix[0] +
704  p0[x] * matrix[1] +
705  p0[x + 1] * matrix[2] +
706  p1[x - 1] * matrix[3] +
707  p1[x] * matrix[4] +
708  p1[x + 1] * matrix[5] +
709  p2[x - 1] * matrix[6] +
710  p2[x] * matrix[7] +
711  p2[x + 1] * matrix[8];
712  sum = (int)(sum * rdiv + bias + 0.5f);
713  dst[x] = av_clip_uint8(sum);
714  }
715 
716  p0 = p1;
717  p1 = p2;
718  p2 = (p2 == end) ? orig: p2 + bstride;
719  dst += out->linesize[plane];
720  }
721 
722  return 0;
723 }
724 
725 static int filter_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
726 {
727  ConvolutionContext *s = ctx->priv;
728  ThreadData *td = arg;
729  AVFrame *in = td->in;
730  AVFrame *out = td->out;
731  const int plane = td->plane;
732  const int stride = in->linesize[plane];
733  const int bstride = s->bstride;
734  const int height = s->planeheight[plane];
735  const int width = s->planewidth[plane];
736  const int slice_start = (height * jobnr) / nb_jobs;
737  const int slice_end = (height * (jobnr+1)) / nb_jobs;
738  const uint8_t *src = in->data[plane] + slice_start * stride;
739  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
740  uint8_t *p0 = s->bptrs[jobnr] + 16;
741  uint8_t *p1 = p0 + bstride;
742  uint8_t *p2 = p1 + bstride;
743  uint8_t *p3 = p2 + bstride;
744  uint8_t *p4 = p3 + bstride;
745  uint8_t *orig = p0, *end = p4;
746  const int *matrix = s->matrix[plane];
747  float rdiv = s->rdiv[plane];
748  float bias = s->bias[plane];
749  int y, x, i;
750 
751  line_copy8(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
752  line_copy8(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
753  line_copy8(p2, src, width, 2);
754  src += stride;
755  line_copy8(p3, src, width, 2);
756 
757 
758  for (y = slice_start; y < slice_end; y++) {
759  uint8_t *array[] = {
760  p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
761  p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
762  p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
763  p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
764  p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
765  };
766 
767  src += stride * (y < height - 2 ? 1 : -1);
768  line_copy8(p4, src, width, 2);
769 
770  for (x = 0; x < width; x++) {
771  int sum = 0;
772 
773  for (i = 0; i < 25; i++) {
774  sum += *(array[i] + x) * matrix[i];
775  }
776  sum = (int)(sum * rdiv + bias + 0.5f);
777  dst[x] = av_clip_uint8(sum);
778  }
779 
780  p0 = p1;
781  p1 = p2;
782  p2 = p3;
783  p3 = p4;
784  p4 = (p4 == end) ? orig: p4 + bstride;
785  dst += out->linesize[plane];
786  }
787 
788  return 0;
789 }
790 
791 static int filter_7x7(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
792 {
793  ConvolutionContext *s = ctx->priv;
794  ThreadData *td = arg;
795  AVFrame *in = td->in;
796  AVFrame *out = td->out;
797  const int plane = td->plane;
798  const int stride = in->linesize[plane];
799  const int bstride = s->bstride;
800  const int height = s->planeheight[plane];
801  const int width = s->planewidth[plane];
802  const int slice_start = (height * jobnr) / nb_jobs;
803  const int slice_end = (height * (jobnr+1)) / nb_jobs;
804  const uint8_t *src = in->data[plane] + slice_start * stride;
805  uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
806  uint8_t *p0 = s->bptrs[jobnr] + 32;
807  uint8_t *p1 = p0 + bstride;
808  uint8_t *p2 = p1 + bstride;
809  uint8_t *p3 = p2 + bstride;
810  uint8_t *p4 = p3 + bstride;
811  uint8_t *p5 = p4 + bstride;
812  uint8_t *p6 = p5 + bstride;
813  uint8_t *orig = p0, *end = p6;
814  const int *matrix = s->matrix[plane];
815  float rdiv = s->rdiv[plane];
816  float bias = s->bias[plane];
817  int y, x, i;
818 
819  line_copy8(p0, src + 3 * stride * (slice_start < 3 ? 1 : -1), width, 3);
820  line_copy8(p1, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 3);
821  line_copy8(p2, src + stride * (slice_start == 0 ? 1 : -1), width, 3);
822  line_copy8(p3, src, width, 3);
823  src += stride;
824  line_copy8(p4, src, width, 3);
825  src += stride;
826  line_copy8(p5, src, width, 3);
827 
828  for (y = slice_start; y < slice_end; y++) {
829  uint8_t *array[] = {
830  p0 - 3, p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2, p0 + 3,
831  p1 - 3, p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2, p1 + 3,
832  p2 - 3, p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2, p2 + 3,
833  p3 - 3, p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2, p3 + 3,
834  p4 - 3, p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2, p4 + 3,
835  p5 - 3, p5 - 2, p5 - 1, p5, p5 + 1, p5 + 2, p5 + 3,
836  p6 - 3, p6 - 2, p6 - 1, p6, p6 + 1, p6 + 2, p6 + 3,
837  };
838 
839  src += stride * (y < height - 3 ? 1 : -1);
840  line_copy8(p6, src, width, 3);
841 
842  for (x = 0; x < width; x++) {
843  int sum = 0;
844 
845  for (i = 0; i < 49; i++) {
846  sum += *(array[i] + x) * matrix[i];
847  }
848  sum = (int)(sum * rdiv + bias + 0.5f);
849  dst[x] = av_clip_uint8(sum);
850  }
851 
852  p0 = p1;
853  p1 = p2;
854  p2 = p3;
855  p3 = p4;
856  p4 = p5;
857  p5 = p6;
858  p6 = (p6 == end) ? orig: p6 + bstride;
859  dst += out->linesize[plane];
860  }
861 
862  return 0;
863 }
864 
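/* config_input() sizes the per-thread scratch space: bstride is the widest
 * plane plus 64 elements of padding, and every thread gets 7 consecutive
 * padded lines (enough for the largest 7x7 kernel).  Thread p therefore
 * starts at buffer + 7 * bstride * bpc * p, where bpc is 1 for 8-bit input
 * and 2 for higher bit depths.
 */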
865 static int config_input(AVFilterLink *inlink)
866 {
867  AVFilterContext *ctx = inlink->dst;
868  ConvolutionContext *s = ctx->priv;
869  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
870  int p;
871 
872  s->depth = desc->comp[0].depth;
873 
874  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
875  s->planewidth[0] = s->planewidth[3] = inlink->w;
876  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
877  s->planeheight[0] = s->planeheight[3] = inlink->h;
878 
879  s->nb_planes = av_pix_fmt_count_planes(inlink->format);
880  s->nb_threads = ff_filter_get_nb_threads(ctx);
881  s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
882  if (!s->bptrs)
883  return AVERROR(ENOMEM);
884 
885  s->bstride = s->planewidth[0] + 64;
886  s->bpc = (s->depth + 7) / 8;
887  s->buffer = av_malloc_array(7 * s->bstride * s->nb_threads, s->bpc);
888  if (!s->buffer)
889  return AVERROR(ENOMEM);
890 
891  for (p = 0; p < s->nb_threads; p++) {
892  s->bptrs[p] = s->buffer + 7 * s->bstride * s->bpc * p;
893  }
894 
895  if (!strcmp(ctx->filter->name, "convolution")) {
896  if (s->depth > 8) {
897  for (p = 0; p < s->nb_planes; p++) {
898  if (s->size[p] == 3)
899  s->filter[p] = filter16_3x3;
900  else if (s->size[p] == 5)
901  s->filter[p] = filter16_5x5;
902  else if (s->size[p] == 7)
903  s->filter[p] = filter16_7x7;
904  }
905  }
906  } else if (!strcmp(ctx->filter->name, "prewitt")) {
907  if (s->depth > 8)
908  for (p = 0; p < s->nb_planes; p++)
909  s->filter[p] = filter16_prewitt;
910  } else if (!strcmp(ctx->filter->name, "roberts")) {
911  if (s->depth > 8)
912  for (p = 0; p < s->nb_planes; p++)
913  s->filter[p] = filter16_roberts;
914  } else if (!strcmp(ctx->filter->name, "sobel")) {
915  if (s->depth > 8)
916  for (p = 0; p < s->nb_planes; p++)
917  s->filter[p] = filter16_sobel;
918  }
919 
920  return 0;
921 }
922 
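/* filter_frame() runs each plane through the slice-threaded executor: job
 * jobnr of nb_jobs processes rows [height*jobnr/nb_jobs,
 * height*(jobnr+1)/nb_jobs).  With a 100-row plane and 4 jobs that is rows
 * 0-24, 25-49, 50-74 and 75-99.  Planes whose kernel is the identity are
 * copied with av_image_copy_plane() instead.
 */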
923 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
924 {
925  AVFilterContext *ctx = inlink->dst;
926  ConvolutionContext *s = ctx->priv;
927  AVFilterLink *outlink = ctx->outputs[0];
928  AVFrame *out;
929  int plane;
930 
931  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
932  if (!out) {
933  av_frame_free(&in);
934  return AVERROR(ENOMEM);
935  }
936  av_frame_copy_props(out, in);
937 
938  for (plane = 0; plane < s->nb_planes; plane++) {
939  ThreadData td;
940 
941  if (s->copy[plane]) {
942  av_image_copy_plane(out->data[plane], out->linesize[plane],
943  in->data[plane], in->linesize[plane],
944  s->planewidth[plane] * s->bpc,
945  s->planeheight[plane]);
946  continue;
947  }
948 
949  td.in = in;
950  td.out = out;
951  td.plane = plane;
952  ctx->internal->execute(ctx, s->filter[plane], &td, NULL, FFMIN(s->planeheight[plane], s->nb_threads));
953  }
954 
955  av_frame_free(&in);
956  return ff_filter_frame(outlink, out);
957 }
958 
959 static av_cold int init(AVFilterContext *ctx)
960 {
961  ConvolutionContext *s = ctx->priv;
962  int i;
963 
964  if (!strcmp(ctx->filter->name, "convolution")) {
965  for (i = 0; i < 4; i++) {
966  int *matrix = (int *)s->matrix[i];
967  char *p, *arg, *saveptr = NULL;
968 
969  p = s->matrix_str[i];
970  while (s->matrix_length[i] < 49) {
971  if (!(arg = av_strtok(p, " ", &saveptr)))
972  break;
973 
974  p = NULL;
975  sscanf(arg, "%d", &matrix[s->matrix_length[i]]);
976  s->matrix_length[i]++;
977  }
978 
979  if (s->matrix_length[i] == 9) {
980  s->size[i] = 3;
981  if (!memcmp(matrix, same3x3, sizeof(same3x3)))
982  s->copy[i] = 1;
983  else
984  s->filter[i] = filter_3x3;
985  } else if (s->matrix_length[i] == 25) {
986  s->size[i] = 5;
987  if (!memcmp(matrix, same5x5, sizeof(same5x5)))
988  s->copy[i] = 1;
989  else
990  s->filter[i] = filter_5x5;
991  } else if (s->matrix_length[i] == 49) {
992  s->size[i] = 7;
993  if (!memcmp(matrix, same7x7, sizeof(same7x7)))
994  s->copy[i] = 1;
995  else
996  s->filter[i] = filter_7x7;
997  } else {
998  return AVERROR(EINVAL);
999  }
1000 
1001  if (s->copy[i] && (s->rdiv[i] != 1. || s->bias[i] != 0.))
1002  s->copy[i] = 0;
1003  }
1004  } else if (!strcmp(ctx->filter->name, "prewitt")) {
1005  for (i = 0; i < 4; i++) {
1006  if ((1 << i) & s->planes)
1007  s->filter[i] = filter_prewitt;
1008  else
1009  s->copy[i] = 1;
1010  }
1011  } else if (!strcmp(ctx->filter->name, "roberts")) {
1012  for (i = 0; i < 4; i++) {
1013  if ((1 << i) & s->planes)
1014  s->filter[i] = filter_roberts;
1015  else
1016  s->copy[i] = 1;
1017  }
1018  } else if (!strcmp(ctx->filter->name, "sobel")) {
1019  for (i = 0; i < 4; i++) {
1020  if ((1 << i) & s->planes)
1021  s->filter[i] = filter_sobel;
1022  else
1023  s->copy[i] = 1;
1024  }
1025  }
1026 
1027  return 0;
1028 }
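/* Sketch of how the matrix strings are interpreted (values chosen only as an
 * example): "0m=-1 -1 -1 -1 8 -1 -1 -1 -1" parses into 9 integers, so
 * size[0] becomes 3 and filter_3x3 handles the first plane; 25 values select
 * the 5x5 path, 49 the 7x7 path, and any other count makes init() fail with
 * AVERROR(EINVAL).
 */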
1029 
1030 static av_cold void uninit(AVFilterContext *ctx)
1031 {
1032  ConvolutionContext *s = ctx->priv;
1033 
1034  av_freep(&s->bptrs);
1035  av_freep(&s->buffer);
1036 }
1037 
1038 static const AVFilterPad convolution_inputs[] = {
1039  {
1040  .name = "default",
1041  .type = AVMEDIA_TYPE_VIDEO,
1042  .config_props = config_input,
1043  .filter_frame = filter_frame,
1044  },
1045  { NULL }
1046 };
1047 
1048 static const AVFilterPad convolution_outputs[] = {
1049  {
1050  .name = "default",
1051  .type = AVMEDIA_TYPE_VIDEO,
1052  },
1053  { NULL }
1054 };
1055 
1056 #if CONFIG_CONVOLUTION_FILTER
1057 
1058 AVFilter ff_vf_convolution = {
1059  .name = "convolution",
1060  .description = NULL_IF_CONFIG_SMALL("Apply convolution filter."),
1061  .priv_size = sizeof(ConvolutionContext),
1062  .priv_class = &convolution_class,
1063  .init = init,
1064  .uninit = uninit,
1065  .query_formats = query_formats,
1066  .inputs = convolution_inputs,
1067  .outputs = convolution_outputs,
1068  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
1069 };
1070 
1071 #endif /* CONFIG_CONVOLUTION_FILTER */
1072 
1073 #if CONFIG_PREWITT_FILTER
1074 
1075 static const AVOption prewitt_options[] = {
1076  { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
1077  { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
1078  { "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
1079  { NULL }
1080 };
1081 
1082 AVFILTER_DEFINE_CLASS(prewitt);
1083 
1084 AVFilter ff_vf_prewitt = {
1085  .name = "prewitt",
1086  .description = NULL_IF_CONFIG_SMALL("Apply prewitt operator."),
1087  .priv_size = sizeof(ConvolutionContext),
1088  .priv_class = &prewitt_class,
1089  .init = init,
1090  .uninit = uninit,
1091  .query_formats = query_formats,
1092  .inputs = convolution_inputs,
1093  .outputs = convolution_outputs,
1094  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
1095 };
1096 
1097 #endif /* CONFIG_PREWITT_FILTER */
1098 
1099 #if CONFIG_SOBEL_FILTER
1100 
1101 static const AVOption sobel_options[] = {
1102  { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
1103  { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
1104  { "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
1105  { NULL }
1106 };
1107 
1108 AVFILTER_DEFINE_CLASS(sobel);
1109 
1110 AVFilter ff_vf_sobel = {
1111  .name = "sobel",
1112  .description = NULL_IF_CONFIG_SMALL("Apply sobel operator."),
1113  .priv_size = sizeof(ConvolutionContext),
1114  .priv_class = &sobel_class,
1115  .init = init,
1116  .uninit = uninit,
1117  .query_formats = query_formats,
1118  .inputs = convolution_inputs,
1119  .outputs = convolution_outputs,
1120  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
1121 };
1122 
1123 #endif /* CONFIG_SOBEL_FILTER */
1124 
1125 #if CONFIG_ROBERTS_FILTER
1126 
1127 static const AVOption roberts_options[] = {
1128  { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
1129  { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
1130  { "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
1131  { NULL }
1132 };
1133 
1134 AVFILTER_DEFINE_CLASS(roberts);
1135 
1136 AVFilter ff_vf_roberts = {
1137  .name = "roberts",
1138  .description = NULL_IF_CONFIG_SMALL("Apply roberts cross operator."),
1139  .priv_size = sizeof(ConvolutionContext),
1140  .priv_class = &roberts_class,
1141  .init = init,
1142  .uninit = uninit,
1143  .query_formats = query_formats,
1144  .inputs = convolution_inputs,
1145  .outputs = convolution_outputs,
1146  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
1147 };
1148 
1149 #endif /* CONFIG_ROBERTS_FILTER */
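/* Illustrative command lines for the edge operators defined above (assumed
 * typical usage): filtering only the luma plane with the default scale and
 * delta could look like
 *
 *   ffmpeg -i in.mp4 -vf "sobel=planes=1"   out.mp4
 *   ffmpeg -i in.mp4 -vf "prewitt=planes=1" out.mp4
 *   ffmpeg -i in.mp4 -vf "roberts=planes=1" out.mp4
 *
 * planes is a bitmask, so the default 15 processes all four possible planes.
 */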