[FFmpeg-devel] [PATCH] avfilter: add audio parametric multi band equalizer filter
Ganesh Ajjanagadde
gajjanag at mit.edu
Wed Dec 23 19:19:39 CET 2015
On Wed, Dec 23, 2015 at 10:05 AM, Paul B Mahol <onemda at gmail.com> wrote:
> Signed-off-by: Paul B Mahol <onemda at gmail.com>
> ---
>
> I'm happy with the feature set so I will apply this soon.
>
> ---
> configure | 1 +
> doc/filters.texi | 76 +++++
> libavfilter/Makefile | 1 +
> libavfilter/af_anequalizer.c | 679 +++++++++++++++++++++++++++++++++++++++++++
> libavfilter/allfilters.c | 1 +
> 5 files changed, 758 insertions(+)
> create mode 100644 libavfilter/af_anequalizer.c
>
> diff --git a/configure b/configure
> index 54c9789..3d81e87 100755
> --- a/configure
> +++ b/configure
> @@ -2838,6 +2838,7 @@ unix_protocol_select="network"
> # filters
> aemphasis_filter_deps="cabs cexp"
> amovie_filter_deps="avcodec avformat"
> +anequalizer_filter_deps="cabs cexp"
> aresample_filter_deps="swresample"
> ass_filter_deps="libass"
> asyncts_filter_deps="avresample"
> diff --git a/doc/filters.texi b/doc/filters.texi
> index a55cad4..68d7628 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -992,6 +992,82 @@ stream ends. The default value is 2 seconds.
>
> @end table
>
> + at section anequalizer
> +
> +High-order parametric equalizer with an unlimited number of bands for each channel.
> +
> +It accepts the following parameters:
> + at table @option
> + at item params
> +
> +This option string is in the format:
> +"c@var{chn} f=@var{cf} w=@var{w} g=@var{g} t=@var{f} | ..."
> +Each equalizer band is separated by '|'.
> +
> + at table @option
> + at item chn
> +Set channel number to which equalization will be applied.
> +If input doesn't have that channel the entry is ignored.
> +
> + at item cf
> +Set central frequency for band.
> +If input doesn't have that frequency the entry is ignored.
> +
> + at item w
> +Set band width in hertz.
> +
> + at item g
> +Set band gain in dB.
> +
> + at item f
> +Set filter type for band. This is optional and can be one of:
> +
> + at table @samp
> + at item 0
> +Butterworth, this is the default.
> +
> + at item 1
> +Chebyshev type 1.
> +
> + at item 2
> +Chebyshev type 2.
> + at end table
> + at end table
> +
> + at item curves
> +With this option activated, the frequency response of the equalizer is displayed
> +in the video stream.
> +
> + at item size
> +Set video stream size. Only useful if curves option is activated.
> +
> + at item mgain
> +Set the maximum gain that will be displayed. Only useful if the curves option is activated.
> +Setting this to a reasonable value makes it possible to display the gain that results
> +from neighbouring bands which are too close to each other and therefore produce a
> +higher combined gain when both are active.
> +
> + at item fscale
> +Set frequency scale used to draw frequency response in video output.
> +Can be linear or logarithmic. Default is logarithmic.
> +
> + at item colors
> +Set the color of each channel curve that is drawn in the video stream.
> +This is a list of color names separated by spaces or by '|'.
> +Unrecognised or missing colors will be replaced by white.
> + at end table
> +
> + at subsection Examples
> +
> + at itemize
> + at item
> +Lower the gain by 10 dB at a central frequency of 200 Hz with a band width of 100 Hz
> +for the first 2 channels, using a Chebyshev type 1 filter:
> + at example
> +anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1
> + at end example
> + at end itemize
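Maybe also worth adding a complete command-line example showing how the second
(video) output is consumed when curves is enabled, since the filter then has two
outputs and needs -filter_complex plus explicit -map. Untested sketch, going purely
by the option syntax documented above:

@example
ffmpeg -i input.wav -filter_complex "[0:a]anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1:curves=1[a][v]" -map "[a]" -map "[v]" output.mkv
@end example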
> +
> @section anull
>
> Pass the audio source unchanged to the output.
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index dea012a..adbbc39 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -29,6 +29,7 @@ OBJS-$(CONFIG_ACROSSFADE_FILTER) += af_afade.o
> OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
> OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
> OBJS-$(CONFIG_AEMPHASIS_FILTER) += af_aemphasis.o
> +OBJS-$(CONFIG_ANEQUALIZER_FILTER) += af_anequalizer.o
> OBJS-$(CONFIG_AEVAL_FILTER) += aeval.o
> OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
> OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
> diff --git a/libavfilter/af_anequalizer.c b/libavfilter/af_anequalizer.c
> new file mode 100644
> index 0000000..72ce88a
> --- /dev/null
> +++ b/libavfilter/af_anequalizer.c
> @@ -0,0 +1,679 @@
> +/*
> + * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
> + * Copyright (c) 2015 Paul B Mahol
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include <complex.h>
> +
> +#include "libavutil/intreadwrite.h"
> +#include "libavutil/avstring.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/parseutils.h"
> +#include "avfilter.h"
> +#include "internal.h"
> +#include "audio.h"
> +
> +#define FILTER_ORDER 4
> +
> +enum FilterType {
> + BUTTERWORTH,
> + CHEBYSHEV1,
> + CHEBYSHEV2,
> + NB_TYPES
> +};
> +
> +typedef struct FoSection {
> + double a0, a1, a2, a3, a4;
> + double b0, b1, b2, b3, b4;
> +
> + double num[4];
> + double denum[4];
> +} FoSection;
> +
> +typedef struct EqualizatorFilter {
> + int ignore;
> + int channel;
> + int type;
> +
> + double freq;
> + double gain;
> + double width;
> +
> + FoSection section[2];
> +} EqualizatorFilter;
> +
> +typedef struct AudioNEqualizerContext {
> + const AVClass *class;
> + char *args;
> + char *colors;
> + int draw_curves;
> + int w, h;
> +
> + double mag;
> + int fscale;
> + int nb_filters;
> + int nb_allocated;
> + EqualizatorFilter *filters;
> + AVFrame *video;
> +} AudioNEqualizerContext;
> +
> +#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
> +#define A AV_OPT_FLAG_AUDIO_PARAM
> +#define V AV_OPT_FLAG_VIDEO_PARAM
> +#define F AV_OPT_FLAG_FILTERING_PARAM
> +
> +static const AVOption anequalizer_options[] = {
> + { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
> + { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
> + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F },
> + { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
> + { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
> + { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
> + { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
> + { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
> + { NULL }
> +};
> +
> +AVFILTER_DEFINE_CLASS(anequalizer);
> +
> +static int config_video(AVFilterLink *outlink)
> +{
> + AVFilterContext *ctx = outlink->src;
> + AudioNEqualizerContext *s = ctx->priv;
> + AVFilterLink *inlink = ctx->inputs[0];
> + char *colors, *color, *saveptr = NULL;
> + AVFrame *out;
> + int ch, i, n;
> +
> + outlink->w = s->w;
> + outlink->h = s->h;
> +
> + av_frame_free(&s->video);
> + s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
> + if (!out)
> + return AVERROR(ENOMEM);
> + outlink->sample_aspect_ratio = (AVRational){1,1};
> + memset(out->data[0], 0, s->h * out->linesize[0]);
> +
> + colors = av_strdup(s->colors);
> + if (!colors)
> + return AVERROR(ENOMEM);
> +
> + for (ch = 0; ch < inlink->channels; ch++) {
> + uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
> + int prev_v = -1;
> + double f;
> +
> + color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
> + if (color)
> + av_parse_color(fg, color, -1, ctx);
> +
> + for (f = 0; f < s->w; f++) {
> + double complex z;
> + double complex H = 1;
> + double w;
> + int v, y, x;
> +
> + w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
> + z = 1. / cexp(I * w);
> +
> + for (n = 0; n < s->nb_filters; n++) {
> + if (s->filters[n].channel != ch)
> + continue;
> +
> + for (i = 0; i < FILTER_ORDER / 2; i++) {
> + FoSection *S = &s->filters[n].section[i];
> +
> + H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
> + ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0));
> + }
> + }
> +
> + v = av_clip((1. + -20 * log10(cabs(H)) / s->mag) * outlink->h / 2, 0, outlink->h - 1);
> + x = lrint(f);
> + if (prev_v == -1)
> + prev_v = v;
> + if (v <= prev_v) {
> + for (y = v; y <= prev_v; y++)
> + AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
> + } else {
> + for (y = prev_v; y <= v; y++)
> + AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
> + }
> +
> + prev_v = v;
> + }
> + }
> +
> + av_free(colors);
> +
> + return 0;
> +}
> +
> +static av_cold int init(AVFilterContext *ctx)
> +{
> + AudioNEqualizerContext *s = ctx->priv;
> + AVFilterPad pad, vpad;
> +
> + pad = (AVFilterPad){
> + .name = av_strdup("out0"),
> + .type = AVMEDIA_TYPE_AUDIO,
> + };
> +
> + if (!pad.name)
> + return AVERROR(ENOMEM);
> +
> + if (s->draw_curves) {
> + vpad = (AVFilterPad){
> + .name = av_strdup("out1"),
> + .type = AVMEDIA_TYPE_VIDEO,
> + .config_props = config_video,
> + };
> + if (!vpad.name)
> + return AVERROR(ENOMEM);
> + }
> +
> + ff_insert_outpad(ctx, 0, &pad);
> +
> + if (s->draw_curves)
> + ff_insert_outpad(ctx, 1, &vpad);
> +
> + return 0;
> +}
> +
> +static int query_formats(AVFilterContext *ctx)
> +{
> + AVFilterLink *inlink = ctx->inputs[0];
> + AVFilterLink *outlink = ctx->outputs[0];
> + AudioNEqualizerContext *s = ctx->priv;
> + AVFilterFormats *formats;
> + AVFilterChannelLayouts *layouts;
> + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
> + static const enum AVSampleFormat sample_fmts[] = {
> + AV_SAMPLE_FMT_DBLP,
> + AV_SAMPLE_FMT_NONE
> + };
> + int ret;
> +
> + if (s->draw_curves) {
> + AVFilterLink *videolink = ctx->outputs[1];
> + formats = ff_make_format_list(pix_fmts);
> + if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
> + return ret;
> + }
> +
> + formats = ff_make_format_list(sample_fmts);
> + if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
> + (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
> + return ret;
> +
> + layouts = ff_all_channel_counts();
> + if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
> + (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
> + return ret;
> +
> + formats = ff_all_samplerates();
> + if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
> + (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
> + return ret;
> +
> + return 0;
pedantic: same leak issue; if one of the ff_formats_ref() calls fails, the list
returned by ff_make_format_list() (or by ff_all_channel_counts()/ff_all_samplerates())
is never freed.
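A NULL check would not hurt either, since these constructors can fail on OOM.
Untested sketch of what I mean, to be repeated for each list:

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);

(Strictly speaking the lists can still leak if a later ref call fails, but that is
the same pre-existing pattern as in many other filters, hence only pedantic.)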
> +}
> +
> +static av_cold void uninit(AVFilterContext *ctx)
> +{
> + AudioNEqualizerContext *s = ctx->priv;
> +
> + av_freep(&ctx->output_pads[0].name);
> + if (s->draw_curves)
> + av_freep(&ctx->output_pads[1].name);
> + av_frame_free(&s->video);
> + av_freep(&s->filters);
> + s->nb_filters = 0;
> + s->nb_allocated = 0;
> +}
> +
> +static void butterworth_fo_section(FoSection *S, double beta,
> + double s, double g, double g0,
> + double D, double c0)
> +{
> + S->b0 = (g*g*beta*beta + 2*g*g0*s*beta + g0*g0)/D;
> + S->b1 = -4*c0*(g0*g0 + g*g0*s*beta)/D;
> + S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
> + S->b3 = -4*c0*(g0*g0 - g*g0*s*beta)/D;
> + S->b4 = (g*g*beta*beta - 2*g*g0*s*beta + g0*g0)/D;
> +
> + S->a0 = 1;
> + S->a1 = -4*c0*(1 + s*beta)/D;
> + S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
> + S->a3 = -4*c0*(1 - s*beta)/D;
> + S->a4 = (beta*beta - 2*s*beta + 1)/D;
> +}
> +
> +static void butterworth_bp_filter(EqualizatorFilter *f,
> + int N, double w0, double wb,
> + double G, double Gb, double G0)
> +{
> + double g, c0, g0, beta;
> + double epsilon;
> + int r = N % 2;
> + int L = (N - r) / 2;
> + int i;
> +
> + if (G == 0 && G0 == 0) {
> + f->section[0].a0 = 1;
> + f->section[0].b0 = 1;
> + f->section[1].a0 = 1;
> + f->section[1].b0 = 1;
> + return;
> + }
> +
> + G = pow(10, G/20);
> + Gb = pow(10, Gb/20);
> + G0 = pow(10, G0/20);
more useful: please use exp10() here instead of pow(10, x); I added it just today
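i.e. something like this (untested; assuming the helper ended up as ff_exp10() in
libavutil/internal.h, adjust name/header to whatever actually got committed):

    -    G = pow(10, G/20);
    -    Gb = pow(10, Gb/20);
    -    G0 = pow(10, G0/20);
    +    G  = ff_exp10(G  / 20);
    +    Gb = ff_exp10(Gb / 20);
    +    G0 = ff_exp10(G0 / 20);

exp10(x) is simply 10^x, so it is a drop-in replacement for pow(10, x). The same
substitution applies to chebyshev1_bp_filter() and chebyshev2_bp_filter() below.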
> +
> + epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
> + g = pow(G, 1.0 / N);
> + g0 = pow(G0, 1.0 / N);
> + beta = pow(epsilon, -1.0 / N) * tan(wb/2);
> +
> + c0 = cos(w0);
> + if (w0 == 0)
> + c0 = 1;
> + if (w0 == M_PI/2)
> + c0 = 0;
> + if (w0 == M_PI)
> + c0 = -1;
> +
> + for (i = 1; i <= L; i++) {
> + double ui = (2.0 * i - 1) / N;
> + double si = sin(M_PI * ui / 2.0);
> + double Di = beta * beta + 2 * si * beta + 1;
> +
> + butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
> + }
> +}
> +
> +static void chebyshev1_fo_section(FoSection *S, double a,
> + double c, double tetta_b,
> + double g0, double s, double b,
> + double D, double c0)
> +{
> + S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*s*tetta_b + g0*g0)/D;
> + S->b1 = -4*c0*(g0*g0 + g0*b*s*tetta_b)/D;
> + S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
> + S->b3 = -4*c0*(g0*g0 - g0*b*s*tetta_b)/D;
> + S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*s*tetta_b + g0*g0)/D;
> +
> + S->a0 = 1;
> + S->a1 = -4*c0*(1 + a*s*tetta_b)/D;
> + S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
> + S->a3 = -4*c0*(1 - a*s*tetta_b)/D;
> + S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*s*tetta_b + 1)/D;
> +}
> +
> +static void chebyshev1_bp_filter(EqualizatorFilter *f,
> + int N, double w0, double wb,
> + double G, double Gb, double G0)
> +{
> + double a, b, c0, g0, alfa, beta, tetta_b;
> + double epsilon;
> + int r = N % 2;
> + int L = (N - r) / 2;
> + int i;
> +
> + if (G == 0 && G0 == 0) {
> + f->section[0].a0 = 1;
> + f->section[0].b0 = 1;
> + f->section[1].a0 = 1;
> + f->section[1].b0 = 1;
> + return;
> + }
> +
> + G = pow(10, G/20);
> + Gb = pow(10, Gb/20);
> + G0 = pow(10, G0/20);
same
> +
> + epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
> + g0 = pow(G0,1.0/N);
> + alfa = pow(1.0/epsilon + sqrt(1 + pow(epsilon,-2.0)), 1.0/N);
> + beta = pow(G/epsilon + Gb * sqrt(1 + pow(epsilon,-2.0)), 1.0/N);
> + a = 0.5 * (alfa - 1.0/alfa);
> + b = 0.5 * (beta - g0*g0*(1/beta));
> + tetta_b = tan(wb/2);
> +
> + c0 = cos(w0);
> + if (w0 == 0)
> + c0 = 1;
> + if (w0 == M_PI/2)
> + c0 = 0;
> + if (w0 == M_PI)
> + c0 = -1;
What are all these cases for? Just do a cos(w0) and be done with it. The only one
possibly worth separate attention (and even that is dubious, since floating point
varies across platforms) is M_PI/2, since cos(M_PI/2) yields ~6e-17 on my platform,
which is still essentially 0. The rest are just ludicrous.
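In other words, an untested sketch of what I have in mind, if you insist on keeping
any special case at all:

    c0 = cos(w0);
    if (w0 == M_PI / 2)
        c0 = 0; /* snap the ~6e-17 result of cos(M_PI/2) to an exact 0 */

cos(0) already returns exactly 1, and cos(M_PI) is about as close to -1 as the
double approximation of pi allows, so those two branches buy nothing. The same goes
for the other two copies of this block (in butterworth_bp_filter() above and
chebyshev2_bp_filter() below).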
> +
> + for (i = 1; i <= L; i++) {
> + double ui = (2.0*i-1.0)/N;
> + double ci = cos(M_PI*ui/2.0);
> + double si = sin(M_PI*ui/2.0);
> + double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
> +
> + chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
> + }
> +}
> +
> +static void chebyshev2_fo_section(FoSection *S, double a,
> + double c, double tetta_b,
> + double g, double s, double b,
> + double D, double c0)
> +{
> + S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*s*tetta_b + b*b + g*g*c*c)/D;
> + S->b1 = -4*c0*(b*b + g*g*c*c + g*b*s*tetta_b)/D;
> + S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
> + S->b3 = -4*c0*(b*b + g*g*c*c - g*b*s*tetta_b)/D;
> + S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*s*tetta_b + b*b + g*g*c*c)/D;
> +
> + S->a0 = 1;
> + S->a1 = -4*c0*(a*a + c*c + a*s*tetta_b)/D;
> + S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
> + S->a3 = -4*c0*(a*a + c*c - a*s*tetta_b)/D;
> + S->a4 = (tetta_b*tetta_b - 2*a*s*tetta_b + a*a + c*c)/D;
> +}
> +
> +static void chebyshev2_bp_filter(EqualizatorFilter *f,
> + int N, double w0, double wb,
> + double G, double Gb, double G0)
> +{
> + double a, b, c0, tetta_b;
> + double epsilon, g, eu, ew;
> + int r = N % 2;
> + int L = (N - r) / 2;
> + int i;
> +
> + if (G == 0 && G0 == 0) {
> + f->section[0].a0 = 1;
> + f->section[0].b0 = 1;
> + f->section[1].a0 = 1;
> + f->section[1].b0 = 1;
> + return;
> + }
> +
> + G = pow(10, G/20);
> + Gb = pow(10, Gb/20);
> + G0 = pow(10, G0/20);
same, pow -> exp10
> +
> + epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
> + g = pow(G, 1.0 / N);
> + eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
> + ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
> + a = (eu - 1.0/eu)/2.0;
> + b = (ew - g*g/ew)/2.0;
> + tetta_b = tan(wb/2);
> +
> + c0 = cos(w0);
> + if (w0 == 0)
> + c0 = 1;
> + if (w0 == M_PI/2)
> + c0 = 0;
> + if (w0 == M_PI)
> + c0 = -1;
same, get rid of useless cases
> +
> + for (i = 1; i <= L; i++) {
> + double ui = (2.0 * i - 1.0)/N;
> + double ci = cos(M_PI * ui / 2.0);
> + double si = sin(M_PI * ui / 2.0);
> + double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
> +
> + chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
> + }
> +}
> +
> +static double butterworth_compute_bw_gain_db(double gain)
> +{
> + double bw_gain = 0;
> +
> + if (gain <= -6)
> + bw_gain = gain + 3;
> + else if(gain > -6 && gain < 6)
> + bw_gain = gain * 0.5;
> + else if(gain >= 6)
> + bw_gain = gain - 3;
> +
> + return bw_gain;
> +}
> +
> +static double chebyshev1_compute_bw_gain_db(double gain)
> +{
> + double bw_gain = 0;
> +
> + if (gain <= -6)
> + bw_gain = gain + 1;
> + else if(gain > -6 && gain < 6)
> + bw_gain = gain * 0.9;
> + else if(gain >= 6)
> + bw_gain = gain - 1;
> +
> + return bw_gain;
> +}
> +
> +static double chebyshev2_compute_bw_gain_db(double gain)
> +{
> + double bw_gain = 0;
> +
> + if (gain <= -6)
> + bw_gain = -3;
> + else if(gain > -6 && gain < 6)
> + bw_gain = gain * 0.3;
> + else if(gain >= 6)
> + bw_gain = 3;
> +
> + return bw_gain;
> +}
> +
> +static inline double hz_2_rad(double x, double fs)
> +{
> + return 2 * M_PI * x / fs;
> +}
> +
> +static void equalizer(EqualizatorFilter *f, double sample_rate)
> +{
> + double w0 = hz_2_rad(f->freq, sample_rate);
> + double wb = hz_2_rad(f->width, sample_rate);
> + double bw_gain;
> +
> + switch (f->type) {
> + case BUTTERWORTH:
> + bw_gain = butterworth_compute_bw_gain_db(f->gain);
> + butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
> + break;
> + case CHEBYSHEV1:
> + bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
> + chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
> + break;
> + case CHEBYSHEV2:
> + bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
> + chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
> + break;
> + }
> +
> +}
> +
> +static int config_input(AVFilterLink *inlink)
> +{
> + AVFilterContext *ctx = inlink->dst;
> + AudioNEqualizerContext *s = ctx->priv;
> + char *args = av_strdup(s->args);
> + char *saveptr = NULL;
> +
> + if (!args)
> + return AVERROR(ENOMEM);
> +
> + s->nb_allocated = 32 * inlink->channels;
> + s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
> + if (!s->filters) {
> + s->nb_allocated = 0;
> + return AVERROR(ENOMEM);
> + }
> +
> + while (1) {
> + char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
> +
> + if (!arg)
> + break;
> +
> + s->filters[s->nb_filters].type = 0;
> + if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
> + &s->filters[s->nb_filters].freq,
> + &s->filters[s->nb_filters].width,
> + &s->filters[s->nb_filters].gain,
> + &s->filters[s->nb_filters].type) != 5 &&
> + sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
> + &s->filters[s->nb_filters].freq,
> + &s->filters[s->nb_filters].width,
> + &s->filters[s->nb_filters].gain) != 4 ) {
> + av_free(args);
> + return AVERROR(EINVAL);
> + }
> +
> + if (s->filters[s->nb_filters].freq < 0 ||
> + s->filters[s->nb_filters].freq >= inlink->sample_rate / 2)
> + s->filters[s->nb_filters].ignore = 1;
> +
> + if (s->filters[s->nb_filters].channel < 0 ||
> + s->filters[s->nb_filters].channel >= inlink->channels)
> + s->filters[s->nb_filters].ignore = 1;
> +
> + av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
> + equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
> + s->nb_filters++;
> + if (s->nb_filters >= s->nb_allocated) {
> + EqualizatorFilter *filters;
> +
> + filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
> + if (!filters) {
> + av_free(args);
> + return AVERROR(ENOMEM);
> + }
> + memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
> + av_free(s->filters);
> + s->filters = filters;
> + s->nb_allocated *= 2;
> + }
> + }
> +
> + av_free(args);
> +
> + return 0;
> +}
> +
> +static inline double section_process(FoSection *S, double in)
> +{
> + double out;
> +
> + out = S->b0 * in;
> + out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
> + out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
> + out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
> + out+= S->b4 * S->num[3] - S->denum[3] * S->a4;
> +
> + S->num[3] = S->num[2];
> + S->num[2] = S->num[1];
> + S->num[1] = S->num[0];
> + S->num[0] = in;
> +
> + S->denum[3] = S->denum[2];
> + S->denum[2] = S->denum[1];
> + S->denum[1] = S->denum[0];
> + S->denum[0] = out;
> +
> + return out;
> +}
> +
> +static double process_sample(FoSection *s1, double in)
> +{
> + double p0 = in, p1;
> + int i;
> +
> + for (i = 0; i < FILTER_ORDER / 2; i++) {
> + p1 = section_process(&s1[i], p0);
> + p0 = p1;
> + }
> +
> + return p1;
> +}
> +
> +static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
> +{
> + AVFilterContext *ctx = inlink->dst;
> + AudioNEqualizerContext *s = ctx->priv;
> + AVFilterLink *outlink = ctx->outputs[0];
> + double *bptr;
> + int i, n;
> +
> + for (i = 0; i < s->nb_filters; i++) {
> + EqualizatorFilter *f = &s->filters[i];
> +
> + if (f->gain == 0. || f->ignore)
> + continue;
> +
> + bptr = (double *)buf->extended_data[f->channel];
> + for (n = 0; n < buf->nb_samples; n++) {
> + double sample = bptr[n];
> +
> + sample = process_sample(f->section, sample);
> + bptr[n] = sample;
> + }
> + }
> +
> + if (s->draw_curves) {
> + const int64_t pts = buf->pts +
> + av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
> + outlink->time_base);
> + int ret;
> +
> + s->video->pts = pts;
> + ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
> + if (ret < 0)
> + return ret;
> + }
> +
> + return ff_filter_frame(outlink, buf);
> +}
> +
> +static const AVFilterPad inputs[] = {
> + {
> + .name = "default",
> + .type = AVMEDIA_TYPE_AUDIO,
> + .config_props = config_input,
> + .filter_frame = filter_frame,
> + .needs_writable = 1,
> + },
> + { NULL }
> +};
> +
> +AVFilter ff_af_anequalizer = {
> + .name = "anequalizer",
> + .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
> + .priv_size = sizeof(AudioNEqualizerContext),
> + .priv_class = &anequalizer_class,
> + .init = init,
> + .uninit = uninit,
> + .query_formats = query_formats,
> + .inputs = inputs,
> + .outputs = NULL,
> + .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
> +};
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index 131e067..a039a39 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -59,6 +59,7 @@ void avfilter_register_all(void)
> REGISTER_FILTER(ALLPASS, allpass, af);
> REGISTER_FILTER(AMERGE, amerge, af);
> REGISTER_FILTER(AMIX, amix, af);
> + REGISTER_FILTER(ANEQUALIZER, anequalizer, af);
> REGISTER_FILTER(ANULL, anull, af);
> REGISTER_FILTER(APAD, apad, af);
> REGISTER_FILTER(APERMS, aperms, af);
Not tested; all of the above is based on code inspection.
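FWIW, a quick smoke test before pushing could be as simple as running the documented
example through -af (untested command, going purely by the docs in the patch):

ffmpeg -i in.wav -af "anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1" out.wav

and listening for the expected dip around 200 Hz.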
> --
> 1.9.1
>
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel at ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel