[FFmpeg-cvslog] avfilter/avf_showcqt: switch to activate
Paul B Mahol
git at videolan.org
Thu Jun 1 21:23:34 EEST 2023
ffmpeg | branch: master | Paul B Mahol <onemda at gmail.com> | Thu Jun 1 20:07:13 2023 +0200| [3cfd85ec20460c34199d191c2caff9a2e0650829] | committer: Paul B Mahol
avfilter/avf_showcqt: switch to activate
And make output frame durations always the same.
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=3cfd85ec20460c34199d191c2caff9a2e0650829
---
libavfilter/avf_showcqt.c | 84 ++++++++++++++++++++++++++++-------------------
1 file changed, 50 insertions(+), 34 deletions(-)
diff --git a/libavfilter/avf_showcqt.c b/libavfilter/avf_showcqt.c
index 76086477e9..fa3cdaf22b 100644
--- a/libavfilter/avf_showcqt.c
+++ b/libavfilter/avf_showcqt.c
@@ -27,6 +27,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "lavfutils.h"
#include "lswsutils.h"
@@ -51,8 +52,6 @@
"st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
"r(1-ld(1)) + b(ld(1))"
#define CSCHEME "1|0.5|0|0|0.5|1"
-#define PTS_STEP 10
-#define PTS_TOLERANCE 1
#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
@@ -344,7 +343,7 @@ static int init_cqt(ShowCQTContext *s)
}
av_expr_free(expr);
- av_log(s->ctx, AV_LOG_INFO, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
+ av_log(s->ctx, AV_LOG_VERBOSE, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
return 0;
error:
@@ -1186,9 +1185,6 @@ static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);
UPDATE_TIME(s->sono_time);
}
- out->pts = s->next_pts;
- out->duration = PTS_STEP;
- s->next_pts += PTS_STEP;
}
s->sono_count = (s->sono_count + 1) % s->count;
if (s->sono_h)
@@ -1366,8 +1362,8 @@ static int config_output(AVFilterLink *outlink)
s->format = outlink->format;
outlink->sample_aspect_ratio = av_make_q(1, 1);
outlink->frame_rate = s->rate;
- outlink->time_base = av_mul_q(av_inv_q(s->rate), av_make_q(1, PTS_STEP));
- av_log(ctx, AV_LOG_INFO, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
+ outlink->time_base = av_inv_q(s->rate);
+ av_log(ctx, AV_LOG_VERBOSE, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
s->width, s->height, av_get_pix_fmt_name(s->format), s->rate.num, s->rate.den,
s->bar_h, s->axis_h, s->sono_h);
@@ -1380,7 +1376,7 @@ static int config_output(AVFilterLink *outlink)
s->fft_bits = FFMAX(ceil(log2(inlink->sample_rate * s->timeclamp)), 4);
s->fft_len = 1 << s->fft_bits;
- av_log(ctx, AV_LOG_INFO, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
+ av_log(ctx, AV_LOG_VERBOSE, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
ret = av_tx_init(&s->fft_ctx, &s->tx_fn, AV_TX_FLOAT_FFT, 0, s->fft_len, &scale, 0);
s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));
@@ -1470,11 +1466,10 @@ static int config_output(AVFilterLink *outlink)
s->step = (int)(s->step_frac.num / s->step_frac.den);
s->step_frac.num %= s->step_frac.den;
if (s->step_frac.num) {
- av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d + %d/%d.\n",
+ av_log(ctx, AV_LOG_VERBOSE, "audio: %d Hz, step = %d + %d/%d.\n",
inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);
- av_log(ctx, AV_LOG_WARNING, "fractional step.\n");
} else {
- av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d.\n",
+ av_log(ctx, AV_LOG_VERBOSE, "audio: %d Hz, step = %d.\n",
inlink->sample_rate, s->step);
}
@@ -1487,7 +1482,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowCQTContext *s = ctx->priv;
- int remaining, step, ret, x, i, j, m;
+ int remaining, step, ret, x, i, j, m, got_frame = 0;
float *audio_data;
AVFrame *out = NULL;
@@ -1503,11 +1498,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
for (x = 0; x < (s->fft_len/2 + s->remaining_fill_max - step); x++)
s->fft_data[x] = s->fft_data[x+step];
s->remaining_fill += step;
+ s->next_pts++;
- if (out)
+ if (out) {
+ out->pts = s->next_pts;
+ out->duration = 1;
return ff_filter_frame(outlink, out);
+ }
}
- return AVERROR_EOF;
+ return 0;
}
remaining = insamples->nb_samples;
@@ -1528,16 +1527,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
}
remaining -= s->remaining_fill;
if (out) {
- int64_t pts = av_rescale_q(insamples->pts, inlink->time_base, av_make_q(1, inlink->sample_rate));
- pts += insamples->nb_samples - remaining - s->remaining_fill_max;
- pts = av_rescale_q(pts, av_make_q(1, inlink->sample_rate), outlink->time_base);
- if (FFABS(pts - out->pts) > PTS_TOLERANCE) {
- av_log(ctx, AV_LOG_DEBUG, "changing pts from %"PRId64" (%.3f) to %"PRId64" (%.3f).\n",
- out->pts, out->pts * av_q2d(outlink->time_base),
- pts, pts * av_q2d(outlink->time_base));
- out->pts = pts;
- s->next_pts = pts + PTS_STEP;
- }
+ int64_t pts = av_rescale_q(insamples->nb_samples - remaining - s->remaining_fill_max,
+ av_make_q(1, inlink->sample_rate), inlink->time_base);
+ out->pts = av_rescale_q(insamples->pts + pts, inlink->time_base, outlink->time_base);
+ out->duration = 1;
+ got_frame = 1;
ret = ff_filter_frame(outlink, out);
if (ret < 0) {
av_frame_free(&insamples);
@@ -1559,26 +1553,48 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
remaining = 0;
}
}
+ if (!got_frame)
+ ff_filter_set_ready(ctx, 100);
av_frame_free(&insamples);
return 0;
}
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
{
- AVFilterLink *inlink = outlink->src->inputs[0];
- int ret;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowCQTContext *s = ctx->priv;
+ int nb_samples, ret, status;
+ int64_t pts;
+ AVFrame *in;
- ret = ff_request_frame(inlink);
- if (ret == AVERROR_EOF)
- ret = filter_frame(inlink, NULL);
- return ret;
+ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+ nb_samples = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
+ ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return filter_frame(inlink, in);
+
+ if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ if (status == AVERROR_EOF) {
+ s->next_pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
+ ret = filter_frame(inlink, NULL);
+ ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
+ return ret;
+ }
+ }
+
+ FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+ return FFERROR_NOT_READY;
}
static const AVFilterPad showcqt_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
},
};
@@ -1587,7 +1603,6 @@ static const AVFilterPad showcqt_outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_output,
- .request_frame = request_frame,
},
};
@@ -1595,6 +1610,7 @@ const AVFilter ff_avf_showcqt = {
.name = "showcqt",
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
.init = init,
+ .activate = activate,
.uninit = uninit,
.priv_size = sizeof(ShowCQTContext),
FILTER_INPUTS(showcqt_inputs),
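
For readers less familiar with the activate API that this commit migrates to, below is a minimal sketch of the pattern the new activate() callback follows (declared in libavfilter/filters.h). The ff_inlink_*/ff_outlink_* helpers and the FF_FILTER_FORWARD_* macros are the real internal API; consume_and_plot() and the fixed nb_samples value are hypothetical placeholders standing in for the filter-specific work that showcqt keeps in filter_frame().

#include "libavutil/mathematics.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

/* Placeholder for the filter-specific processing (filter_frame() in showcqt):
 * assumed to buffer the samples, plot and push one video frame per full chunk,
 * and to flush its remaining data when called with in == NULL. */
static int consume_and_plot(AVFilterLink *inlink, AVFrame *in);

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    const int nb_samples  = 1024; /* placeholder; showcqt derives this from step + step_frac */
    AVFrame *in = NULL;
    int64_t pts;
    int ret, status;

    /* Propagate an EOF/error already set on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Try to pull exactly the samples needed for one output video frame. */
    ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return consume_and_plot(inlink, in);

    /* No full chunk available: check whether the input has terminated. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            ret = consume_and_plot(inlink, NULL); /* flush buffered samples */
            ff_outlink_set_status(outlink, AVERROR_EOF,
                                  av_rescale_q(pts, inlink->time_base,
                                               outlink->time_base));
            return ret;
        }
    }

    /* If the output still wants a frame, request more input from upstream. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

The other half of the change is the timestamping: with outlink->time_base = av_inv_q(s->rate), one time-base tick is exactly one output frame, so setting out->duration = 1 makes every frame duration identical. The old code subdivided the time base by PTS_STEP and re-synchronized out->pts against the input timestamps whenever they drifted by more than PTS_TOLERANCE, so consecutive frames were not guaranteed to be exactly one nominal frame interval apart.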