[FFmpeg-devel] [PATCH] libavfilter: add a new filter that computes the average peak signal-to-noise ratio (PSNR) of two input videos in YUV format.
Roger Pau Monné
roger.pau at entel.upc.edu
Sun Jun 5 21:00:35 CEST 2011
Signed-off-by: Roger Pau Monné <roger.pau at entel.upc.edu>
---
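The filter takes two video inputs, "main" and "ref"; the optional filter
argument names a file where per-frame PSNR values are written, and the
overall average/min/max PSNR is logged when the filter is uninitialized.
An illustrative invocation, assuming the reference stream is fed through
the movie source filter (the exact movie syntax depends on the FFmpeg
version in use):

    ffmpeg -i main.avi -vf "movie=ref.avi [ref]; [in][ref] psnr=stats.log [out]" -f null -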
libavfilter/vf_psnr.c | 272 +++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 272 insertions(+), 0 deletions(-)
create mode 100644 libavfilter/vf_psnr.c
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
new file mode 100644
index 0000000..c6a0b78
--- /dev/null
+++ b/libavfilter/vf_psnr.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2011 Roger Pau Monné <roger.pau at entel.upc.edu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Calculate the PSNR between two input videos
+ * Based on the overlay filter
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+
+#undef fprintf
+
+typedef struct {
+    AVFilterBufferRef *picref;    ///< last frame received on the "ref" input
+    double mse, min_mse, max_mse; ///< accumulated MSE and per-frame extremes
+    int nb_frames;                ///< number of frame pairs compared so far
+    FILE *vstats_file;            ///< optional per-frame stats file (filter argument)
+    uint16_t *line1, *line2;      ///< scratch buffers for av_read_image_line()
+} PSNRContext;
+
+static inline int pow2(int base)
+{
+ return base*base;
+}
+
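+/*
+ * PSNR = 10 * log10(max^2 / (mse/nb_frames)). mse may hold a sum accumulated
+ * over nb_frames frames, so the division yields the average MSE first; max is
+ * the maximum sample value (255 for the 8-bit formats accepted by this filter).
+ */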
+static inline double psnr(double mse, int nb_frames, int max)
+{
+ return 10.0*log((pow2(max))/(mse/nb_frames))/log(10.0);
+}
+
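+/*
+ * Compute the per-component mean squared error between two images.
+ * av_read_image_line() unpacks each line into 16-bit samples, so a single
+ * code path handles every accepted planar YUV layout, and the chroma planes
+ * are visited at their subsampled dimensions.
+ */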
+static inline void compute_images_mse(const uint8_t *data1[4], const int linesizes1[4],
+                                      const uint8_t *data2[4], const int linesizes2[4],
+                                      int w, int h, const AVPixFmtDescriptor *desc,
+                                      double mse[4], uint16_t *line1, uint16_t *line2)
+{
+    int i, c, j;
+
+ memset(mse, 0, sizeof(*mse)*4);
+
+ for (c = 0; c < desc->nb_components; c++) {
+ int w1 = c == 1 || c == 2 ? w>>desc->log2_chroma_w : w;
+ int h1 = c == 1 || c == 2 ? h>>desc->log2_chroma_h : h;
+
+ for (i = 0; i < h1; i++) {
+            av_read_image_line(line1, data1, linesizes1, desc, 0, i, c, w1, 0);
+            av_read_image_line(line2, data2, linesizes2, desc, 0, i, c, w1, 0);
+            for (j = 0; j < w1; j++)
+                mse[c] += pow2(line1[j] - line2[j]);
+ }
+ mse[c] /= w1*h1;
+ }
+}
+
+
+static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+ PSNRContext *psnr_context = ctx->priv;
+
+ psnr_context->mse = psnr_context->nb_frames = 0;
+ psnr_context->min_mse = psnr_context->max_mse = -1.0;
+ psnr_context->picref = NULL;
+
+ if (args != NULL && strlen(args) > 0) {
+ psnr_context->vstats_file = fopen(args, "w");
+ if (!psnr_context->vstats_file) {
+ av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s\n", args);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PSNRContext *psnr_context = ctx->priv;
+
+    if (psnr_context->nb_frames > 0)
+        av_log(ctx, AV_LOG_INFO, "PSNR average:%0.2fdB min:%0.2fdB max:%0.2fdB\n",
+               psnr(psnr_context->mse, psnr_context->nb_frames, 255),
+               psnr(psnr_context->max_mse, 1, 255),
+               psnr(psnr_context->min_mse, 1, 255));
+
+ if (psnr_context->picref) {
+ avfilter_unref_buffer(psnr_context->picref);
+ psnr_context->picref = NULL;
+ }
+    av_freep(&psnr_context->line1);
+    av_freep(&psnr_context->line2);
+ if (psnr_context->vstats_file)
+ fclose(psnr_context->vstats_file);
+}
+
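+/*
+ * Both inputs must share dimensions and pixel format. The scratch line
+ * buffers are sized for the luma width, which is always at least as large
+ * as the (possibly subsampled) chroma widths.
+ */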
+static int config_input_ref(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *psnr_context = ctx->priv;
+
+ if (ctx->inputs[0]->w != ctx->inputs[1]->w || ctx->inputs[0]->h != ctx->inputs[1]->h) {
+        av_log(ctx, AV_LOG_ERROR, "Width and/or height of the input videos differ, cannot compute PSNR\n");
+ return AVERROR(EINVAL);
+ }
+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
+        av_log(ctx, AV_LOG_ERROR, "Input videos have different pixel formats, cannot compute PSNR\n");
+ return AVERROR(EINVAL);
+ }
+
+    if (!(psnr_context->line1 = av_malloc(sizeof(*psnr_context->line1) * inlink->w)) ||
+        !(psnr_context->line2 = av_malloc(sizeof(*psnr_context->line2) * inlink->w)))
+ return AVERROR(ENOMEM);
+
+    av_log(ctx, AV_LOG_INFO, "PSNR filter initialized\n");
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+
+ outlink->time_base = outlink->src->inputs[0]->time_base;
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ return 0;
+}
+
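+/*
+ * Only 8-bit planar YUV formats are accepted, which is why 255 is used as
+ * the maximum sample value in the PSNR computations.
+ */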
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV420P,
+ PIX_FMT_YUV411P, PIX_FMT_YUV410P,
+ PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P,
+ PIX_FMT_YUV440P, PIX_FMT_YUVJ440P
+ };
+
+ avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+
+ return 0;
+}
+
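+/*
+ * The "main" input drives the filter: each frame is forwarded to the output,
+ * the previously stored reference frame is released, and a new frame is
+ * requested from the "ref" input so that both pictures are available by the
+ * time end_frame() is called.
+ */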
+static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+{
+ AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *psnr_context = ctx->priv;
+
+ inlink->dst->outputs[0]->out_buf = outpicref;
+ outpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[0]->time_base, ctx->outputs[0]->time_base);
+
+ if (psnr_context->picref) {
+ avfilter_unref_buffer(psnr_context->picref);
+ psnr_context->picref = NULL;
+ }
+ avfilter_request_frame(ctx->inputs[1]);
+
+ avfilter_start_frame(inlink->dst->outputs[0], outpicref);
+}
+
+static void start_frame_ref(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *psnr_context = ctx->priv;
+
+ psnr_context->picref = inpicref;
+ psnr_context->picref->pts = av_rescale_q(inpicref->pts, ctx->inputs[1]->time_base, ctx->outputs[0]->time_base);
+}
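+
+/*
+ * Compare the frame on the main input with the stored reference frame,
+ * accumulate the global MSE statistics and, if a stats file was given,
+ * write the per-frame per-component PSNR values.
+ */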
+static void end_frame(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *psnr_context = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterBufferRef *outpic = outlink->out_buf;
+ AVFilterBufferRef *ref = psnr_context->picref;
+    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
+    double mse[4];
+    double mse_t = 0;
+    int j;
+
+    if (psnr_context->picref) {
+        compute_images_mse((const uint8_t **) outpic->data, outpic->linesize,
+                           (const uint8_t **) ref->data,    ref->linesize,
+                           outpic->video->w, outpic->video->h,
+                           desc, mse, psnr_context->line1, psnr_context->line2);
+
+        for (j = 0; j < desc->nb_components; j++)
+            mse_t += mse[j];
+        mse_t /= desc->nb_components;
+
+ if (psnr_context->min_mse == -1) {
+ psnr_context->min_mse = mse_t;
+ psnr_context->max_mse = mse_t;
+ }
+ if (psnr_context->min_mse > mse_t)
+ psnr_context->min_mse = mse_t;
+ if (psnr_context->max_mse < mse_t)
+ psnr_context->max_mse = mse_t;
+
+ psnr_context->mse += mse_t;
+ psnr_context->nb_frames++;
+
+ if (psnr_context->vstats_file) {
+ fprintf(psnr_context->vstats_file, "Frame:%d Y:%0.2fdB Cb:%0.2fdB Cr:%0.2fdB PSNR:%0.2fdB\n",
+ psnr_context->nb_frames,
+ psnr(mse[0], 1, 255),
+ psnr(mse[1], 1, 255),
+ psnr(mse[2], 1, 255),
+ psnr(mse_t, 1, 255));
+ }
+ }
+
+ avfilter_end_frame(outlink);
+ avfilter_unref_buffer(inlink->cur_buf);
+}
+
+static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
+
+static void null_end_frame(AVFilterLink *inlink) { }
+
+AVFilter avfilter_vf_psnr = {
+ .name = "psnr",
+    .description = NULL_IF_CONFIG_SMALL("Compute the average PSNR between two input videos."),
+
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .priv_size = sizeof(PSNRContext),
+
+ .inputs = (AVFilterPad[]) {{ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame,
+ .draw_slice = null_draw_slice,
+ .end_frame = end_frame,
+ .min_perms = AV_PERM_READ, },
+ { .name = "ref",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .start_frame = start_frame_ref,
+ .config_props = config_input_ref,
+ .draw_slice = null_draw_slice,
+ .end_frame = null_end_frame,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL}},
+ .outputs = (AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output, },
+ { .name = NULL}},
+};
--
1.7.3.3