[FFmpeg-devel] [PATCH v2 1/1] lavfi/vf_gopromax_opencl: add GoPro Max 360 video filter
TADANO Tokumei
aimingoff at pc.nifty.jp
Sat Jul 27 19:42:09 EEST 2024
On 2024/07/27 13:30, TADANO Tokumei wrote:
> Add an OpenCL filter that converts GoPro Max native .360 files
> into standard equirectangular or YouTube equi-angular cubemap (EAC)
> projection.
>
> A .360 file contains two separate video streams.
> This filter combines the two streams into a single stream in a
> standard format.
> ---
> doc/filters.texi | 78 +++++++
> libavfilter/Makefile | 2 +
> libavfilter/allfilters.c | 1 +
> libavfilter/opencl/gopromax.cl | 280 ++++++++++++++++++++++++
> libavfilter/opencl_source.h | 1 +
> libavfilter/vf_gopromax_opencl.c | 351 +++++++++++++++++++++++++++++++
> 6 files changed, 713 insertions(+)
> create mode 100644 libavfilter/opencl/gopromax.cl
> create mode 100644 libavfilter/vf_gopromax_opencl.c
Patchwork reported a build failure, but it occurred while compiling opencl.c and is not caused by this patch:
In file included from ./libavutil/common.h:48:0,
from ./libavutil/avutil.h:301,
from ./libavutil/opt.h:31,
from libavdevice/sdl2.c:31:
./config.h:335:0: warning: 'HAVE_PTHREAD_SETNAME_NP' redefined
#define HAVE_PTHREAD_SETNAME_NP 0
In file included from /usr/include/SDL2/SDL_stdinc.h:31:0,
from /usr/include/SDL2/SDL_main.h:25,
from /usr/include/SDL2/SDL.h:32,
from libavdevice/sdl2.c:26:
/usr/include/SDL2/SDL_config.h:186:0: note: this is the location of the previous definition
#define HAVE_PTHREAD_SETNAME_NP 1
In file included from libavfilter/opencl.h:31:0,
from libavfilter/opencl.c:26:
./libavutil/hwcontext_opencl.h:25:10: fatal error: CL/cl.h: No such file or directory
#include <CL/cl.h>
^~~~~~~~~
compilation terminated.
make: *** [libavfilter/opencl.o] Error 1
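
The fatal error simply means the OpenCL development headers are missing on that patchwork builder, so libavfilter/opencl.c cannot be compiled at all with --enable-opencl; it is unrelated to this patch. As a quick sanity check for such an environment (my own sketch, assuming only a standard OpenCL ICD loader installation), a host that can compile and link the snippet below has everything opencl.c needs:

    /* Standalone check that <CL/cl.h> and the OpenCL ICD loader are
     * available; build with e.g. cc check.c -lOpenCL.  If this does not
     * compile, opencl.c fails the same way with or without this patch. */
    #include <CL/cl.h>

    int main(void)
    {
        cl_uint num_platforms = 0;
        /* Only query the platform count; no device is touched. */
        return clGetPlatformIDs(0, NULL, &num_platforms) == CL_SUCCESS ? 0 : 1;
    }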
> diff --git a/doc/filters.texi b/doc/filters.texi
> index 2585d818ff..dd9ab47686 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -27101,6 +27101,84 @@ Apply dilation filter with threshold0 set to 30, threshold1 set 40, threshold2 s
> @end example
> @end itemize
>
> +@anchor{gopromax_opencl}
> +@section gopromax_opencl
> +
> +Combine and transform the two GoPro Max video streams into an equirectangular or equi-angular cubemap projection.
> +
> +This filter is designed to work directly with GoPro .360 files.
> +Native .360 files are a variant of EAC: the front-lens and rear-lens streams form the top and bottom halves of the EAC projection.
> +
> +A .360 file contains two video streams.
> +In most cases one is stream #0:0 and the other is stream #0:5;
> +check the actual stream numbers with the @code{ffprobe} command.
> +This filter combines the two streams into a single stream.
> +
> +The .360 file also contains two overlapped areas of 64 pixels each.
> +The filter blends the overlapping image data in these two areas.
> +
> +The filter accepts the following options:
> +
> +@table @option
> +
> +@item output
> +Set format of the output video.
> +
> +Available formats:
> +
> +@table @samp
> +
> +@item e
> +@item equirect
> +Equirectangular projection.
> +
> +@item eac
> +Equi-Angular Cubemap.
> +
> +@end table
> +
> +Default is @code{equirect}.
> +
> +@item w
> +@item h
> +Set the output video resolution.
> +
> +The default resolution depends on the output format.
> +
> +@item overlap
> +Set the number of overlapped pixels in the input .360 video.
> +
> +There is no need to specify this option for a native .360 video file.
> +It is intended for rescaled video or future format changes.
> +
> +Default is @code{64}.
> +
> +@end table
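
(A side note from me, not part of the patch, to put the "overlap" default into context: assuming a native 5.6K .360 file in which each of the two video streams is 4096x1344, the geometry the OpenCL kernel derives from the input dimensions works out as in the sketch below.)

    /* Hypothetical worked example; the 4096x1344 stream size is an
     * assumption about native 5.6K .360 recordings. */
    #include <stdio.h>

    int main(void)
    {
        int width = 4096, cube_size = 1344, overlap = 64;
        /* Same formula as in gopromax_to_eac() in opencl/gopromax.cl:
         * gap is how much narrower each outer cube face is stored
         * compared with cube_size once the two 64-pixel overlap bands
         * are accounted for. */
        int gap = (cube_size * 3 + overlap * 2 - width) / 2;   /* = 32 */
        printf("gap = %d pixels per outer face\n", gap);
        return 0;
    }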
> +
> +@subsection Examples
> +
> +@itemize
> +@item
> +Convert a .360 file to equirectangular projection.
> +@example
> +-i INPUT -filter_complex '[0:0]hwupload[a], [0:5]hwupload[b], [a][b]gopromax_opencl=w=4096:h=2048, hwdownload, format=yuvj420p' -map 0:a:0 -c:a copy OUTPUT
> +@end example
> +
> +The two video streams (#0:0 and #0:5) are combined and converted to the default equirectangular projection at the specified resolution.
> +The first audio stream (GoPro AAC) is copied along with the video stream.
> +
> +@item
> +Convert a .360 file to equi-angular cubemap projection.
> +@example
> +-i INPUT -filter_complex '[0:0]hwupload[a], [0:5]hwupload[b], [a][b]gopromax_opencl=eac, hwdownload, format=yuvj420p, v360=eac:c3x2:w=1344:h=896' -map 0:1 -map 0:3 -c:a copy -c:d copy OUTPUT
> +@end example
> +
> +The two video streams (#0:0 and #0:5) are combined and converted to equi-angular cubemap projection,
> +which is then converted to a c3x2 cubemap projection and downscaled by the v360 filter.
> +Stream #0:1 (GoPro AAC) and stream #0:3 (GoPro MET) are copied along with the video stream.
> +
> +@end itemize
> +
> @anchor{nlmeans_opencl}
> @section nlmeans_opencl
>
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index 63088e9286..9b5fa78920 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -327,6 +327,8 @@ OBJS-$(CONFIG_FSYNC_FILTER) += vf_fsync.o
> OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o
> OBJS-$(CONFIG_GBLUR_VULKAN_FILTER) += vf_gblur_vulkan.o vulkan.o vulkan_filter.o
> OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
> +OBJS-$(CONFIG_GOPROMAX_OPENCL_FILTER) += vf_gopromax_opencl.o opencl.o \
> + opencl/gopromax.o framesync.o
> OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
> OBJS-$(CONFIG_GRAPHMONITOR_FILTER) += f_graphmonitor.o
> OBJS-$(CONFIG_GRAYWORLD_FILTER) += vf_grayworld.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index 63600e9b58..5a517d8586 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -303,6 +303,7 @@ extern const AVFilter ff_vf_fsync;
> extern const AVFilter ff_vf_gblur;
> extern const AVFilter ff_vf_gblur_vulkan;
> extern const AVFilter ff_vf_geq;
> +extern const AVFilter ff_vf_gopromax_opencl;
> extern const AVFilter ff_vf_gradfun;
> extern const AVFilter ff_vf_graphmonitor;
> extern const AVFilter ff_vf_grayworld;
> diff --git a/libavfilter/opencl/gopromax.cl b/libavfilter/opencl/gopromax.cl
> new file mode 100644
> index 0000000000..64d2afe31c
> --- /dev/null
> +++ b/libavfilter/opencl/gopromax.cl
> @@ -0,0 +1,280 @@
> +/*
> + * Copyright (c) 2021 Ronan LE MEILLAT
> + * Copyright (c) 2024 TADANO Tokumei
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +enum Faces {
> + TOP_LEFT,
> + TOP_MIDDLE,
> + TOP_RIGHT,
> + BOTTOM_LEFT,
> + BOTTOM_MIDDLE,
> + BOTTOM_RIGHT,
> + NB_FACES,
> +};
> +
> +enum Direction {
> + RIGHT,
> + LEFT,
> + UP,
> + DOWN,
> + FRONT,
> + BACK,
> + NB_DIRECTIONS,
> +};
> +
> +enum Rotation {
> + ROT_0,
> + ROT_90,
> + ROT_180,
> + ROT_270,
> + NB_ROTATIONS,
> +};
> +
> +static float2 rotate_cube_face(float2 uv, int rotation)
> +{
> + float2 ret_uv;
> +
> + switch (rotation) {
> + case ROT_0:
> + ret_uv = uv;
> + break;
> + case ROT_90:
> + ret_uv.x = -uv.y;
> + ret_uv.y = uv.x;
> + break;
> + case ROT_180:
> + ret_uv.x = -uv.x;
> + ret_uv.y = -uv.y;
> + break;
> + case ROT_270:
> + ret_uv.x = uv.y;
> + ret_uv.y = -uv.x;
> + break;
> + }
> +
> + return ret_uv;
> +}
> +
> +static float3 equirect_to_xyz(int2 xy, int2 size)
> +{
> + float3 xyz;
> + float phi = ((2.f * ((float)xy.x) + 1.f) / ((float)size.x) - 1.f) * M_PI_F ;
> + float theta = ((2.f * ((float)xy.y) + 1.f) / ((float)size.y) - 1.f) * M_PI_2_F;
> +
> + xyz.x = cos(theta) * sin(phi);
> + xyz.y = sin(theta);
> + xyz.z = cos(theta) * cos(phi);
> +
> + return xyz;
> +}
> +
> +static float2 xyz_to_cube(float3 xyz, int *face)
> +{
> + float phi = atan2(xyz.x, xyz.z);
> + float theta = asin(xyz.y);
> + float phi_norm, theta_threshold;
> + float2 uv;
> + int direction;
> +
> + if (phi >= -M_PI_4_F && phi < M_PI_4_F) {
> + direction = FRONT;
> + phi_norm = phi;
> + } else if (phi >= -(M_PI_2_F + M_PI_4_F) && phi < -M_PI_4_F) {
> + direction = LEFT;
> + phi_norm = phi + M_PI_2_F;
> + } else if (phi >= M_PI_4_F && phi < M_PI_2_F + M_PI_4_F) {
> + direction = RIGHT;
> + phi_norm = phi - M_PI_2_F;
> + } else {
> + direction = BACK;
> + phi_norm = phi + ((phi > 0.f) ? -M_PI_F : M_PI_F);
> + }
> +
> + theta_threshold = atan(cos(phi_norm));
> + if (theta > theta_threshold) {
> + direction = DOWN;
> + } else if (theta < -theta_threshold) {
> + direction = UP;
> + }
> +
> + switch (direction) {
> + case RIGHT:
> + uv.x = -xyz.z / xyz.x;
> + uv.y = xyz.y / xyz.x;
> + *face = TOP_RIGHT;
> + break;
> + case LEFT:
> + uv.x = -xyz.z / xyz.x;
> + uv.y = -xyz.y / xyz.x;
> + *face = TOP_LEFT;
> + break;
> + case UP:
> + uv.x = -xyz.x / xyz.y;
> + uv.y = -xyz.z / xyz.y;
> + *face = BOTTOM_RIGHT;
> + uv = rotate_cube_face(uv, ROT_270);
> + break;
> + case DOWN:
> + uv.x = xyz.x / xyz.y;
> + uv.y = -xyz.z / xyz.y;
> + *face = BOTTOM_LEFT;
> + uv = rotate_cube_face(uv, ROT_270);
> + break;
> + case FRONT:
> + uv.x = xyz.x / xyz.z;
> + uv.y = xyz.y / xyz.z;
> + *face = TOP_MIDDLE;
> + break;
> + case BACK:
> + uv.x = xyz.x / xyz.z;
> + uv.y = -xyz.y / xyz.z;
> + *face = BOTTOM_MIDDLE;
> + uv = rotate_cube_face(uv, ROT_90);
> + break;
> + }
> +
> + return uv;
> +}
> +
> +static float2 xyz_to_eac(float3 xyz, int2 size)
> +{
> + float pixel_pad = 2;
> + float u_pad = pixel_pad / size.x;
> + float v_pad = pixel_pad / size.y;
> +
> + int face;
> + int u_face, v_face;
> + float2 uv = xyz_to_cube(xyz, &face);
> +
> + u_face = face % 3;
> + v_face = face / 3;
> + //eac expansion
> + uv = M_2_PI_F * atan(uv) + 0.5f;
> +
> + uv.x = (uv.x + u_face) * (1.f - 2.f * u_pad) / 3.f + u_pad;
> + uv.y = uv.y * (0.5f - 2.f * v_pad) + v_pad + 0.5f * v_face;
> +
> + uv.x *= size.x;
> + uv.y *= size.y;
> +
> + return uv;
> +}
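
A note for reviewers on the "uv = M_2_PI_F * atan(uv) + 0.5f" step above (my own reference sketch, not code from the patch): it is the standard EAC expansion, which spaces samples uniformly in angle across each cube face, and its inverse is a plain tangent:

    /* Reference-only model of the expansion used in xyz_to_eac().
     * uv is a cube-face coordinate in [-1, 1]; s is the normalized
     * texture coordinate in [0, 1]. */
    #include <math.h>

    static double eac_expand(double uv)  { return (2.0 / M_PI) * atan(uv) + 0.5; }
    static double eac_compress(double s) { return tan(M_PI * (s - 0.5) / 2.0); }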
> +
> +const sampler_t sampler_nearest = (CLK_NORMALIZED_COORDS_FALSE |
> + CLK_ADDRESS_CLAMP_TO_EDGE |
> + CLK_FILTER_NEAREST);
> +
> +const sampler_t sampler_linear = (CLK_NORMALIZED_COORDS_FALSE |
> + CLK_ADDRESS_CLAMP_TO_EDGE |
> + CLK_FILTER_LINEAR);
> +
> +static float4 gopromax_to_eac(float2 uv, int overlap, __read_only image2d_t src)
> +{
> + int2 dim = get_image_dim(src);
> + int cube_size = dim.y;
> + int gap = (cube_size * 3 + overlap * 2 - dim.x) / 2;
> + float2 uv2 = uv;
> + float a = 0.f;
> + float4 val;
> + bool is_aligned;
> +
> + if (uv.x < cube_size || uv.x > cube_size * 2) {
> + int dx = 0;
> + int cs = cube_size - gap;
> + float cx = fmod(uv.x, cube_size) * cs / cube_size;
> + if (uv.x >= cube_size * 2)
> + dx = cube_size * 2 + overlap - gap;
> + if (cx >= (cs + overlap) / 2)
> + dx += overlap;
> + uv2.x = cx + dx;
> + if (cx > (cs - overlap) / 2 && cx < (cs + overlap) / 2)
> + a = (cx - (cs - overlap) / 2) / overlap;
> + } else {
> + uv2.x += overlap - gap;
> + }
> +
> + {
> + int2 d = convert_int2(ceil(uv2) - floor(uv2));
> + is_aligned = (d.x == 0 && d.y == 0);
> + }
> + if (is_aligned)
> + val = read_imagef(src, sampler_nearest, uv2);
> + else
> + val = read_imagef(src, sampler_linear, uv2);
> + if (a > 0.f) {
> + float4 val2;
> + uv2.x += overlap;
> + if (is_aligned)
> + val2 = read_imagef(src, sampler_nearest, uv2);
> + else
> + val2 = read_imagef(src, sampler_linear, uv2);
> + val = mix(val, val2, a);
> + }
> +
> + return val;
> +}
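
To make the seam handling easier to review (again a reference sketch of mine, not code from the patch): inside each overlap band the two copies of the shared image are cross-faded with a linear ramp, and outside the band only one copy is sampled.

    /* Scalar model of the blend factor computed in gopromax_to_eac():
     * 0 left of the seam, a linear 0..1 ramp across `overlap` pixels,
     * 1 to the right of it (where only the shifted copy is read).
     * cs is the face width with the gap removed, as in the kernel. */
    static float seam_blend(float cx, int cs, int overlap)
    {
        float start = (cs - overlap) / 2.0f;

        if (cx <= start)
            return 0.0f;
        if (cx >= start + overlap)
            return 1.0f;
        return (cx - start) / overlap;
    }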
> +
> +__kernel void gopromax_equirectangular(__write_only image2d_t dst,
> + __read_only image2d_t front,
> + __read_only image2d_t rear,
> + int overlap)
> +{
> + float4 val;
> + int2 loc = (int2)(get_global_id(0), get_global_id(1));
> +
> + int2 dst_size = get_image_dim(dst);
> + int2 src_size = get_image_dim(front);
> + int2 eac_size = (int2)(src_size.y * 3, src_size.y * 2);
> +
> + float3 xyz = equirect_to_xyz(loc, dst_size);
> + float2 uv = xyz_to_eac(xyz, eac_size);
> +
> + if (uv.y >= src_size.y) {
> + uv.y -= src_size.y;
> + val = gopromax_to_eac(uv, overlap, rear);
> + } else {
> + val = gopromax_to_eac(uv, overlap, front);
> + }
> +
> + write_imagef(dst, loc, val);
> +}
> +
> +__kernel void gopromax_stack(__write_only image2d_t dst,
> + __read_only image2d_t front,
> + __read_only image2d_t rear,
> + int overlap)
> +{
> + float4 val;
> + int2 loc = (int2)(get_global_id(0), get_global_id(1));
> + int2 dst_size = get_image_dim(dst);
> + int2 src_size = get_image_dim(front);
> + float2 uv = convert_float2(loc);
> +
> + uv *= (float)src_size.y * 2 / dst_size.y;
> +
> + if (uv.y >= src_size.y) {
> + uv.y -= src_size.y;
> + val = gopromax_to_eac(uv, overlap, rear);
> + } else {
> + val = gopromax_to_eac(uv, overlap, front);
> + }
> +
> + write_imagef(dst, loc, val);
> +}
> diff --git a/libavfilter/opencl_source.h b/libavfilter/opencl_source.h
> index b6930fb686..92135c6a7d 100644
> --- a/libavfilter/opencl_source.h
> +++ b/libavfilter/opencl_source.h
> @@ -24,6 +24,7 @@ extern const char *ff_source_colorkey_cl;
> extern const char *ff_source_colorspace_common_cl;
> extern const char *ff_source_convolution_cl;
> extern const char *ff_source_deshake_cl;
> +extern const char *ff_source_gopromax_cl;
> extern const char *ff_source_neighbor_cl;
> extern const char *ff_source_nlmeans_cl;
> extern const char *ff_source_overlay_cl;
> diff --git a/libavfilter/vf_gopromax_opencl.c b/libavfilter/vf_gopromax_opencl.c
> new file mode 100644
> index 0000000000..6867ca5cbb
> --- /dev/null
> +++ b/libavfilter/vf_gopromax_opencl.c
> @@ -0,0 +1,351 @@
> +/*
> + * Copyright (c) 2021 Ronan LE MEILLAT
> + * Copyright (c) 2024 TADANO Tokumei
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/log.h"
> +#include "libavutil/mem.h"
> +#include "libavutil/pixdesc.h"
> +#include "libavutil/opt.h"
> +
> +#include "avfilter.h"
> +#include "framesync.h"
> +#include "internal.h"
> +#include "opencl.h"
> +#include "opencl_source.h"
> +#include "video.h"
> +#include "v360.h"
> +
> +typedef struct GoProMaxOpenCLContext {
> + OpenCLFilterContext ocf;
> +
> + int initialised;
> + cl_kernel kernel;
> + cl_command_queue command_queue;
> +
> + FFFrameSync fs;
> +
> + int nb_planes;
> +
> + int out;
> + int width, height;
> + int overlap;
> +} GoProMaxOpenCLContext;
> +
> +static int gopromax_opencl_load(AVFilterContext *avctx,
> + enum AVPixelFormat front_format,
> + enum AVPixelFormat rear_format)
> +{
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> + cl_int cle;
> + const char *source = ff_source_gopromax_cl;
> + const char *kernel;
> + const AVPixFmtDescriptor *front_desc, *rear_desc;
> + int err, i, front_planes, rear_planes;
> +
> + front_desc = av_pix_fmt_desc_get(front_format);
> + rear_desc = av_pix_fmt_desc_get(rear_format);
> + front_planes = rear_planes = 0;
> + for (i = 0; i < front_desc->nb_components; i++)
> + front_planes = FFMAX(front_planes,
> + front_desc->comp[i].plane + 1);
> + for (i = 0; i < rear_desc->nb_components; i++)
> + rear_planes = FFMAX(rear_planes,
> + rear_desc->comp[i].plane + 1);
> +
> + ctx->nb_planes = front_planes;
> +
> + switch (ctx->out) {
> + case EQUIRECTANGULAR:
> + kernel = "gopromax_equirectangular";
> + break;
> + case EQUIANGULAR:
> + kernel = "gopromax_stack";
> + break;
> + default:
> + av_log(ctx, AV_LOG_ERROR, "Specified output format is not handled.\n");
> + return AVERROR_BUG;
> + }
> +
> + av_log(avctx, AV_LOG_DEBUG, "Using kernel %s.\n", kernel);
> +
> + err = ff_opencl_filter_load_program(avctx, &source, 1);
> + if (err < 0)
> + goto fail;
> +
> + ctx->command_queue = clCreateCommandQueue(ctx->ocf.hwctx->context,
> + ctx->ocf.hwctx->device_id,
> + 0, &cle);
> + CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create OpenCL "
> + "command queue %d.\n", cle);
> +
> + ctx->kernel = clCreateKernel(ctx->ocf.program, kernel, &cle);
> + CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create kernel %d.\n", cle);
> +
> + ctx->initialised = 1;
> + return 0;
> +
> +fail:
> + if (ctx->command_queue)
> + clReleaseCommandQueue(ctx->command_queue);
> + if (ctx->kernel)
> + clReleaseKernel(ctx->kernel);
> + return err;
> +}
> +
> +static int gopromax_opencl_stack(FFFrameSync *fs)
> +{
> + AVFilterContext *avctx = fs->parent;
> + AVFilterLink *outlink = avctx->outputs[0];
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> + AVFrame *input_front, *input_rear;
> + AVFrame *output;
> + cl_mem mem;
> + cl_int cle, overlap;
> + size_t global_work[2];
> + int kernel_arg = 0;
> + int err, plane;
> +
> + err = ff_framesync_get_frame(fs, 0, &input_front, 0);
> + if (err < 0)
> + return err;
> + err = ff_framesync_get_frame(fs, 1, &input_rear, 0);
> + if (err < 0)
> + return err;
> +
> + if (!ctx->initialised) {
> + AVHWFramesContext *front_fc =
> + (AVHWFramesContext*)input_front->hw_frames_ctx->data;
> + AVHWFramesContext *rear_fc =
> + (AVHWFramesContext*)input_rear->hw_frames_ctx->data;
> +
> + err = gopromax_opencl_load(avctx, front_fc->sw_format,
> + rear_fc->sw_format);
> + if (err < 0)
> + return err;
> + }
> +
> + output = ff_get_video_buffer(outlink, outlink->w, outlink->h);
> + if (!output) {
> + err = AVERROR(ENOMEM);
> + goto fail;
> + }
> +
> + for (plane = 0; plane < ctx->nb_planes; plane++) {
> + kernel_arg = 0;
> +
> + mem = (cl_mem)output->data[plane];
> + CL_SET_KERNEL_ARG(ctx->kernel, kernel_arg, cl_mem, &mem);
> + kernel_arg++;
> +
> + mem = (cl_mem)input_front->data[plane];
> + CL_SET_KERNEL_ARG(ctx->kernel, kernel_arg, cl_mem, &mem);
> + kernel_arg++;
> +
> + mem = (cl_mem)input_rear->data[plane];
> + CL_SET_KERNEL_ARG(ctx->kernel, kernel_arg, cl_mem, &mem);
> + kernel_arg++;
> +
> + overlap = ctx->overlap;
> + CL_SET_KERNEL_ARG(ctx->kernel, kernel_arg, cl_int, &overlap);
> + kernel_arg++;
> +
> + err = ff_opencl_filter_work_size_from_image(avctx, global_work,
> + output, plane, 0);
> + if (err < 0)
> + goto fail;
> +
> + av_log(avctx, AV_LOG_VERBOSE,
> + "In gopromax_opencl_stack for plane:%d %lux%lu frame size %dx%d\n",
> + plane, global_work[0], global_work[1], outlink->w, outlink->h);
> +
> + cle = clEnqueueNDRangeKernel(ctx->command_queue, ctx->kernel, 2, NULL,
> + global_work, NULL, 0, NULL, NULL);
> + CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to enqueue gopromax kernel "
> + "for plane %d: %d.\n", plane, cle);
> + }
> +
> + cle = clFinish(ctx->command_queue);
> + CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to finish command queue: %d.\n", cle);
> +
> + err = av_frame_copy_props(output, input_front);
> +
> + av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
> + av_get_pix_fmt_name(output->format),
> + output->width, output->height, output->pts);
> +
> + return ff_filter_frame(outlink, output);
> +
> +fail:
> + av_frame_free(&output);
> + return err;
> +}
> +
> +static int gopromax_opencl_config_output(AVFilterLink *outlink)
> +{
> + AVFilterContext *avctx = outlink->src;
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> + int height = avctx->inputs[0]->h;
> + int err;
> +
> + switch (ctx->out) {
> + case EQUIRECTANGULAR:
> + if (ctx->width > 0 && ctx->height > 0) {
> + if (ctx->width != ctx->height * 2) {
> + av_log(ctx, AV_LOG_ERROR,
> + "Specified size (%dx%d) is not suitable.\n",
> + ctx->width, ctx->height);
> + return AVERROR(EINVAL);
> + }
> + ctx->ocf.output_width = ctx->width;
> + ctx->ocf.output_height = ctx->height;
> + } else if (ctx->width > 0 || ctx->height > 0) {
> + av_log(ctx, AV_LOG_ERROR,
> + "Both width and height values should be specified.\n");
> + return AVERROR(EINVAL);
> + } else {
> + ctx->ocf.output_width = 4 * height;
> + ctx->ocf.output_height = 2 * height;
> + }
> + break;
> + case EQUIANGULAR:
> + if (ctx->width > 0 && ctx->height > 0) {
> + if (ctx->width * 2 != ctx->height * 3) {
> + av_log(ctx, AV_LOG_ERROR,
> + "Specified size (%dx%d) is not suitable.\n",
> + ctx->width, ctx->height);
> + return AVERROR(EINVAL);
> + }
> + ctx->ocf.output_width = ctx->width;
> + ctx->ocf.output_height = ctx->height;
> + } else if (ctx->width > 0 || ctx->height > 0) {
> + av_log(ctx, AV_LOG_ERROR,
> + "Both width and height values should be specified.\n");
> + return AVERROR(EINVAL);
> + } else {
> + ctx->ocf.output_width = 3 * height;
> + ctx->ocf.output_height = 2 * height;
> + }
> + break;
> + default:
> + av_log(ctx, AV_LOG_ERROR, "Specified output format is not supported.\n");
> + return AVERROR(EINVAL);
> + }
> +
> + err = ff_opencl_filter_config_output(outlink);
> + if (err < 0)
> + return err;
> +
> + err = ff_framesync_init_dualinput(&ctx->fs, avctx);
> + if (err < 0)
> + return err;
> +
> + return ff_framesync_configure(&ctx->fs);
> +}
> +
> +static av_cold int gopromax_opencl_init(AVFilterContext *avctx)
> +{
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> +
> + ctx->fs.on_event = &gopromax_opencl_stack;
> +
> + return ff_opencl_filter_init(avctx);
> +}
> +
> +static int gopromax_opencl_activate(AVFilterContext *avctx)
> +{
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> +
> + return ff_framesync_activate(&ctx->fs);
> +}
> +
> +static av_cold void gopromax_opencl_uninit(AVFilterContext *avctx)
> +{
> + GoProMaxOpenCLContext *ctx = avctx->priv;
> + cl_int cle;
> +
> + if (ctx->kernel) {
> + cle = clReleaseKernel(ctx->kernel);
> + if (cle != CL_SUCCESS)
> + av_log(avctx, AV_LOG_ERROR, "Failed to release "
> + "kernel: %d.\n", cle);
> + }
> +
> + if (ctx->command_queue) {
> + cle = clReleaseCommandQueue(ctx->command_queue);
> + if (cle != CL_SUCCESS)
> + av_log(avctx, AV_LOG_ERROR, "Failed to release "
> + "command queue: %d.\n", cle);
> + }
> +
> + ff_opencl_filter_uninit(avctx);
> +
> + ff_framesync_uninit(&ctx->fs);
> +}
> +
> +#define OFFSET(x) offsetof(GoProMaxOpenCLContext, x)
> +#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
> +static const AVOption gopromax_opencl_options[] = {
> + { "output", "set output projection", OFFSET(out), AV_OPT_TYPE_INT, {.i64=EQUIRECTANGULAR}, 0, NB_PROJECTIONS-1, FLAGS, .unit = "out" },
> + { "e", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, .unit = "out" },
> + { "equirect", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, .unit = "out" },
> + { "eac", "equi-angular cubemap", 0, AV_OPT_TYPE_CONST, {.i64=EQUIANGULAR}, 0, 0, FLAGS, .unit = "out" },
> + { "w", "output width", OFFSET(width), AV_OPT_TYPE_INT, {.i64=0}, 0, INT16_MAX, FLAGS, .unit = "w"},
> + { "h", "output height", OFFSET(height), AV_OPT_TYPE_INT, {.i64=0}, 0, INT16_MAX, FLAGS, .unit = "h"},
> + { "overlap", "set overlapped pixels", OFFSET(overlap), AV_OPT_TYPE_INT, {.i64=64}, 0, 128, FLAGS, .unit = "overlap"},
> + { NULL },
> +};
> +
> +AVFILTER_DEFINE_CLASS(gopromax_opencl);
> +
> +static const AVFilterPad gopromax_opencl_inputs[] = {
> + {
> + .name = "front",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .config_props = &ff_opencl_filter_config_input,
> + },
> + {
> + .name = "rear",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .config_props = &ff_opencl_filter_config_input,
> + },
> +};
> +
> +static const AVFilterPad gopromax_opencl_outputs[] = {
> + {
> + .name = "default",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .config_props = &gopromax_opencl_config_output,
> + },
> +};
> +
> +const AVFilter ff_vf_gopromax_opencl = {
> + .name = "gopromax_opencl",
> + .description = NULL_IF_CONFIG_SMALL("GoPro Max .360 to equirectangular or equi-angular cubemap projection"),
> + .priv_size = sizeof(GoProMaxOpenCLContext),
> + .priv_class = &gopromax_opencl_class,
> + .init = &gopromax_opencl_init,
> + .uninit = &gopromax_opencl_uninit,
> + .activate = &gopromax_opencl_activate,
> + FILTER_INPUTS(gopromax_opencl_inputs),
> + FILTER_OUTPUTS(gopromax_opencl_outputs),
> + FILTER_SINGLE_PIXFMT(AV_PIX_FMT_OPENCL),
> + .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
> + .flags = AVFILTER_FLAG_HWDEVICE,
> +};