[FFmpeg-devel] [PATCH V3 2/3] avfilter/dnn: unify the layer execution function in native mode
Guo, Yejun
yejun.guo at intel.com
Wed Oct 9 17:08:11 EEST 2019
Signed-off-by: Guo, Yejun <yejun.guo at intel.com>
---
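Note (not part of the commit itself): the change replaces the per-layer switch/case in
ff_dnn_execute_model_native() with a table of uniform execution functions indexed by
DNNLayerType. Below is a minimal standalone sketch of that dispatch pattern; the demo_*
names and parameters are purely illustrative and are not the FFmpeg API.

    /* sketch: an enum value doubles as an index into a table of uniform
     * execution functions, so the per-layer switch disappears */
    #include <stdio.h>

    typedef enum { DEMO_INPUT = 0, DEMO_CONV2D = 1, DEMO_PAD = 2, DEMO_COUNT } DemoLayerType;

    /* every layer receives its parameters as an opaque pointer and casts inside */
    typedef int (*DemoLayerFunc)(const void *parameters);

    static int demo_exec_conv2d(const void *parameters)
    {
        const int *kernel_size = parameters;
        printf("conv2d, kernel_size=%d\n", *kernel_size);
        return 0;
    }

    static int demo_exec_pad(const void *parameters)
    {
        const float *constant_value = parameters;
        printf("pad, constant_value=%f\n", *constant_value);
        return 0;
    }

    /* indexed directly by the layer type; the input layer has no execution function */
    static const DemoLayerFunc demo_layer_funcs[DEMO_COUNT] = {
        NULL,
        demo_exec_conv2d,
        demo_exec_pad,
    };

    int main(void)
    {
        int kernel_size = 3;
        float constant_value = 0.0f;
        DemoLayerType layers[2] = { DEMO_CONV2D, DEMO_PAD };
        const void *params[2]   = { &kernel_size, &constant_value };

        for (int i = 0; i < 2; i++)
            demo_layer_funcs[layers[i]](params[i]);
        return 0;
    }

With this shape, adding a new layer only needs a new DNNLayerType value before DLT_COUNT
and a matching entry appended to layer_funcs.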
libavfilter/dnn/Makefile | 1 +
libavfilter/dnn/dnn_backend_native.c | 34 ++++------------------
libavfilter/dnn/dnn_backend_native.h | 4 ++-
libavfilter/dnn/dnn_backend_native_layer_conv2d.c | 4 ++-
libavfilter/dnn/dnn_backend_native_layer_conv2d.h | 3 +-
.../dnn/dnn_backend_native_layer_depth2space.c | 5 +++-
.../dnn/dnn_backend_native_layer_depth2space.h | 3 +-
libavfilter/dnn/dnn_backend_native_layer_maximum.c | 4 ++-
libavfilter/dnn/dnn_backend_native_layer_maximum.h | 3 +-
libavfilter/dnn/dnn_backend_native_layer_pad.c | 5 ++--
libavfilter/dnn/dnn_backend_native_layer_pad.h | 4 +--
libavfilter/dnn/dnn_backend_native_layers.c | 34 ++++++++++++++++++++++
libavfilter/dnn/dnn_backend_native_layers.h | 32 ++++++++++++++++++++
tests/dnn/dnn-layer-conv2d-test.c | 4 +--
tests/dnn/dnn-layer-depth2space-test.c | 4 ++-
15 files changed, 102 insertions(+), 42 deletions(-)
create mode 100644 libavfilter/dnn/dnn_backend_native_layers.c
create mode 100644 libavfilter/dnn/dnn_backend_native_layers.h
diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index 721094d..171f00e 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -1,5 +1,6 @@
OBJS-$(CONFIG_DNN) += dnn/dnn_interface.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
+OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layers.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 97549d3..c8fb956 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -29,6 +29,7 @@
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"
#include "dnn_backend_native_layer_maximum.h"
+#include "dnn_backend_native_layers.h"
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
@@ -331,10 +332,6 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
{
ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
int32_t layer;
- ConvolutionalParams *conv_params;
- DepthToSpaceParams *depth_to_space_params;
- LayerPadParams *pad_params;
- DnnLayerMaximumParams *maximum_params;
uint32_t nb = FFMIN(nb_output, network->nb_output);
if (network->layers_num <= 0 || network->operands_num <= 0)
@@ -343,30 +340,11 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
return DNN_ERROR;
for (layer = 0; layer < network->layers_num; ++layer){
- switch (network->layers[layer].type){
- case DLT_CONV2D:
- conv_params = (ConvolutionalParams *)network->layers[layer].params;
- convolve(network->operands, network->layers[layer].input_operand_indexes,
- network->layers[layer].output_operand_index, conv_params);
- break;
- case DLT_DEPTH_TO_SPACE:
- depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
- depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
- network->layers[layer].output_operand_index, depth_to_space_params->block_size);
- break;
- case DLT_MIRROR_PAD:
- pad_params = (LayerPadParams *)network->layers[layer].params;
- dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
- network->layers[layer].output_operand_index, pad_params);
- break;
- case DLT_MAXIMUM:
- maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
- dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
- network->layers[layer].output_operand_index, maximum_params);
- break;
- case DLT_INPUT:
- return DNN_ERROR;
- }
+ DNNLayerType layer_type = network->layers[layer].type;
+ layer_funcs[layer_type](network->operands,
+ network->layers[layer].input_operand_indexes,
+ network->layers[layer].output_operand_index,
+ network->layers[layer].params);
}
for (uint32_t i = 0; i < nb; ++i) {
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 761e5ed..9821390 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -33,13 +33,15 @@
/**
* the enum value of DNNLayerType should not be changed,
* the same values are used in convert_from_tensorflow.py
+ * and it is also used to index the layer execution function pointer.
*/
typedef enum {
DLT_INPUT = 0,
DLT_CONV2D = 1,
DLT_DEPTH_TO_SPACE = 2,
DLT_MIRROR_PAD = 3,
- DLT_MAXIMUM = 4
+ DLT_MAXIMUM = 4,
+ DLT_COUNT
} DNNLayerType;
typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index b13b431..594187f 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -23,7 +23,8 @@
#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
-int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
+int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters)
{
float *output;
int32_t input_operand_index = input_operand_indexes[0];
@@ -32,6 +33,7 @@ int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t
int width = operands[input_operand_index].dims[2];
int channel = operands[input_operand_index].dims[3];
const float *input = operands[input_operand_index].data;
+ const ConvolutionalParams *conv_params = (const ConvolutionalParams *)parameters;
int radius = conv_params->kernel_size >> 1;
int src_linesize = width * conv_params->input_num;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index 7ddfff3..1dd84cb 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -35,5 +35,6 @@ typedef struct ConvolutionalParams{
float *biases;
} ConvolutionalParams;
-int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params);
+int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters);
#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index a248764..3720060 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -27,9 +27,12 @@
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_depth2space.h"
-int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
+int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters)
{
float *output;
+ const DepthToSpaceParams *params = (const DepthToSpaceParams *)parameters;
+ int block_size = params->block_size;
int32_t input_operand_index = input_operand_indexes[0];
int number = operands[input_operand_index].dims[0];
int height = operands[input_operand_index].dims[1];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
index 8708be8..c481bf1 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
@@ -34,6 +34,7 @@ typedef struct DepthToSpaceParams{
int block_size;
} DepthToSpaceParams;
-int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size);
+int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters);
#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
index a2669af..6add170 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -27,10 +27,12 @@
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_maximum.h"
-int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params)
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters)
{
const DnnOperand *input = &operands[input_operand_indexes[0]];
DnnOperand *output = &operands[output_operand_index];
+ const DnnLayerMaximumParams *params = (const DnnLayerMaximumParams *)parameters;
int dims_count;
const float *src;
float *dst;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.h b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
index 6396e58..87f3bf5 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
@@ -37,6 +37,7 @@ typedef struct DnnLayerMaximumParams{
}val;
} DnnLayerMaximumParams;
-int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params);
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters);
#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index c2905a7..f5c5727 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -48,12 +48,13 @@ static int after_get_buddy(int given, int border, LayerPadModeParam mode)
}
}
-int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index,
- const LayerPadParams *params)
+int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters)
{
int32_t before_paddings;
int32_t after_paddings;
float* output;
+ const LayerPadParams *params = (const LayerPadParams *)parameters;
// suppose format is <N, H, W, C>
int32_t input_operand_index = input_operand_indexes[0];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.h b/libavfilter/dnn/dnn_backend_native_layer_pad.h
index 7cc8213..036ff7b 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.h
@@ -36,7 +36,7 @@ typedef struct LayerPadParams{
float constant_values;
} LayerPadParams;
-int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index,
- const LayerPadParams *params);
+int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters);
#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layers.c b/libavfilter/dnn/dnn_backend_native_layers.c
new file mode 100644
index 0000000..5f81a09
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layers.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+#include "dnn_backend_native_layers.h"
+#include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_conv2d.h"
+#include "dnn_backend_native_layer_depth2space.h"
+#include "dnn_backend_native_layer_maximum.h"
+
+LAYER_EXEC_FUNC layer_funcs[DLT_COUNT] = {
+ NULL,
+ dnn_execute_layer_conv2d,
+ dnn_execute_layer_depth2space,
+ dnn_execute_layer_pad,
+ dnn_execute_layer_maximum,
+};
\ No newline at end of file
diff --git a/libavfilter/dnn/dnn_backend_native_layers.h b/libavfilter/dnn/dnn_backend_native_layers.h
new file mode 100644
index 0000000..3276aee
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layers.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYERS_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYERS_H
+
+#include <stdint.h>
+#include "dnn_backend_native.h"
+
+typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes,
+ int32_t output_operand_index, const void *parameters);
+
+extern LAYER_EXEC_FUNC layer_funcs[DLT_COUNT];
+
+#endif
diff --git a/tests/dnn/dnn-layer-conv2d-test.c b/tests/dnn/dnn-layer-conv2d-test.c
index afc5391..9d13da3 100644
--- a/tests/dnn/dnn-layer-conv2d-test.c
+++ b/tests/dnn/dnn-layer-conv2d-test.c
@@ -113,7 +113,7 @@ static int test_with_same_dilate(void)
operands[1].data = NULL;
input_indexes[0] = 0;
- convolve(operands, input_indexes, 1, &params);
+ dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
output = operands[1].data;
for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -212,7 +212,7 @@ static int test_with_valid(void)
operands[1].data = NULL;
input_indexes[0] = 0;
- convolve(operands, input_indexes, 1, &params);
+ dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
output = operands[1].data;
for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-depth2space-test.c b/tests/dnn/dnn-layer-depth2space-test.c
index 87118de..5225ec7 100644
--- a/tests/dnn/dnn-layer-depth2space-test.c
+++ b/tests/dnn/dnn-layer-depth2space-test.c
@@ -48,6 +48,7 @@ static int test(void)
print(list(output.flatten()))
*/
+ DepthToSpaceParams params;
DnnOperand operands[2];
int32_t input_indexes[1];
float input[1*5*3*4] = {
@@ -79,7 +80,8 @@ static int test(void)
operands[1].data = NULL;
input_indexes[0] = 0;
- depth_to_space(operands, input_indexes, 1, 2);
+ params.block_size = 2;
+ dnn_execute_layer_depth2space(operands, input_indexes, 1, &params);
output = operands[1].data;
for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
--
2.7.4