[FFmpeg-devel] [PATCH V2] dnn/native: add error log messages

Ting Fu ting.fu at intel.com
Mon Aug 17 08:35:18 EEST 2020


Signed-off-by: Ting Fu <ting.fu at intel.com>
---
V2:
    Fix the issue in V1 that caused fate failures
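
Note on the logging approach (commentary, not part of the diff below):
the patch gives ConvolutionalNetwork a NetworkContext whose first member
is a const AVClass *, which is the only requirement av_log() places on
its context argument. A minimal sketch of that pattern, with illustrative
names (DemoContext/demo_class are not from this patch):

    #include "libavutil/log.h"

    typedef struct DemoContext {
        const AVClass *class;   /* must be the first member for av_log() */
    } DemoContext;

    static const AVClass demo_class = {
        .class_name = "demo",
        .item_name  = av_default_item_name,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    static void demo(void)
    {
        DemoContext ctx = { .class = &demo_class };
        /* prints "[demo @ 0x...] something went wrong" */
        av_log(&ctx, AV_LOG_ERROR, "something went wrong\n");
    }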

 libavfilter/dnn/dnn_backend_native.c          | 74 +++++++++++++++----
 libavfilter/dnn/dnn_backend_native.h          |  5 ++
 .../dnn/dnn_backend_native_layer_avgpool.c    |  2 +-
 .../dnn/dnn_backend_native_layer_avgpool.h    |  2 +-
 .../dnn/dnn_backend_native_layer_conv2d.c     |  2 +-
 .../dnn/dnn_backend_native_layer_conv2d.h     |  2 +-
 .../dnn_backend_native_layer_depth2space.c    |  2 +-
 .../dnn_backend_native_layer_depth2space.h    |  2 +-
 .../dnn/dnn_backend_native_layer_mathbinary.c | 10 ++-
 .../dnn/dnn_backend_native_layer_mathbinary.h |  2 +-
 .../dnn/dnn_backend_native_layer_mathunary.c  | 10 ++-
 .../dnn/dnn_backend_native_layer_mathunary.h  |  2 +-
 .../dnn/dnn_backend_native_layer_maximum.c    |  2 +-
 .../dnn/dnn_backend_native_layer_maximum.h    |  2 +-
 .../dnn/dnn_backend_native_layer_pad.c        |  2 +-
 .../dnn/dnn_backend_native_layer_pad.h        |  2 +-
 libavfilter/dnn/dnn_backend_native_layers.h   |  2 +-
 tests/dnn/dnn-layer-avgpool-test.c            |  4 +-
 tests/dnn/dnn-layer-conv2d-test.c             |  4 +-
 tests/dnn/dnn-layer-depth2space-test.c        |  2 +-
 tests/dnn/dnn-layer-mathbinary-test.c         |  6 +-
 tests/dnn/dnn-layer-mathunary-test.c          |  2 +-
 tests/dnn/dnn-layer-maximum-test.c            |  2 +-
 tests/dnn/dnn-layer-pad-test.c                |  6 +-
 24 files changed, 103 insertions(+), 48 deletions(-)
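Note for reviewers: the unit tests under tests/dnn/ pass NULL for the new
context argument. av_log() accepts a NULL avcl and simply omits the
"[class @ ...]" prefix, so the tests do not need to construct a
NetworkContext; a failing layer still logs, just unprefixed:

    /* NULL context: av_log() inside the layer still prints, without prefix */
    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);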

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index adc652a2c4..6ddffa54af 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -28,15 +28,30 @@
 #include "dnn_backend_native_layer_conv2d.h"
 #include "dnn_backend_native_layers.h"
 
+static const AVClass dnn_native_class = {
+    .class_name = "dnn_native",
+    .item_name  = av_default_item_name,
+    .option     = NULL,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_FILTER,
+};
+
+static NetworkContext network_ctx = {
+    .class      = &dnn_native_class,
+};
+
 static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
 {
     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NetworkContext *ctx = network->log_ctx;
 
     for (int i = 0; i < network->operands_num; ++i) {
         DnnOperand *oprd = &network->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
-            if (oprd->type != DOT_INPUT)
+            if (oprd->type != DOT_INPUT) {
+                av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not an input node\n", input_name);
                 return DNN_ERROR;
+            }
             input->dt = oprd->data_type;
             av_assert0(oprd->dims[0] == 1);
             input->height = oprd->dims[1];
@@ -47,30 +62,37 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
     }
 
     // do not find the input operand
+    av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
     return DNN_ERROR;
 }
 
 static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NetworkContext *ctx = network->log_ctx;
     DnnOperand *oprd = NULL;
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (network->layers_num <= 0 || network->operands_num <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "No operands or layers in model\n");
         return DNN_ERROR;
+    }
 
     /* inputs */
     for (int i = 0; i < network->operands_num; ++i) {
         oprd = &network->operands[i];
         if (strcmp(oprd->name, input_name) == 0) {
-            if (oprd->type != DOT_INPUT)
+            if (oprd->type != DOT_INPUT) {
+                av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not an input node\n", input_name);
                 return DNN_ERROR;
+            }
             break;
         }
         oprd = NULL;
     }
-
-    if (!oprd)
+    if (!oprd) {
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
         return DNN_ERROR;
+    }
 
     oprd->dims[0] = 1;
     oprd->dims[1] = input->height;
@@ -79,11 +101,15 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
 
     av_freep(&oprd->data);
     oprd->length = calculate_operand_data_length(oprd);
-    if (oprd->length <= 0)
+    if (oprd->length <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Input data length overflow\n");
         return DNN_ERROR;
+    }
     oprd->data = av_malloc(oprd->length);
-    if (!oprd->data)
+    if (!oprd->data) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input data\n");
         return DNN_ERROR;
+    }
 
     input->data = oprd->data;
 
@@ -91,8 +117,10 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
     network->nb_output = 0;
     av_freep(&network->output_indexes);
     network->output_indexes = av_mallocz_array(nb_output, sizeof(*network->output_indexes));
-    if (!network->output_indexes)
+    if (!network->output_indexes) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output indexes\n");
         return DNN_ERROR;
+    }
 
     for (uint32_t i = 0; i < nb_output; ++i) {
         const char *output_name = output_names[i];
@@ -105,8 +133,10 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
         }
     }
 
-    if (network->nb_output != nb_output)
+    if (network->nb_output != nb_output) {
+        av_log(ctx, AV_LOG_ERROR, "Could not find all output names in model\n");
         return DNN_ERROR;
+    }
 
     return DNN_SUCCESS;
 }
@@ -128,6 +158,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     int32_t layer;
     DNNLayerType layer_type;
 
+
     if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
         return NULL;
     }
@@ -171,6 +202,8 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *optio
     if (!network){
         goto fail;
     }
+
+    network->log_ctx = &network_ctx;
     model->model = (void *)network;
 
     avio_seek(model_file_context, file_size - 8, SEEK_SET);
@@ -258,20 +291,29 @@ fail:
 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
 {
     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
+    NetworkContext *ctx = network->log_ctx;
     int32_t layer;
     uint32_t nb = FFMIN(nb_output, network->nb_output);
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (network->layers_num <= 0 || network->operands_num <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Invalid network layer number: %d\n", network->layers_num);
         return DNN_ERROR;
-    if (!network->operands[0].data)
+    }
+    if (!network->operands[0].data) {
+        av_log(ctx, AV_LOG_ERROR, "Empty network input data\n");
         return DNN_ERROR;
+    }
 
-    for (layer = 0; layer < network->layers_num; ++layer){
+    for (layer = 0; layer < network->layers_num; ++layer) {
         DNNLayerType layer_type = network->layers[layer].type;
-        layer_funcs[layer_type].pf_exec(network->operands,
-                                  network->layers[layer].input_operand_indexes,
-                                  network->layers[layer].output_operand_index,
-                                  network->layers[layer].params);
+        if (layer_funcs[layer_type].pf_exec(network->operands,
+                                            network->layers[layer].input_operand_indexes,
+                                            network->layers[layer].output_operand_index,
+                                            network->layers[layer].params,
+                                            network->log_ctx) == DNN_ERROR) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to execute network layer\n");
+            return DNN_ERROR;
+        }
     }
 
     for (uint32_t i = 0; i < nb; ++i) {
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index b455e44992..954a4187d1 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -106,8 +106,13 @@ typedef struct InputParams{
     int height, width, channels;
 } InputParams;
 
+typedef struct NetworkContext {
+    const AVClass *class;
+} NetworkContext;
+
 // Represents simple feed-forward convolutional network.
 typedef struct ConvolutionalNetwork{
+    NetworkContext *log_ctx;
     Layer *layers;
     int32_t layers_num;
     DnnOperand *operands;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
index d745c35b4a..98d670f965 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
@@ -56,7 +56,7 @@ int dnn_load_layer_avg_pool(Layer *layer, AVIOContext *model_file_context, int f
 }
 
 int dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operand_indexes,
-                             int32_t output_operand_index, const void *parameters)
+                             int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     float *output;
     int height_end, width_end, height_radius, width_radius, output_height, output_width, kernel_area;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.h b/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
index 8e31ddb7c8..7c7623803a 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
@@ -35,6 +35,6 @@ typedef struct AvgPoolParams{
 
 int dnn_load_layer_avg_pool(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operand_indexes,
-                             int32_t output_operand_index, const void *parameters);
+                             int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index a2202e4073..1b898813a9 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -89,7 +89,7 @@ int dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int fil
 }
 
 int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
-                             int32_t output_operand_index, const void *parameters)
+                             int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     float *output;
     int32_t input_operand_index = input_operand_indexes[0];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index b240b7ef6b..8cee63e558 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -37,5 +37,5 @@ typedef struct ConvolutionalParams{
 
 int dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
-                             int32_t output_operand_index, const void *parameters);
+                             int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index 2c8bddf23d..54fee37406 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -50,7 +50,7 @@ int dnn_load_layer_depth2space(Layer *layer, AVIOContext *model_file_context, in
 }
 
 int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                  int32_t output_operand_index, const void *parameters)
+                                  int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     float *output;
     const DepthToSpaceParams *params = (const DepthToSpaceParams *)parameters;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
index b2901e0141..6a6f52f722 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
@@ -36,6 +36,6 @@ typedef struct DepthToSpaceParams{
 
 int dnn_load_layer_depth2space(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                  int32_t output_operand_index, const void *parameters);
+                                  int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
index dd42c329a9..413120d0bd 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
@@ -77,7 +77,7 @@ int dnn_load_layer_math_binary(Layer *layer, AVIOContext *model_file_context, in
 }
 
 int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                 int32_t output_operand_index, const void *parameters)
+                                 int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
@@ -91,11 +91,15 @@ int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_ope
 
     output->data_type = input->data_type;
     output->length = calculate_operand_data_length(output);
-    if (output->length <= 0)
+    if (output->length <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Output data length overflow\n");
         return DNN_ERROR;
+    }
     output->data = av_realloc(output->data, output->length);
-    if (!output->data)
+    if (!output->data) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
         return DNN_ERROR;
+    }
 
     dims_count = calculate_operand_dims_count(output);
     src = input->data;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.h b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.h
index 0acf3b0ea0..b2a0105fe6 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.h
@@ -48,6 +48,6 @@ typedef struct DnnLayerMathBinaryParams{
 
 int dnn_load_layer_math_binary(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                 int32_t output_operand_index, const void *parameters);
+                                 int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index 58ee0e9d3d..4758d147f2 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -53,7 +53,7 @@ int dnn_load_layer_math_unary(Layer *layer, AVIOContext *model_file_context, int
 }
 
 int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                int32_t output_operand_index, const void *parameters)
+                                int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
@@ -67,11 +67,15 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
 
     output->data_type = input->data_type;
     output->length = calculate_operand_data_length(output);
-    if (output->length <= 0)
+    if (output->length <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Output data length overflow\n");
         return DNN_ERROR;
+    }
     output->data = av_realloc(output->data, output->length);
-    if (!output->data)
+    if (!output->data) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
         return DNN_ERROR;
+    }
 
     dims_count = calculate_operand_dims_count(output);
     src = input->data;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
index d6a61effd5..ba16c06912 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
@@ -55,6 +55,6 @@ typedef struct DnnLayerMathUnaryParams{
 
 int dnn_load_layer_math_unary(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_operand_indexes,
-                                int32_t output_operand_index, const void *parameters);
+                                int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
index cdddfdd87b..98323abff3 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -50,7 +50,7 @@ int dnn_load_layer_maximum(Layer *layer, AVIOContext *model_file_context, int fi
 }
 
 int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
-                              int32_t output_operand_index, const void *parameters)
+                              int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.h b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
index c049c63fd8..b8c25da958 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
@@ -39,6 +39,6 @@ typedef struct DnnLayerMaximumParams{
 
 int dnn_load_layer_maximum(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
-                              int32_t output_operand_index, const void *parameters);
+                              int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index feaab001e8..01d1edcc6e 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -76,7 +76,7 @@ static int after_get_buddy(int given, int border, LayerPadModeParam mode)
 }
 
 int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
-                          int32_t output_operand_index, const void *parameters)
+                          int32_t output_operand_index, const void *parameters, NetworkContext *ctx)
 {
     int32_t before_paddings;
     int32_t after_paddings;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.h b/libavfilter/dnn/dnn_backend_native_layer_pad.h
index 18e05bdd5c..bf84924a68 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.h
@@ -38,6 +38,6 @@ typedef struct LayerPadParams{
 
 int dnn_load_layer_pad(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
-                          int32_t output_operand_index, const void *parameters);
+                          int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layers.h b/libavfilter/dnn/dnn_backend_native_layers.h
index b696e9c6fa..f6dcdc6ecb 100644
--- a/libavfilter/dnn/dnn_backend_native_layers.h
+++ b/libavfilter/dnn/dnn_backend_native_layers.h
@@ -25,7 +25,7 @@
 #include "dnn_backend_native.h"
 
 typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes,
-                               int32_t output_operand_index, const void *parameters);
+                               int32_t output_operand_index, const void *parameters, NetworkContext *ctx);
 typedef int (*LAYER_LOAD_FUNC)(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
 
 typedef struct LayerFunc {
diff --git a/tests/dnn/dnn-layer-avgpool-test.c b/tests/dnn/dnn-layer-avgpool-test.c
index d7c33a0e88..0e6be8ba57 100644
--- a/tests/dnn/dnn-layer-avgpool-test.c
+++ b/tests/dnn/dnn-layer-avgpool-test.c
@@ -91,7 +91,7 @@ static int test_with_same(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params);
+    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {
@@ -171,7 +171,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params);
+    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {
diff --git a/tests/dnn/dnn-layer-conv2d-test.c b/tests/dnn/dnn-layer-conv2d-test.c
index 2da01e5372..836839cc64 100644
--- a/tests/dnn/dnn-layer-conv2d-test.c
+++ b/tests/dnn/dnn-layer-conv2d-test.c
@@ -114,7 +114,7 @@ static int test_with_same_dilate(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
+    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -214,7 +214,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params);
+    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-depth2space-test.c b/tests/dnn/dnn-layer-depth2space-test.c
index 5225ec7b7a..2c641884c1 100644
--- a/tests/dnn/dnn-layer-depth2space-test.c
+++ b/tests/dnn/dnn-layer-depth2space-test.c
@@ -81,7 +81,7 @@ static int test(void)
 
     input_indexes[0] = 0;
     params.block_size = 2;
-    dnn_execute_layer_depth2space(operands, input_indexes, 1, &params);
+    dnn_execute_layer_depth2space(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-mathbinary-test.c b/tests/dnn/dnn-layer-mathbinary-test.c
index e7f8f8557c..187a2b9bf2 100644
--- a/tests/dnn/dnn-layer-mathbinary-test.c
+++ b/tests/dnn/dnn-layer-mathbinary-test.c
@@ -69,7 +69,7 @@ static int test_broadcast_input0(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params);
+    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -109,7 +109,7 @@ static int test_broadcast_input1(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params);
+    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -157,7 +157,7 @@ static int test_no_broadcast(DNNMathBinaryOperation op)
 
     input_indexes[0] = 0;
     input_indexes[1] = 1;
-    dnn_execute_layer_math_binary(operands, input_indexes, 2, &params);
+    dnn_execute_layer_math_binary(operands, input_indexes, 2, &params, NULL);
 
     output = operands[2].data;
     for (int i = 0; i < sizeof(input0) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-mathunary-test.c b/tests/dnn/dnn-layer-mathunary-test.c
index e9235120f3..ce14c41311 100644
--- a/tests/dnn/dnn-layer-mathunary-test.c
+++ b/tests/dnn/dnn-layer-mathunary-test.c
@@ -87,7 +87,7 @@ static int test(DNNMathUnaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_unary(operands, input_indexes, 1, &params);
+    dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); ++i) {
diff --git a/tests/dnn/dnn-layer-maximum-test.c b/tests/dnn/dnn-layer-maximum-test.c
index 06daf64481..c982670591 100644
--- a/tests/dnn/dnn-layer-maximum-test.c
+++ b/tests/dnn/dnn-layer-maximum-test.c
@@ -45,7 +45,7 @@ static int test(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_maximum(operands, input_indexes, 1, &params);
+    dnn_execute_layer_maximum(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-pad-test.c b/tests/dnn/dnn-layer-pad-test.c
index ea8c824d1e..6a72adb3ae 100644
--- a/tests/dnn/dnn-layer-pad-test.c
+++ b/tests/dnn/dnn-layer-pad-test.c
@@ -79,7 +79,7 @@ static int test_with_mode_symmetric(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params);
+    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -144,7 +144,7 @@ static int test_with_mode_reflect(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params);
+    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -210,7 +210,7 @@ static int test_with_mode_constant(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params);
+    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
-- 
2.17.1
