[FFmpeg-devel] [PATCH V2 3/4] avfilter/dnn: add a new interface to query dnn model's input info

Pedro Arthur bygrandao at gmail.com
Wed Oct 30 16:52:24 EET 2019


On Mon, Oct 21, 2019 at 09:44, Guo, Yejun <yejun.guo at intel.com> wrote:

> To support DNN networks more generally, we need to know the input info
> of the DNN model.
>
> Background:
> The data type of a DNN model's input can be float32, uint8, fp16, etc.,
> and the width/height of the input image can be fixed or variable.
>
> Signed-off-by: Guo, Yejun <yejun.guo at intel.com>
> ---
>  libavfilter/dnn/dnn_backend_native.c | 24 +++++++++++++++++++++++-
>  libavfilter/dnn/dnn_backend_tf.c     | 32 ++++++++++++++++++++++++++++++++
>  libavfilter/dnn_interface.h          |  3 +++
>  3 files changed, 58 insertions(+), 1 deletion(-)
>
> diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
> index add1db4..94634b3 100644
> --- a/libavfilter/dnn/dnn_backend_native.c
> +++ b/libavfilter/dnn/dnn_backend_native.c
> @@ -28,6 +28,28 @@
>  #include "dnn_backend_native_layer_conv2d.h"
>  #include "dnn_backend_native_layers.h"
>
> +static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
> +{
> +    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
> +
> +    for (int i = 0; i < network->operands_num; ++i) {
> +        DnnOperand *oprd = &network->operands[i];
> +        if (strcmp(oprd->name, input_name) == 0) {
> +            if (oprd->type != DOT_INPUT)
> +                return DNN_ERROR;
> +            input->dt = oprd->data_type;
> +            av_assert0(oprd->dims[0] == 1);
> +            input->height = oprd->dims[1];
> +            input->width = oprd->dims[2];
> +            input->channels = oprd->dims[3];
> +            return DNN_SUCCESS;
> +        }
> +    }
> +
> +    // the input operand was not found
> +    return DNN_ERROR;
> +}
> +
>  static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
>  {
>      ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
> @@ -37,7 +59,6 @@ static DNNReturnType set_input_output_native(void *model, DNNData *input, const
>          return DNN_ERROR;
>
>      /* inputs */
> -    av_assert0(input->dt == DNN_FLOAT);
>      for (int i = 0; i < network->operands_num; ++i) {
>          oprd = &network->operands[i];
>          if (strcmp(oprd->name, input_name) == 0) {
> @@ -234,6 +255,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
>      }
>
>      model->set_input_output = &set_input_output_native;
> +    model->get_input = &get_input_native;
>
>      return model;
>  }
> diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
> index ed91d05..a921667 100644
> --- a/libavfilter/dnn/dnn_backend_tf.c
> +++ b/libavfilter/dnn/dnn_backend_tf.c
> @@ -105,6 +105,37 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
>                               input_dims[1] * input_dims[2] * input_dims[3] * size);
>  }
>
> +static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
> +{
> +    TFModel *tf_model = (TFModel *)model;
> +    TF_Status *status;
> +    int64_t dims[4];
> +
> +    TF_Output tf_output;
> +    tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
> +    if (!tf_output.oper)
> +        return DNN_ERROR;
> +
> +    tf_output.index = 0;
> +    input->dt = TF_OperationOutputType(tf_output);
> +
> +    status = TF_NewStatus();
> +    TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
> +    if (TF_GetCode(status) != TF_OK){
> +        TF_DeleteStatus(status);
> +        return DNN_ERROR;
> +    }
> +    TF_DeleteStatus(status);
> +
> +    // currently only NHWC is supported
> +    av_assert0(dims[0] == 1);
> +    input->height = dims[1];
> +    input->width = dims[2];
> +    input->channels = dims[3];
> +
> +    return DNN_SUCCESS;
> +}
> +
>  static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
>  {
>      TFModel *tf_model = (TFModel *)model;
> @@ -568,6 +599,7 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename)
>
>      model->model = (void *)tf_model;
>      model->set_input_output = &set_input_output_tf;
> +    model->get_input = &get_input_tf;
>
>      return model;
>  }
> diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
> index fdefcb7..b20e5c8 100644
> --- a/libavfilter/dnn_interface.h
> +++ b/libavfilter/dnn_interface.h
> @@ -43,6 +43,9 @@ typedef struct DNNData{
>  typedef struct DNNModel{
>      // Stores model that can be different for different backends.
>      void *model;
> +    // Gets model input information.
> +    // struct DNNData is reused here; the DNNData.data field is not needed.
> +    DNNReturnType (*get_input)(void *model, DNNData *input, const char *input_name);
>      // Sets model input and output.
>      // Should be called at least once before model execution.
>      DNNReturnType (*set_input_output)(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output);
> --
> 2.7.4
>
LGTM
Should push soon.
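
For reference (not part of the patch), a minimal sketch of how a caller such
as a DNN filter could use the new get_input entry point; ctx, frame and the
input name "x" below are only illustrative:

    DNNData input;
    // ask the model for its expected input: data type and NHWC dimensions
    if (model->get_input(model->model, &input, "x") != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "failed to get input info from the model\n");
        return AVERROR(EIO);
    }
    // a non-positive dimension (e.g. -1 reported by TensorFlow for an
    // unknown axis) means the size is variable and the caller picks it,
    // typically from the incoming frame
    if (input.width  <= 0) input.width  = frame->width;
    if (input.height <= 0) input.height = frame->height;
    // input.dt tells the caller which buffer type to prepare (float32,
    // uint8, ...) before calling model->set_input_output() as usual

Reusing struct DNNData for the query (with the data field left unused) keeps
the interface small, as the new comment in dnn_interface.h notes.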

