diff options
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/Types.h | 75 | ||||
-rw-r--r-- | arm_compute/runtime/FunctionDescriptors.h | 8 | ||||
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h | 42 | ||||
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEGEMM.h | 11 |
4 files changed, 93 insertions, 43 deletions
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h index c87c97cb06..66e1c8ab1f 100644 --- a/arm_compute/core/Types.h +++ b/arm_compute/core/Types.h @@ -774,10 +774,10 @@ public: private: std::pair<unsigned int, unsigned int> _stride; - unsigned int _pad_left; - unsigned int _pad_top; - unsigned int _pad_right; - unsigned int _pad_bottom; + unsigned int _pad_left; + unsigned int _pad_top; + unsigned int _pad_right; + unsigned int _pad_bottom; DimensionRoundingType _round_type; }; @@ -919,14 +919,14 @@ public: } private: - std::vector<float> _min_sizes; - std::vector<float> _variances; - float _offset; - bool _flip; - bool _clip; - std::vector<float> _max_sizes; - std::vector<float> _aspect_ratios; - Coordinates2D _img_size; + std::vector<float> _min_sizes; + std::vector<float> _variances; + float _offset; + bool _flip; + bool _clip; + std::vector<float> _max_sizes; + std::vector<float> _aspect_ratios; + Coordinates2D _img_size; std::array<float, 2> _steps; }; @@ -1171,15 +1171,15 @@ public: } private: - unsigned int _max_detections; - unsigned int _max_classes_per_detection; - float _nms_score_threshold; - float _iou_threshold; - unsigned int _num_classes; + unsigned int _max_detections; + unsigned int _max_classes_per_detection; + float _nms_score_threshold; + float _iou_threshold; + unsigned int _num_classes; std::array<float, 4> _scales_values; - bool _use_regular_nms; - unsigned int _detection_per_class; - bool _dequantize_scores; + bool _use_regular_nms; + unsigned int _detection_per_class; + bool _dequantize_scores; }; /** Pooling Layer Information struct*/ @@ -1612,13 +1612,13 @@ public: } private: - float _img_width; - float _img_height; - float _scale; - bool _apply_scale; - bool _correct_transform_coords; + float _img_width; + float _img_height; + float _scale; + bool _apply_scale; + bool _correct_transform_coords; std::array<float, 4> _weights; - float _bbox_xform_clip; + float _bbox_xform_clip; }; /** Activation Layer Information class */ 
@@ -2053,6 +2053,11 @@ public: { return _weight_format; } + void set_weight_format(arm_compute::WeightFormat weight_format) + { + _weight_format = weight_format; + } + unsigned int kernel_width() const { return _kernel_width; @@ -2495,11 +2500,29 @@ public: return _fixed_format; } + /** Set fixed-format flag + * + * @param[in] fixed_format sets whether or not to use fixed-format kernels + */ + void set_fixed_format(bool fixed_format) + { + _fixed_format = fixed_format; + } + arm_compute::WeightFormat weight_format() const { return _weight_format; } + /** Set weight format to be used + * + * @param[in] weight_format arm_compute::WeightFormat enumeration + */ + void set_weight_format(arm_compute::WeightFormat weight_format) + { + _weight_format = weight_format; + } + private: bool _is_a_reshaped; bool _is_b_reshaped; diff --git a/arm_compute/runtime/FunctionDescriptors.h b/arm_compute/runtime/FunctionDescriptors.h index face8a6fb4..af79820bc3 100644 --- a/arm_compute/runtime/FunctionDescriptors.h +++ b/arm_compute/runtime/FunctionDescriptors.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -62,8 +62,9 @@ struct Conv2dInfo const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups, - const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {}) - : conv_info(conv_info), dilation(dilation), act_info(act_info), enable_fast_math(enable_fast_math), num_groups(num_groups), post_ops(post_ops) + const experimental::PostOpList<ITensorInfo *> &post_ops = experimental::PostOpList<ITensorInfo *> {}, + const WeightsInfo &weights_info = WeightsInfo()) + : conv_info(conv_info), dilation(dilation), act_info(act_info), enable_fast_math(enable_fast_math), num_groups(num_groups), post_ops(post_ops), weights_info(weights_info) { } @@ -73,6 +74,7 @@ struct Conv2dInfo bool enable_fast_math{ false }; unsigned int num_groups{ 1 }; experimental::PostOpList<ITensorInfo *> post_ops{}; + WeightsInfo weights_info{}; }; /** Descriptor used by the 3d Convolution function */ diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h index aa96716d38..2b4f848b22 100644 --- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h +++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -112,20 +112,21 @@ public: * |QASYMM8 |QASYMM8 |S32 |QASYMM8 | * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED | * - * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] weights Weights tensor. The weights must be 2 dimensional. - * If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions. - * If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension. 
- * Data type supported: Same as @p input. - * @param[in] biases Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED. - * @param[out] output Destination tensor. Its shape should be equal to the output of a matrix multiplication between: - * - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer - * - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer. - * Data type supported: Same as @p input. - * @param[in] fc_info (Optional) Fully connected layer additional info + * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[in] weights Weights tensor. The weights must be 2 dimensional. + * If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions. + * If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension. + * Data type supported: Same as @p input. + * @param[in] biases Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED. + * @param[out] output Destination tensor. Its shape should be equal to the output of a matrix multiplication between: + * - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer + * - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer. + * Data type supported: Same as @p input. 
+ * @param[in] fc_info (Optional) Fully connected layer additional info + * @param[in] weights_info (Optional) Stores necessary compute information when weights are already reshaped */ void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, - FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo()); + FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(), const WeightsInfo &weights_info = WeightsInfo()); /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer * * Similar to @ref NEFullyConnectedLayer * @@ -135,6 +136,21 @@ public: static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo()); + /** Static function that queries whether fixed-format kernel exists for a given problem description + * + * @param[out] expected_weight_format Format in which weights should be for found fixed format kernel + * @param[in] input Source tensor + * @param[in] weights Weights tensor. + * @param[in] biases Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED. 
+ * @param[in] output Destination tensor + * @param[in] fc_info Fully connected layer additional info + * @param[in] weights_info Describes weights shape + * + * @return a status + */ + static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *input, const ITensorInfo *weights, + const ITensorInfo *biases, const ITensorInfo *output, const FullyConnectedLayerInfo &fc_info, const WeightsInfo &weights_info); + //Inherited methods override void run() override; void prepare() override; diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h index ce68a61923..7ce2521148 100644 --- a/arm_compute/runtime/NEON/functions/NEGEMM.h +++ b/arm_compute/runtime/NEON/functions/NEGEMM.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -84,6 +84,15 @@ public: */ static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo()); + /** Static function that queries whether there exists fixed-format kernel and if it exists it will return in the first argument in what format + * weights are expected to be reshaped as defined by WeightFormat class. Apart from the first argument the rest of the arguments are the same + * as in @ref NEGEMM::validate() except that all arguments are required. + * + * @return a status + */ + static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, + float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo()); + // Inherited methods overridden: void run() override; void prepare() override; |