From 73023027d6250daaa1df49fdeb1d21e59a0bf7f5 Mon Sep 17 00:00:00 2001 From: Giorgio Arena Date: Tue, 4 Sep 2018 14:55:55 +0100 Subject: COMPMID-1539 Implement YOLOLayer on CL Change-Id: I332c0703e1399fca0c5b724529b54a28f49c88da Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146842 Tested-by: Jenkins Reviewed-by: Georgios Pinitas --- arm_compute/core/CL/CLHelpers.h | 8 + arm_compute/core/CL/CLKernels.h | 1 + arm_compute/core/CL/kernels/CLYOLOLayerKernel.h | 86 ++++++++++ arm_compute/core/Utils.h | 6 + arm_compute/runtime/CL/CLFunctions.h | 1 + arm_compute/runtime/CL/functions/CLYOLOLayer.h | 69 ++++++++ src/core/CL/CLHelpers.cpp | 30 ++++ src/core/CL/CLKernelLibrary.cpp | 6 + src/core/CL/cl_kernels/activation_helpers.h | 99 +++++++++++ src/core/CL/cl_kernels/activation_layer.cl | 74 +-------- src/core/CL/cl_kernels/yolo_layer.cl | 176 ++++++++++++++++++++ src/core/CL/kernels/CLActivationLayerKernel.cpp | 1 + .../CL/kernels/CLBatchNormalizationLayerKernel.cpp | 1 + .../CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp | 1 - src/core/CL/kernels/CLYOLOLayerKernel.cpp | 181 +++++++++++++++++++++ src/runtime/CL/functions/CLYOLOLayer.cpp | 42 +++++ tests/datasets/ActivationFunctionsDataset.h | 17 +- tests/datasets/ShapeDatasets.h | 32 ++++ tests/validation/CL/ActivationLayer.cpp | 9 +- tests/validation/CL/YOLOLayer.cpp | 127 +++++++++++++++ tests/validation/fixtures/YOLOLayerFixture.h | 162 ++++++++++++++++++ tests/validation/reference/ActivationLayer.cpp | 41 +---- tests/validation/reference/ActivationLayer.h | 50 +++++- tests/validation/reference/YOLOLayer.cpp | 80 +++++++++ tests/validation/reference/YOLOLayer.h | 47 ++++++ 25 files changed, 1224 insertions(+), 123 deletions(-) create mode 100644 arm_compute/core/CL/kernels/CLYOLOLayerKernel.h create mode 100644 arm_compute/runtime/CL/functions/CLYOLOLayer.h create mode 100644 src/core/CL/cl_kernels/activation_helpers.h create mode 100644 src/core/CL/cl_kernels/yolo_layer.cl create mode 100644 
src/core/CL/kernels/CLYOLOLayerKernel.cpp create mode 100644 src/runtime/CL/functions/CLYOLOLayer.cpp create mode 100644 tests/validation/CL/YOLOLayer.cpp create mode 100644 tests/validation/fixtures/YOLOLayerFixture.h create mode 100644 tests/validation/reference/YOLOLayer.cpp create mode 100644 tests/validation/reference/YOLOLayer.h diff --git a/arm_compute/core/CL/CLHelpers.h b/arm_compute/core/CL/CLHelpers.h index 18d6bdf49f..a86870a250 100644 --- a/arm_compute/core/CL/CLHelpers.h +++ b/arm_compute/core/CL/CLHelpers.h @@ -47,6 +47,14 @@ static constexpr unsigned int max_cl_vector_width = 16; */ std::string get_cl_type_from_data_type(const DataType &dt); +/** Translates a tensor data type to the appropriate OpenCL select type. + * + * @param[in] dt @ref DataType to be translated to OpenCL select type. + * + * @return The string specifying the OpenCL select type to be used. + */ +std::string get_cl_select_type_from_data_type(const DataType &dt); + /** Get the size of a data type in number of bits. * * @param[in] dt @ref DataType. diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h index 4a6773a5f8..9586d8cb34 100644 --- a/arm_compute/core/CL/CLKernels.h +++ b/arm_compute/core/CL/CLKernels.h @@ -123,6 +123,7 @@ #include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h" #include "arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h" #include "arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h" +#include "arm_compute/core/CL/kernels/CLYOLOLayerKernel.h" #include "arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h" #endif /* __ARM_COMPUTE_CLKERNELS_H__ */ diff --git a/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h b/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h new file mode 100644 index 0000000000..4c4aeac7e4 --- /dev/null +++ b/arm_compute/core/CL/kernels/CLYOLOLayerKernel.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLYOLOLAYERKERNEL_H__ +#define __ARM_COMPUTE_CLYOLOLAYERKERNEL_H__ + +#include "arm_compute/core/CL/ICLKernel.h" + +namespace arm_compute +{ +class ICLTensor; + +/** Interface for the YOLO layer kernel that performs partial activation. 
 + * For each box, activate only: + * - x and y position (channel 0 and 1 of each box) + * - objectiveness (channel 4 of each box) + * - classes (channel 5 to (num_classes + 4) of each box) + */ +class CLYOLOLayerKernel : public ICLKernel +{ +public: + /** Default constructor */ + CLYOLOLayerKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLYOLOLayerKernel(const CLYOLOLayerKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLYOLOLayerKernel &operator=(const CLYOLOLayerKernel &) = delete; + /** Allow instances of this class to be moved */ + CLYOLOLayerKernel(CLYOLOLayerKernel &&) = default; + /** Allow instances of this class to be moved */ + CLYOLOLayerKernel &operator=(CLYOLOLayerKernel &&) = default; + /** Default destructor */ + ~CLYOLOLayerKernel() = default; + /** Set the input and output tensor. + * + * @note If the output tensor is a nullptr, the activation function will be performed in-place + * + * @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[out] output Destination tensor. Data type supported: same as @p input + * @param[in] act_info Activation layer information. + * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + */ + void configure(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes); + /** Static function to check if given info will lead to a valid configuration of @ref CLYOLOLayerKernel + * + * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[in] output Destination tensor info. Data type supported: same as @p input + * @param[in] act_info Activation layer information. 
+ * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes); + + // Inherited methods overridden: + void run(const Window &window, cl::CommandQueue &queue) override; + +private: + ICLTensor *_input; + ICLTensor *_output; + bool _run_in_place; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_CLYOLOLAYERKERNEL_H__ */ diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h index 1cdfd389db..222f867e2c 100644 --- a/arm_compute/core/Utils.h +++ b/arm_compute/core/Utils.h @@ -1011,6 +1011,12 @@ inline std::string float_to_string_with_full_precision(float val) std::stringstream ss; ss.precision(std::numeric_limits::digits10 + 1); ss << val; + + if(val != static_cast(val)) + { + ss << "f"; + } + return ss.str(); } diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h index 2139d5dad3..935a1ae5d0 100644 --- a/arm_compute/runtime/CL/CLFunctions.h +++ b/arm_compute/runtime/CL/CLFunctions.h @@ -123,5 +123,6 @@ #include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h" #include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h" #include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h" +#include "arm_compute/runtime/CL/functions/CLYOLOLayer.h" #endif /* __ARM_COMPUTE_CLFUNCTIONS_H__ */ diff --git a/arm_compute/runtime/CL/functions/CLYOLOLayer.h b/arm_compute/runtime/CL/functions/CLYOLOLayer.h new file mode 100644 index 0000000000..9931122226 --- /dev/null +++ b/arm_compute/runtime/CL/functions/CLYOLOLayer.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018 ARM Limited. 
 + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLYOLOLAYER_H__ +#define __ARM_COMPUTE_CLYOLOLAYER_H__ + +#include "arm_compute/runtime/CL/ICLSimpleFunction.h" + +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +class ICLTensor; + +/** Basic function to run @ref CLYOLOLayerKernel that performs a partial activation on the input + * + * For each box, activate only: + * - x and y position (channel 0 and 1 of each box) + * - objectiveness (channel 4 of each box) + * - classes (channel 5 to (num_classes + 4) of each box) + */ +class CLYOLOLayer : public ICLSimpleFunction +{ +public: + /** Set the input and output tensor. + * + * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place + * + * @param[in, out] input Source tensor. 
In case of @p output tensor = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[out] output Destination tensor. Data type supported: same as @p input + * @param[in] act_info Activation layer parameters. + * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + */ + void configure(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes); + /** Static function to check if given info will lead to a valid configuration of @ref CLYOLOLayer + * + * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[in] output Destination tensor info. Data type supported: same as @p input + * @param[in] act_info Activation layer information. + * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes); +}; +} // namespace arm_compute +#endif /* __ARM_COMPUTE_CLYOLOLAYER_H__ */ diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp index 9703b0fe16..5c435ddc22 100644 --- a/src/core/CL/CLHelpers.cpp +++ b/src/core/CL/CLHelpers.cpp @@ -64,6 +64,36 @@ std::string get_cl_type_from_data_type(const DataType &dt) } } +std::string get_cl_select_type_from_data_type(const DataType &dt) +{ + switch(dt) + { + case DataType::U8: + return "uchar"; + case DataType::S8: + return "char"; + case DataType::QASYMM8: + return "uchar"; + case DataType::U16: + return "ushort"; + case DataType::F16: + case DataType::S16: + return "short"; + case DataType::U32: + return "uint"; + case DataType::F32: + case DataType::S32: + return "int"; + case DataType::U64: + return "ulong"; + case DataType::S64: + return "long"; + 
default: + ARM_COMPUTE_ERROR("Unsupported input data type."); + return ""; + } +} + std::string get_data_size_from_data_type(const DataType &dt) { switch(dt) diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 7cc586bff1..75ff2482c8 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -428,6 +428,8 @@ const std::map CLKernelLibrary::_kernel_program_map = { "winograd_output_transform_4x4_5x5_nhwc", "winograd_output_transform.cl" }, { "winograd_output_transform_4x1_5x1_nhwc", "winograd_output_transform.cl" }, { "winograd_output_transform_1x4_1x5_nhwc", "winograd_output_transform.cl" }, + { "yolo_layer_nchw", "yolo_layer.cl" }, + { "yolo_layer_nhwc", "yolo_layer.cl" }, { "YUYV422_to_IYUV_bt709", "color_convert.cl" }, { "YUYV422_to_NV12_bt709", "color_convert.cl" }, { "YUYV422_to_RGB888_bt709", "color_convert.cl" }, @@ -797,6 +799,10 @@ const std::map CLKernelLibrary::_program_source_map = "winograd_output_transform.cl", #include "./cl_kernels/winograd_output_transform.clembed" }, + { + "yolo_layer.cl", +#include "./cl_kernels/yolo_layer.clembed" + }, #endif /* EMBEDDED_KERNELS */ }; diff --git a/src/core/CL/cl_kernels/activation_helpers.h b/src/core/CL/cl_kernels/activation_helpers.h new file mode 100644 index 0000000000..dfab082381 --- /dev/null +++ b/src/core/CL/cl_kernels/activation_helpers.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
 + */ +#include "helpers.h" + +#if defined(TYPE) && defined(SELECT_TYPE) + +#define CONST_ONE 1.f +#define ABS_OP(a) fabs((a)) +#define ADD_OP(a, b) ((a) + (b)) +#define SUB_OP(a, b) ((a) - (b)) +#define MUL_OP(a, b) ((a) * (b)) +#define MLA_OP(a, b, c) ((b) * (c) + (a)) +#define DIV_OP(a, b) ((a) / (b)) +#define EXP_OP(a) exp((a)) +#define LOG_OP(a) log((a)) +#define SQRT_OP(a) sqrt((a)) +#define TANH_OP(a) tanh((a)) + +// Logistic Activation +inline TYPE logistic_op(TYPE x) +{ + return DIV_OP((TYPE)CONST_ONE, ADD_OP((TYPE)CONST_ONE, EXP_OP(-x))); +} +// Hyperbolic Tangent Activation +inline TYPE tanh_op(TYPE x) +{ + return MUL_OP((TYPE)A_VAL, TANH_OP(MUL_OP((TYPE)B_VAL, x))); +} +// RELU Activation +inline TYPE relu_op(TYPE x) +{ + return max((TYPE)0, x); +} +// Bounded RELU Activation +inline TYPE brelu_op(TYPE x) +{ + return min((TYPE)A_VAL, max((TYPE)0, x)); +} +// Lower Upper Bounded RELU Activation +inline TYPE lu_brelu_op(TYPE x) +{ + return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL); +} +// Leaky RELU Activation +inline TYPE lrelu_op(TYPE x) +{ + return select(MUL_OP((TYPE)A_VAL, x), x, CONVERT(x > (TYPE)0, SELECT_TYPE)); +} +// Soft RELU Activation +inline TYPE srelu_op(TYPE x) +{ + return LOG_OP(ADD_OP((TYPE)CONST_ONE, EXP_OP(x))); +} +// Absolute Activation +inline TYPE abs_op(TYPE x) +{ + return ABS_OP(x); +} +// Square Activation +inline TYPE square_op(TYPE x) +{ + return MUL_OP(x, x); +} +// Square-root Activation +inline TYPE sqrt_op(TYPE x) +{ + return SQRT_OP(x); +} +// Linear Activation +inline TYPE linear_op(TYPE x) +{ + return MLA_OP((TYPE)B_VAL, (TYPE)A_VAL, x); +} + +#define ACTIVATION_OP2(op, x) op##_op(x) +#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x) + +#endif // defined(TYPE) && defined(SELECT_TYPE) \ No newline at end of file diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl index 373406a6da..cf1f434972 100644 --- a/src/core/CL/cl_kernels/activation_layer.cl +++ 
b/src/core/CL/cl_kernels/activation_layer.cl @@ -21,80 +21,10 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "helpers.h" - #define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) +#define SELECT_TYPE VEC_DATA_TYPE(SELECT_DATA_TYPE, VEC_SIZE) -#define CONST_ONE 1.f -#define ABS_OP(a) fabs((a)) -#define ADD_OP(a, b) ((a) + (b)) -#define SUB_OP(a, b) ((a) - (b)) -#define MUL_OP(a, b) ((a) * (b)) -#define MLA_OP(a, b, c) ((b) * (c) + (a)) -#define DIV_OP(a, b) ((a) / (b)) -#define EXP_OP(a) exp((a)) -#define LOG_OP(a) log((a)) -#define SQRT_OP(a) sqrt((a)) -#define TANH_OP(a) tanh((a)) - -// Logistic Activation -inline TYPE logistic_op(TYPE x) -{ - return DIV_OP((TYPE)CONST_ONE, ADD_OP((TYPE)CONST_ONE, EXP_OP(-x))); -} -// Hyperbolic Tangent Activation -inline TYPE tanh_op(TYPE x) -{ - return MUL_OP((TYPE)A_VAL, TANH_OP(MUL_OP((TYPE)B_VAL, x))); -} -// RELU Tangent Activation -inline TYPE relu_op(TYPE x) -{ - return max(0, x); -} -// Bounded RELU Activation -inline TYPE brelu_op(TYPE x) -{ - return min((TYPE)A_VAL, max(0, x)); -} -// Lower Upper Bounded RELU Activation -inline TYPE lu_brelu_op(TYPE x) -{ - return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL); -} -// Leaky RELU Activation -inline TYPE lrelu_op(TYPE x) -{ - return select(MUL_OP((TYPE)A_VAL, x), x, x > (TYPE)0); -} -// Soft RELU Activation -inline TYPE srelu_op(TYPE x) -{ - return LOG_OP(ADD_OP((TYPE)CONST_ONE, EXP_OP(x))); -} -// Absolute Activation -inline TYPE abs_op(TYPE x) -{ - return ABS_OP(x); -} -// Square Activation -inline TYPE square_op(TYPE x) -{ - return MUL_OP(x, x); -} -// Square-root Activation -inline TYPE sqrt_op(TYPE x) -{ - return SQRT_OP(x); -} -// Linear Activation -inline TYPE linear_op(TYPE x) -{ - return MLA_OP((TYPE)B_VAL, (TYPE)A_VAL, x); -} - -#define ACTIVATION_OP2(op, x) op##_op(x) -#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x) +#include "activation_helpers.h" #if defined(ACT) diff --git a/src/core/CL/cl_kernels/yolo_layer.cl 
b/src/core/CL/cl_kernels/yolo_layer.cl new file mode 100644 index 0000000000..2240d7c637 --- /dev/null +++ b/src/core/CL/cl_kernels/yolo_layer.cl @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#if defined(DATA_TYPE) && defined(SELECT_DATA_TYPE) && defined(ACT) && defined(NUM_CLASSES) && defined(VEC_SIZE) + +#if VEC_SIZE != 1 +#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) +#define SELECT_TYPE VEC_DATA_TYPE(SELECT_DATA_TYPE, VEC_SIZE) + +#include "activation_helpers.h" + +/** This performs a YOLO partial activation function for NCHW data layout + * + * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time + * + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. 
-DDATA_TYPE=short + * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 + * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH + * @note The number of classes should be given as a preprocessor argument using -DNUM_CLASSES=num. e.g. -DNUM_CLASSES=80 + * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively. + * + * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32 + * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] output_ptr (Optional) Pointer to the destination tensor. 
Supported data types: same as @p input_ptr + * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes) + * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes) + * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes) + * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor + */ +__kernel void yolo_layer_nchw( + TENSOR3D_DECLARATION(input) +#ifndef IN_PLACE + , + TENSOR3D_DECLARATION(output) +#endif /* not IN_PLACE */ +) +{ + // Get pixels pointer + Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); +#ifdef IN_PLACE + Tensor3D output = input; +#else /* IN_PLACE */ + Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); +#endif /* IN_PLACE */ + + const int box_ch_id = get_global_id(2) % (NUM_CLASSES + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + if(activate) + { + // Load data + TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr); + data = ACTIVATION_OP(ACT, data); // select(1.0f, ACTIVATION_OP(ACT, data), (SELECT_TYPE)activate); + + // Store result + VSTORE(VEC_SIZE) + (data, 0, (__global DATA_TYPE *)output.ptr); + } +#ifndef IN_PLACE + else + { + // Load data + TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr); + + // Store result + VSTORE(VEC_SIZE) + (data, 0, (__global DATA_TYPE *)output.ptr); + } +#endif // IN_PLACE +} + +#else // VEC_SIZE != 1 + +#define TYPE DATA_TYPE +#define SELECT_TYPE SELECT_DATA_TYPE + +#include "activation_helpers.h" + +/** This performs a YOLO partial 
activation function for NHWC data layout + * + * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time + * + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short + * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=1 + * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH + * @note The number of classes should be given as a preprocessor argument using -DNUM_CLASSES=num. e.g. -DNUM_CLASSES=80 + * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively. + * + * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32 + * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] output_ptr (Optional) Pointer to the destination tensor. 
Supported data types: same as @p input_ptr + * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes) + * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes) + * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes) + * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor + */ +__kernel void yolo_layer_nhwc( + TENSOR3D_DECLARATION(input) +#ifndef IN_PLACE + , + TENSOR3D_DECLARATION(output) +#endif /* not IN_PLACE */ +) +{ + // Get pixels pointer + Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); +#ifdef IN_PLACE + Tensor3D output = input; +#else /* IN_PLACE */ + Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); +#endif /* IN_PLACE */ + + const int box_ch_id = get_global_id(0) % (NUM_CLASSES + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + if(activate) + { + // Load data + DATA_TYPE data = *((__global DATA_TYPE *)input.ptr); + data = select(data, ACTIVATION_OP(ACT, data), (SELECT_TYPE)activate); + + // Store result + *((__global DATA_TYPE *)output.ptr) = data; + } +#ifndef IN_PLACE + else + { + // Load data + DATA_TYPE data = *((__global DATA_TYPE *)input.ptr); + + // Store result + *((__global DATA_TYPE *)output.ptr) = data; + } +#endif // IN_PLACE +} + +#endif // VEC_SIZE != 1 +#endif // defined(DATA_TYPE) && defined(SELECT_DATA_TYPE) && defined(ACT) && defined(NUM_CLASSES) && defined(VEC_SIZE) \ No newline at end of file diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp 
b/src/core/CL/kernels/CLActivationLayerKernel.cpp index a15e99b8d4..73a4d7d2c6 100644 --- a/src/core/CL/kernels/CLActivationLayerKernel.cpp +++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp @@ -133,6 +133,7 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act std::set build_opts; build_opts.emplace(("-DACT=" + lower_string(string_from_activation_func(act_info.activation())))); build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(dt))); + build_opts.emplace(("-DSELECT_DATA_TYPE=" + get_cl_select_type_from_data_type(dt))); build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration))); if(is_data_type_quantized(dt)) diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp index 1fa5c8521f..07bcb75a6a 100644 --- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp @@ -159,6 +159,7 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out // Set build options CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); + build_opts.add_option("-DSELECT_DATA_TYPE=" + get_cl_select_type_from_data_type(input->info()->data_type())); build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); build_opts.add_option_if(act_info.enabled(), "-DFUSED_ACTIVATION=" + lower_string(string_from_activation_func(act_info.activation()))); build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a())); diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp index cc8384c81b..d56ac01a83 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp +++ 
b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp @@ -173,7 +173,6 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, _border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0); const unsigned int num_elems_accessed_per_iteration = is_qasymm ? 4 : (8 / input->info()->element_size()); - ; CLBuildOptions build_opts; build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS"); diff --git a/src/core/CL/kernels/CLYOLOLayerKernel.cpp b/src/core/CL/kernels/CLYOLOLayerKernel.cpp new file mode 100644 index 0000000000..7d9dbd4ac5 --- /dev/null +++ b/src/core/CL/kernels/CLYOLOLayerKernel.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/CL/kernels/CLYOLOLayerKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/CL/CLValidate.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Window.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/Types.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + ARM_COMPUTE_UNUSED(act_info); + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); + + const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); + ARM_COMPUTE_RETURN_ERROR_ON(num_classes <= 0); + ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(channel_idx) % (num_classes + 5)) != 0); + + // Checks performed when output is configured + if((output != nullptr) && (output->total_size() != 0)) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + + return Status{}; +} + +std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) +{ + if(output != nullptr) + { + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output, *input); + } + + const bool is_nchw = input->data_layout() == DataLayout::NCHW; + const unsigned int num_elems_processed_per_iteration = is_nchw ? 
16 / input->element_size() : 1; + + Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration)); + bool window_changed = false; + + if(output != nullptr) + { + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); + AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); + window_changed = update_window_and_padding(win, input_access, output_access); + output_access.set_valid_region(win, input->valid_region()); + } + else + { + window_changed = update_window_and_padding(win, AccessWindowHorizontal(input, 0, num_elems_processed_per_iteration)); + } + + Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; + return std::make_pair(err, win); +} +} // namespace + +CLYOLOLayerKernel::CLYOLOLayerKernel() + : _input(nullptr), _output(nullptr), _run_in_place(false) +{ +} + +void CLYOLOLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input); + + _run_in_place = (output == nullptr) || (output == input); + + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, act_info, num_classes)); + + const bool is_nchw = input->info()->data_layout() == DataLayout::NCHW; + const unsigned int num_elems_processed_per_iteration = is_nchw ? 
16 / input->info()->element_size() : 1; + const DataType dt = input->info()->data_type(); + float a_const = act_info.a(); + float b_const = act_info.b(); + + // Set build options + CLBuildOptions build_opts; + build_opts.add_option("-DACT=" + lower_string(string_from_activation_func(act_info.activation()))); + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)); + build_opts.add_option("-DSELECT_DATA_TYPE=" + get_cl_select_type_from_data_type(dt)); + build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); + build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(a_const)); + build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(b_const)); + build_opts.add_option("-DNUM_CLASSES=" + support::cpp11::to_string(num_classes)); + build_opts.add_option_if(_run_in_place, "-DIN_PLACE"); + + // Create kernel + std::string kernel_name = std::string("yolo_layer_") + lower_string(string_from_data_layout(input->info()->data_layout())); + _kernel = static_cast(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options())); + + // Make sure _kernel is initialized before calling the parent's configure + _input = input; + _output = output; + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), (_run_in_place) ? 
nullptr : output->info()); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + ICLKernel::configure_internal(win_config.second); + + // Set config_id for enabling LWS tuning + _config_id = "yolo_layer_"; + _config_id += lower_string(string_from_data_type(dt)); + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(0)); + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(1)); + _config_id += "_"; + _config_id += lower_string(string_from_data_layout(input->info()->data_layout())); +} + +Status CLYOLOLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + const bool run_in_place = (output == nullptr) || (output == input); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, act_info, num_classes)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (run_in_place) ? nullptr : output->clone().get()).first); + + return Status{}; +} + +void CLYOLOLayerKernel::run(const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); + Window slice = collapsed.first_slice_window_3D(); + + do + { + unsigned int idx = 0; + add_3D_tensor_argument(idx, _input, slice); + if(!_run_in_place) + { + add_3D_tensor_argument(idx, _output, slice); + } + enqueue(queue, *this, slice, lws_hint()); + } + while(collapsed.slide_window_slice_3D(slice)); +} +} // namespace arm_compute \ No newline at end of file diff --git a/src/runtime/CL/functions/CLYOLOLayer.cpp b/src/runtime/CL/functions/CLYOLOLayer.cpp new file mode 100644 index 0000000000..5a612ba4b4 --- /dev/null +++ b/src/runtime/CL/functions/CLYOLOLayer.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/CL/functions/CLYOLOLayer.h" + +#include "arm_compute/core/CL/kernels/CLYOLOLayerKernel.h" +#include "arm_compute/core/Types.h" +#include "support/ToolchainSupport.h" + +using namespace arm_compute; + +void CLYOLOLayer::configure(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, output, act_info, num_classes); + _kernel = std::move(k); +} + +Status CLYOLOLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + return CLYOLOLayerKernel::validate(input, output, act_info, num_classes); +} diff --git a/tests/datasets/ActivationFunctionsDataset.h b/tests/datasets/ActivationFunctionsDataset.h index 31323dc8be..147c5ae51b 100644 --- a/tests/datasets/ActivationFunctionsDataset.h +++ b/tests/datasets/ActivationFunctionsDataset.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -56,6 +56,21 @@ public: { } }; + +class ActivationFunctionsQuantized final : public framework::dataset::ContainerDataset> +{ +public: + ActivationFunctionsQuantized() + : ContainerDataset("ActivationFunctionQuantized", + { + ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, + ActivationLayerInfo::ActivationFunction::RELU, + ActivationLayerInfo::ActivationFunction::LOGISTIC, + ActivationLayerInfo::ActivationFunction::BOUNDED_RELU + }) + { + } +}; } // namespace datasets } // namespace test } // namespace arm_compute diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h index c7955bc8c5..551e7ffa8c 100644 --- a/tests/datasets/ShapeDatasets.h +++ b/tests/datasets/ShapeDatasets.h @@ -846,6 +846,38 @@ public: { } }; + +/** Data set containing small YOLO tensor shapes. 
*/ +class SmallYOLOShapes final : public ShapeDataset +{ +public: + SmallYOLOShapes() + : ShapeDataset("Shape", + { + // Batch size 1 + TensorShape{ 11U, 11U, 270U }, + TensorShape{ 27U, 13U, 90U }, + TensorShape{ 128U, 64U, 45U, 2U }, + TensorShape{ 11U, 11U, 45U, 3U } + }) + { + } +}; + +/** Data set containing large YOLO tensor shapes. */ +class LargeYOLOShapes final : public ShapeDataset +{ +public: + LargeYOLOShapes() + : ShapeDataset("Shape", + { + TensorShape{ 24U, 23U, 270U }, + TensorShape{ 51U, 63U, 90U, 2U }, + TensorShape{ 76U, 91U, 45U, 3U } + }) + { + } +}; } // namespace datasets } // namespace test } // namespace arm_compute diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp index d91f7082b4..8a6d5ad88a 100644 --- a/tests/validation/CL/ActivationLayer.cpp +++ b/tests/validation/CL/ActivationLayer.cpp @@ -202,14 +202,7 @@ TEST_SUITE_END() template using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture; -/** Input data sets. */ -const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, - ActivationLayerInfo::ActivationFunction::RELU, - ActivationLayerInfo::ActivationFunction::LOGISTIC, - ActivationLayerInfo::ActivationFunction::BOUNDED_RELU - }); - -const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), QuantizedActivationFunctionsDataset), +const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctionsQuantized()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f })); TEST_SUITE(Quantized) diff --git a/tests/validation/CL/YOLOLayer.cpp b/tests/validation/CL/YOLOLayer.cpp new file mode 100644 index 0000000000..d8e6e54246 --- /dev/null +++ b/tests/validation/CL/YOLOLayer.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/CLTensorAllocator.h" +#include "arm_compute/runtime/CL/functions/CLYOLOLayer.h" +#include "tests/CL/CLAccessor.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets/ActivationFunctionsDataset.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/YOLOLayerFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +/** Define tolerance of the yolo layer. + * + * @param[in] activation The activation function used. + * @param[in] data_type Data type. 
+ * + * @return Tolerance depending on the activation function. + */ +AbsoluteTolerance tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type) +{ + constexpr float epsilon = 1e-6f; + + switch(activation) + { + case ActivationLayerInfo::ActivationFunction::LINEAR: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.2f : epsilon); + case ActivationLayerInfo::ActivationFunction::SQUARE: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.1f : epsilon); + case ActivationLayerInfo::ActivationFunction::LOGISTIC: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : epsilon); + case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.00001f : epsilon); + case ActivationLayerInfo::ActivationFunction::SOFT_RELU: + case ActivationLayerInfo::ActivationFunction::SQRT: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.01f : 0.00001f); + case ActivationLayerInfo::ActivationFunction::TANH: + return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : 0.00001f); + default: + return AbsoluteTolerance(epsilon); + } +} + +/** Floating point data sets. 
*/ +const auto YOLODataset = combine(combine(combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), + framework::dataset::make("AlphaBeta", { 0.5f, 1.f })), + framework::dataset::make("Classes", 40)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })); +} // namespace + +TEST_SUITE(CL) +TEST_SUITE(YOLOLayer) + +template +using CLYOLOLayerFixture = YOLOValidationFixture; + +TEST_SUITE(Float) +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, CLYOLOLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallYOLOShapes(), YOLODataset), framework::dataset::make("DataType", + DataType::F32))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLYOLOLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeYOLOShapes(), YOLODataset), framework::dataset::make("DataType", + DataType::F32))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); +} +TEST_SUITE_END() // FP32 + +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLYOLOLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallYOLOShapes(), YOLODataset), framework::dataset::make("DataType", + DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); +} +FIXTURE_DATA_TEST_CASE(RunLarge, CLYOLOLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeYOLOShapes(), YOLODataset), framework::dataset::make("DataType", + DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); +} +TEST_SUITE_END() // FP16 +TEST_SUITE_END() // Float + +TEST_SUITE_END() // YOLOLayer +TEST_SUITE_END() // CL +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git 
a/tests/validation/fixtures/YOLOLayerFixture.h b/tests/validation/fixtures/YOLOLayerFixture.h new file mode 100644 index 0000000000..a3842e1e8a --- /dev/null +++ b/tests/validation/fixtures/YOLOLayerFixture.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE +#define ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/YOLOLayer.h" + +#include + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template +class YOLOValidationGenericFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type, + QuantizationInfo quantization_info) + { + _data_type = data_type; + _function = function; + + ActivationLayerInfo info(function, alpha_beta, alpha_beta); + + _target = compute_target(shape, in_place, info, num_classes, data_layout, data_type, quantization_info); + _reference = compute_reference(shape, info, num_classes, data_type, quantization_info); + } + +protected: + template + void fill(U &&tensor) + { + float min_bound = 0; + float max_bound = 0; + std::tie(min_bound, max_bound) = get_activation_layer_test_bounds(_function, _data_type); + std::uniform_real_distribution<> distribution(min_bound, max_bound); + library->fill(tensor, distribution, 0); + } + + TensorType compute_target(TensorShape shape, bool in_place, const ActivationLayerInfo &info, int32_t num_classes, DataLayout data_layout, DataType data_type, QuantizationInfo quantization_info) + { + if(data_layout == DataLayout::NHWC) + { + permute(shape, PermutationVector(2U, 0U, 1U)); + } + + // Create tensors + TensorType src = create_tensor(shape, data_type, 1, quantization_info, data_layout); + TensorType dst = create_tensor(shape, data_type, 1, quantization_info, data_layout); + + // Create and configure function + 
FunctionType yolo_layer; + + TensorType *dst_ptr = in_place ? &src : &dst; + + yolo_layer.configure(&src, dst_ptr, info, num_classes); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + src.allocator()->allocate(); + ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + + if(!in_place) + { + dst.allocator()->allocate(); + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + } + + // Fill tensors + fill(AccessorType(src)); + + // Compute function + yolo_layer.run(); + + if(in_place) + { + return src; + } + else + { + return dst; + } + } + + SimpleTensor compute_reference(const TensorShape &shape, const ActivationLayerInfo &info, int32_t num_classes, DataType data_type, QuantizationInfo quantization_info) + { + // Create reference + SimpleTensor src{ shape, data_type, 1, quantization_info }; + + // Fill reference + fill(src); + + return reference::yolo_layer(src, info, num_classes); + } + + TensorType _target{}; + SimpleTensor _reference{}; + DataType _data_type{}; + ActivationLayerInfo::ActivationFunction _function{}; +}; + +template +class YOLOValidationFixture : public YOLOValidationGenericFixture +{ +public: + template + void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type) + { + YOLOValidationGenericFixture::setup(shape, in_place, function, alpha_beta, num_classes, data_layout, data_type, QuantizationInfo()); + } +}; + +template +class YOLOValidationQuantizedFixture : public YOLOValidationGenericFixture +{ +public: + template + void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, int32_t num_classes, DataLayout data_layout, DataType data_type, + QuantizationInfo quantization_info) + { + 
YOLOValidationGenericFixture::setup(shape, in_place, function, alpha_beta, num_classes, data_layout, data_type, quantization_info); + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ARM_COMPUTE_TEST_YOLO_LAYER_FIXTURE diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp index 9455effd72..9750ea95a6 100644 --- a/tests/validation/reference/ActivationLayer.cpp +++ b/tests/validation/reference/ActivationLayer.cpp @@ -46,46 +46,7 @@ SimpleTensor activation_layer(const SimpleTensor &src, ActivationLayerInfo for(int i = 0; i < src.num_elements(); ++i) { - T x = src[i]; - - switch(info.activation()) - { - case ActivationLayerInfo::ActivationFunction::ABS: - dst[i] = std::abs(x); - break; - case ActivationLayerInfo::ActivationFunction::LINEAR: - dst[i] = a * x + b; - break; - case ActivationLayerInfo::ActivationFunction::LOGISTIC: - dst[i] = static_cast(1) / (static_cast(1) + std::exp(-x)); - break; - case ActivationLayerInfo::ActivationFunction::RELU: - dst[i] = std::max(static_cast(0), x); - break; - case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: - dst[i] = std::min(a, std::max(static_cast(0), x)); - break; - case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU: - dst[i] = std::min(a, std::max(b, x)); - break; - case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: - dst[i] = (x > 0) ? 
x : a * x; - break; - case ActivationLayerInfo::ActivationFunction::SOFT_RELU: - dst[i] = std::log(static_cast(1) + std::exp(x)); - break; - case ActivationLayerInfo::ActivationFunction::SQRT: - dst[i] = std::sqrt(x); - break; - case ActivationLayerInfo::ActivationFunction::SQUARE: - dst[i] = x * x; - break; - case ActivationLayerInfo::ActivationFunction::TANH: - dst[i] = a * std::tanh(b * x); - break; - default: - ARM_COMPUTE_ERROR("Unsupported activation function"); - } + dst[i] = activate_float(src[i], a, b, info.activation()); } return dst; diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h index 09f602ffa1..c752e74733 100644 --- a/tests/validation/reference/ActivationLayer.h +++ b/tests/validation/reference/ActivationLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -35,6 +35,54 @@ namespace validation { namespace reference { +template +inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction activation) +{ + T ret; + + switch(activation) + { + case ActivationLayerInfo::ActivationFunction::ABS: + ret = std::abs(x); + break; + case ActivationLayerInfo::ActivationFunction::LINEAR: + ret = a * x + b; + break; + case ActivationLayerInfo::ActivationFunction::LOGISTIC: + ret = static_cast(1) / (static_cast(1) + std::exp(-x)); + break; + case ActivationLayerInfo::ActivationFunction::RELU: + ret = std::max(static_cast(0), x); + break; + case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: + ret = std::min(a, std::max(static_cast(0), x)); + break; + case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU: + ret = std::min(a, std::max(b, x)); + break; + case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: + ret = (x > 0) ? 
x : a * x; + break; + case ActivationLayerInfo::ActivationFunction::SOFT_RELU: + ret = std::log(static_cast(1) + std::exp(x)); + break; + case ActivationLayerInfo::ActivationFunction::SQRT: + ret = std::sqrt(x); + break; + case ActivationLayerInfo::ActivationFunction::SQUARE: + ret = x * x; + break; + case ActivationLayerInfo::ActivationFunction::TANH: + ret = a * std::tanh(b * x); + break; + default: + ARM_COMPUTE_ERROR("Unsupported activation function"); + break; + } + + return ret; +} + template ::value, int>::type = 0> SimpleTensor activation_layer(const SimpleTensor &src, ActivationLayerInfo info); diff --git a/tests/validation/reference/YOLOLayer.cpp b/tests/validation/reference/YOLOLayer.cpp new file mode 100644 index 0000000000..a12f411680 --- /dev/null +++ b/tests/validation/reference/YOLOLayer.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "YOLOLayer.h" + +#include "ActivationLayer.h" + +#include "arm_compute/core/Types.h" +#include "tests/validation/Helpers.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template ::value, int>::type> +SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes) +{ + // Create reference + SimpleTensor dst{ src.shape(), src.data_type() }; + + // Compute reference + const T a(info.a()); + const T b(info.b()); + + for(int i = 0; i < src.num_elements(); ++i) + { + const size_t z = index2coord(dst.shape(), i).z() % (num_classes + 5); + + if(z != 2 && z != 3) + { + dst[i] = activate_float(src[i], a, b, info.activation()); + } + else + { + dst[i] = src[i]; + } + } + + return dst; +} + +template <> +SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + SimpleTensor dst_tmp = yolo_layer(src_tmp, info, num_classes); + SimpleTensor dst = convert_to_asymmetric(dst_tmp, src.quantization_info()); + return dst; +} + +template SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes); +template SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/reference/YOLOLayer.h b/tests/validation/reference/YOLOLayer.h new file mode 100644 index 0000000000..659f1dd2d9 --- /dev/null +++ b/tests/validation/reference/YOLOLayer.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_TEST_YOLO_LAYER_H__ +#define __ARM_COMPUTE_TEST_YOLO_LAYER_H__ + +#include "tests/SimpleTensor.h" +#include "tests/validation/Helpers.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template ::value, int>::type = 0> +SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes); + +template ::value, int>::type = 0> +SimpleTensor yolo_layer(const SimpleTensor &src, const ActivationLayerInfo &info, int32_t num_classes); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* __ARM_COMPUTE_TEST_YOLO_LAYER_H__ */ -- cgit v1.2.1