-rw-r--r--  arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h |  18
-rw-r--r--  arm_compute/runtime/CL/functions/CLSoftmaxLayer.h  |   4
-rw-r--r--  src/core/CL/CLHelpers.cpp                          |   2
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                    |   4
-rw-r--r--  src/core/CL/cl_kernels/fixed_point.h               | 229
-rw-r--r--  src/core/CL/cl_kernels/helpers.h                   |   2
-rw-r--r--  src/core/CL/cl_kernels/softmax_layer.cl            |  85
-rw-r--r--  src/core/CL/kernels/CLSoftmaxLayerKernel.cpp       |  39
-rw-r--r--  src/runtime/CL/functions/CLSoftmaxLayer.cpp        |   8
-rw-r--r--  tests/benchmark/main.cpp                           |   2
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp               | 195
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp             |   2
-rw-r--r--  tests/validation/Reference.cpp                     |   2
-rw-r--r--  tests/validation/main.cpp                          |   2
14 files changed, 532 insertions(+), 62 deletions(-)
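The central addition in this patch is QS8 support: a signed 8-bit fixed-point format in which the low FIXED_POINT_POSITION bits hold the fractional part, so a raw value r represents r / 2^pos. As orientation before reading the diff, here is a minimal host-side sketch of that representation (the helper names are illustrative, not part of the patch):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Convert a float to QS8: scale by 2^pos, round to nearest, saturate to int8.
int8_t float_to_qs8(float x, int fixed_point_position)
{
    const float scaled  = x * static_cast<float>(1 << fixed_point_position);
    const float clamped = std::max(-128.f, std::min(127.f, std::round(scaled)));
    return static_cast<int8_t>(clamped);
}

// Convert a QS8 value back to float: divide the raw value by 2^pos.
float qs8_to_float(int8_t x, int fixed_point_position)
{
    return static_cast<float>(x) / static_cast<float>(1 << fixed_point_position);
}
```

For example, with fixed_point_position = 4 the value 1.0 is stored as 16, and the representable range is [-8.0, 7.9375].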
diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index 0806974ad6..024169a7c5 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -36,8 +36,8 @@ class CLLogits1DMaxKernel : public ICLSimple2DKernel
 public:
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: F16, F32. Number of channels must be 1.
-     * @param[out] output Destination tensor. Matching input type and channel number.
+     * @param[in]  input  Source tensor. Data types supported: QS8/F16/F32
+     * @param[out] output Destination tensor. Data types supported: same as @p input
      */
     void configure(const ICLTensor *input, ICLTensor *output);
 };
@@ -58,10 +58,10 @@ public:
     CLLogits1DShiftExpSumKernel &operator=(CLLogits1DShiftExpSumKernel &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: F16, F32. Number of channels must be 1.
-     * @param[in]  max    Max values tensor. Matching input type and channel number.
-     * @param[out] output Destination tensor. Matching input type and channel number.
-     * @param[out] sum    Sum of 1D logits tensor. Matching input type and channel number.
+     * @param[in]  input  Source tensor. Data types supported: QS8/F16/F32
+     * @param[in]  max    Max values tensor. Data types supported: same as @p input
+     * @param[out] output Destination tensor. Data types supported: same as @p input
+     * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input
      */
     void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum);
@@ -91,9 +91,9 @@ public:
     CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: F16, F32. Number of channels must be 1.
-     * @param[in]  sum    Sum tensor. Dimensions should be dim(input)-1. Matching input type and channel number.
-     * @param[out] output Destination tensor. Matching input type and channel number.
+     * @param[in]  input  Source tensor. Data types supported: QS8/F16/F32
+     * @param[in]  sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
+     * @param[out] output Destination tensor. Data types supported: same as @p input
      */
     void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index 42cfc06fc4..a5b19bd5e1 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -49,8 +49,8 @@ public:
     CLSoftmaxLayer();
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor. Data types supported: F16, F32. Number of channels must be 1.
-     * @param[out] output Destination tensor. Matching input type and channel number.
+     * @param[in]  input  Source tensor. Data types supported: QS8/F16/F32
+     * @param[out] output Destination tensor. Data types supported: same as @p input
      */
     void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 21b72ddd3b..4b5bbbbb49 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -72,6 +72,8 @@ std::string get_cl_type_from_data_type(const DataType &dt)
     {
         case DataType::U8:
             return "uchar";
+        case DataType::QS8:
+            return "qs8";
         case DataType::S8:
             return "char";
         case DataType::U16:
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 6c64265785..081edac8e1 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -353,6 +353,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
 #include "./cl_kernels/fill_border.clembed"
     },
     {
+        "fixed_point.h",
+#include "./cl_kernels/fixed_point.hembed"
+    },
+    {
         "gaussian_pyramid.cl",
 #include "./cl_kernels/gaussian_pyramid.clembed"
     },
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
new file mode 100644
index 0000000000..2c100c2e28
--- /dev/null
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_FIXED_POINT_H
+#define ARM_COMPUTE_FIXED_POINT_H
+
+#define TYPE_ALIAS(type, alias)  \
+    typedef type alias;          \
+    typedef type alias##x##1;    \
+    typedef type##2 alias##x##2; \
+    typedef type##3 alias##x##3; \
+    typedef type##4 alias##x##4; \
+    typedef type##8 alias##x##8; \
+    typedef type##16 alias##x##16;
+
+TYPE_ALIAS(char, qs8)
+TYPE_ALIAS(short, qs16)
+
+#define qs8_MIN ((char)CHAR_MIN)
+#define qs8_MAX ((char)CHAR_MAX)
+#define qs16_MIN ((short)SHRT_MIN)
+#define qs16_MAX ((short)SHRT_MAX)
+
+#define qu8_MIN ((uchar)0)
+#define qu8_MAX ((uchar)UCHAR_MAX)
+#define qu16_MIN ((ushort)0)
+#define qu16_MAX ((ushort)USHRT_MAX)
+
+#define qs8_TYPE char
+#define qs8x1_TYPE char
+#define qs8x2_TYPE char2
+#define qs8x4_TYPE char4
+#define qs8x8_TYPE char8
+#define qs8x16_TYPE char16
+
+#define qs16_TYPE short
+#define qs16x1_TYPE short
+#define qs16x2_TYPE short2
+#define qs16x4_TYPE short4
+#define qs16x8_TYPE short8
+#define qs16x16_TYPE short16
+
+#undef VEC_DATA_TYPE_STR
+#undef VEC_DATA_TYPE
+#undef CONVERT_STR
+#undef CONVERT
+#undef CONVERT_SAT_STR
+#undef CONVERT_SAT
+
+#define VEC_DATA_TYPE_STR(type, size) type##x##size
+#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
+
+#define CONVERT_STR3(x, type, rtype) (convert_##rtype((x)))
+#define CONVERT_STR2(x, type, rtype) CONVERT_STR3(x, type, rtype)
+#define CONVERT_STR(x, type) CONVERT_STR2(x, type, type##_TYPE)
+#define CONVERT(x, type) CONVERT_STR(x, type)
+
+#define CONVERT_SAT_STR3(x, type, rtype) (convert_##rtype##_sat((x)))
+#define CONVERT_SAT_STR2(x, type, rtype) CONVERT_SAT_STR3(x, type, rtype)
+#define CONVERT_SAT_STR(x, type) CONVERT_SAT_STR2(x, type, type##_TYPE)
+#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
+
+/* Computes max of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector maximum.
+ */
+#define MAXQ_IMPL(type)                           \
+    inline type max_##type(type VopA, type VopB)  \
+    {                                             \
+        return max(VopA, VopB);                   \
+    }
+
+MAXQ_IMPL(qs8x1)
+MAXQ_IMPL(qs8x2)
+MAXQ_IMPL(qs8x4)
+MAXQ_IMPL(qs8x8)
+MAXQ_IMPL(qs8x16)
+
+#define MAX_OP_EXPAND_STR(a, b, type, size) max_##type##x##size((a), (b))
+#define MAX_OP_EXPAND(a, b, type, size) MAX_OP_EXPAND_STR(a, b, type, size)
+
+/* Computes saturated addition of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector addition. The result is saturated in case of overflow
+ */
+#define ADDQ_SAT_IMPL(type)                          \
+    inline type add_sat_##type(type VopA, type VopB) \
+    {                                                \
+        return add_sat(VopA, VopB);                  \
+    }
+
+ADDQ_SAT_IMPL(qs8x1)
+ADDQ_SAT_IMPL(qs8x2)
+ADDQ_SAT_IMPL(qs8x4)
+ADDQ_SAT_IMPL(qs8x8)
+ADDQ_SAT_IMPL(qs8x16)
+
+#define ADD_SAT_OP_EXPAND_STR(a, b, type, size) add_sat_##type##x##size((a), (b))
+#define ADD_SAT_OP_EXPAND(a, b, type, size) ADD_SAT_OP_EXPAND_STR(a, b, type, size)
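A pattern worth noting throughout this header: every operation comes as a `*_EXPAND_STR`/`*_EXPAND` pair. The extra level of indirection is what lets the kernels pass a macro (such as `DATA_TYPE`) as the type argument, because arguments adjacent to the `##` paste operator are not expanded first. A standalone sketch of why that matters (names here only illustrate the pattern, they shadow nothing in the patch):

```cpp
#define DATA_TYPE qs8

// Without indirection: 'type' is pasted before expansion -> max_DATA_TYPEx16
#define MAX_OP_BAD(a, b, type, size) max_##type##x##size((a), (b))

// With indirection: DATA_TYPE expands to qs8 first -> max_qs8x16
#define MAX_OP_GOOD_STR(a, b, type, size) max_##type##x##size((a), (b))
#define MAX_OP_GOOD(a, b, type, size) MAX_OP_GOOD_STR(a, b, type, size)

// MAX_OP_BAD(x, y, DATA_TYPE, 16)  -> max_DATA_TYPEx16((x), (y))  (wrong)
// MAX_OP_GOOD(x, y, DATA_TYPE, 16) -> max_qs8x16((x), (y))        (intended)
```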
+/* Computes saturated subtraction of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow
+ */
+#define SUBQ_SAT_IMPL(type)                          \
+    inline type sub_sat_##type(type VopA, type VopB) \
+    {                                                \
+        return sub_sat(VopA, VopB);                  \
+    }
+
+SUBQ_SAT_IMPL(qs8x1)
+SUBQ_SAT_IMPL(qs8x2)
+SUBQ_SAT_IMPL(qs8x4)
+SUBQ_SAT_IMPL(qs8x8)
+SUBQ_SAT_IMPL(qs8x16)
+
+#define SUB_SAT_OP_EXPAND_STR(a, b, type, size) sub_sat_##type##x##size((a), (b))
+#define SUB_SAT_OP_EXPAND(a, b, type, size) SUB_SAT_OP_EXPAND_STR(a, b, type, size)
+
+/* Saturate multiply of two fixed point vectors
+ *
+ * @param[in] type  is the actual data type.
+ * @param[in] itype is the intermediate data type.
+ *
+ * @return The result of the fixed point vector multiplication. The result is saturated in case of overflow
+ */
+#define MULQ_SAT_IMPL(type, itype)                                                            \
+    inline type mul_sat_##type(type VopA, type VopB, int fixed_point_position)               \
+    {                                                                                         \
+        itype round_val = (itype)(1 << (fixed_point_position - 1));                           \
+        itype res       = mad_sat(CONVERT((VopA), itype), CONVERT((VopB), itype), round_val); \
+        return CONVERT_SAT((res >> (itype)fixed_point_position), type);                       \
+    }
+
+MULQ_SAT_IMPL(qs8x16, qs16x16)
+
+#define MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) mul_sat_##type##x##size((a), (b), (position))
+#define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position)
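MULQ_SAT_IMPL is the workhorse: it widens both operands to the intermediate type, folds in a rounding constant of 1 << (pos - 1) via mad_sat, shifts the extra fractional bits back out, and saturates on the way back to 8 bits. A scalar C++ model of the same arithmetic (a sketch only; it uses a 32-bit intermediate where the kernel uses qs16):

```cpp
#include <algorithm>
#include <cstdint>

// Scalar model of MULQ_SAT_IMPL: widen, multiply, add the rounding constant,
// drop the surplus fractional bits, then saturate back to the 8-bit range.
int8_t mul_sat_qs8(int8_t a, int8_t b, int fixed_point_position)
{
    const int32_t round_val = 1 << (fixed_point_position - 1);
    const int32_t res = (static_cast<int32_t>(a) * static_cast<int32_t>(b) + round_val)
                        >> fixed_point_position;
    return static_cast<int8_t>(std::max(-128, std::min(127, res)));
}
```

The rounding constant makes the right shift round to nearest instead of truncating toward negative infinity, which halves the worst-case quantization error per multiply.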
+/** Saturate division of two fixed point vectors
+ *
+ * @param[in] stype is the actual scalar data type.
+ * @param[in] type  is the actual data type.
+ * @param[in] itype is the intermediate data type.
+ *
+ * @return The result of the fixed point division. The result is saturated in case of overflow
+ */
+#define DIVQ_SAT_IMPL(stype, type, itype)                                                                                                                 \
+    inline type div_sat_##type(type VopA, type VopB, int fixed_point_position)                                                                            \
+    {                                                                                                                                                     \
+        itype conv_a      = CONVERT((VopA), itype);                                                                                                       \
+        itype denominator = CONVERT((VopB), itype);                                                                                                       \
+        itype numerator   = conv_a << (itype)(fixed_point_position);                                                                                      \
+        itype res         = select(numerator / denominator, select((itype)stype##_MAX, (itype)stype##_MIN, conv_a < (itype)0), denominator == (itype)0); \
+        return CONVERT_SAT((res), type);                                                                                                                  \
+    }
+
+DIVQ_SAT_IMPL(qs8, qs8x16, qs16x16)
+
+#define DIV_SAT_OP_EXPAND_STR(a, b, type, size, position) div_sat_##type##x##size((a), (b), (position))
+#define DIV_SAT_OP_EXPAND(a, b, type, size, position) DIV_SAT_OP_EXPAND_STR(a, b, type, size, position)
+
+/** Saturate exponential fixed point 8 bit (16 elements)
+ *
+ * @param[in] a                    8 bit fixed point input vector
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 8 bit fixed point exponential. The result is saturated in case of overflow
+ */
+qs8x16 inline exp_qs8x16(qs8x16 a, int fixed_point_position)
+{
+    // Constants (literal constants are calculated by converting the respective float to the fixed point with the highest supported fixed point position)
+    char16 const_one = (char16)(1 << (fixed_point_position));
+    char16 ln2       = (char16)(((0x58 >> (6 - fixed_point_position)) + 1) >> 1);                 // 0.693147
+    char16 inv_ln2   = ((char16)(((0x38 >> (6 - (fixed_point_position))) + 1) >> 1)) | const_one; // 1.442695
+    char16 A         = (char16)(((0x7F >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.9978546
+    char16 B         = (char16)(((0x3F >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.4994721
+    char16 C         = (char16)(((0x16 >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.1763723
+    char16 D         = (char16)(((0x05 >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.0435108
+
+    // Perform range reduction [-log(2),log(2)]
+    char16 m = mul_sat_qs8x16(a, inv_ln2, fixed_point_position);
+
+    // get decimal part of m
+    char16 dec_m = m >> (char16)fixed_point_position;
+
+    char16 alpha = mul_sat_qs8x16(dec_m << (char16)fixed_point_position, ln2, fixed_point_position);
+    alpha        = convert_char16(abs_diff(a, alpha));
+
+    // Polynomial expansion
+    char16 sum = add_sat_qs8x16(mul_sat_qs8x16(alpha, D, fixed_point_position), C);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), B);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), A);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), const_one);
+
+    // Reconstruct and saturate result
+    return select(select(sum << dec_m, sum >> -dec_m, dec_m < (char16)0), (char16)0x7F, clz(sum) <= dec_m);
+}
+
+#define EXP_OP_EXPAND_STR(a, type, size, position) exp_##type##x##size((a), (position))
+#define EXP_OP_EXPAND(a, type, size, position) EXP_OP_EXPAND_STR(a, type, size, position)
+
+#endif // ARM_COMPUTE_FIXED_POINT_H
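exp_qs8x16 follows the classic exponential recipe: reduce the argument as a = k·ln2 + alpha with alpha in [0, ln2), approximate exp(alpha) with a fourth-order polynomial in Horner form using the constants A through D above, then reconstruct by shifting left or right by k (a multiply by 2^k), saturating to 0x7F on overflow. A float model of the same steps may be easier to follow than the Q-format version (a sketch only; the kernel does all of this in QS8):

```cpp
#include <cmath>

// Float model of exp_qs8x16 for one element: range reduction, polynomial,
// reconstruction. The kernel's shifts correspond to the ldexp at the end.
float exp_model(float a)
{
    // Range reduction: a = k*ln(2) + alpha, with alpha in [0, ln2)
    const int   k     = static_cast<int>(std::floor(a * 1.442695f)); // like dec_m
    const float alpha = a - static_cast<float>(k) * 0.693147f;       // like abs_diff(a, dec_m*ln2)

    // 4th-order polynomial approximation of exp(alpha), Horner form,
    // using the same constants the kernel encodes in fixed point.
    float sum = 0.0435108f;         // D
    sum = sum * alpha + 0.1763723f; // C
    sum = sum * alpha + 0.4994721f; // B
    sum = sum * alpha + 0.9978546f; // A
    sum = sum * alpha + 1.f;        // const_one

    // Reconstruction: multiply by 2^k (the kernel shifts and saturates here).
    return std::ldexp(sum, k);
}
```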
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index 6db8ed567c..cf3cb78e04 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -26,6 +26,8 @@
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 
+#define EXPAND(x) x
+
 #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
 
 #define VEC_DATA_TYPE_STR(type, size) type##size
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
index 632b4a5374..a29aea4fae 100644
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ b/src/core/CL/cl_kernels/softmax_layer.cl
@@ -23,14 +23,36 @@
  */
 #include "helpers.h"
 
+#if defined(FIXED_POINT_POSITION)
+
+#include "fixed_point.h"
+#define MAX_OP(x, y, type, size) MAX_OP_EXPAND(x, y, type, size)
+#define ADD_OP(x, y, type, size) ADD_SAT_OP_EXPAND((x), (y), type, size)
+#define SUB_OP(x, y, type, size) SUB_SAT_OP_EXPAND((x), (y), type, size)
+#define DIV_OP(x, y, type, size) DIV_SAT_OP_EXPAND((x), (y), type, size, FIXED_POINT_POSITION)
+#define EXP_OP(x, type, size) EXP_OP_EXPAND((x), type, size, FIXED_POINT_POSITION)
+
+#define MIN_VAL_EXPAND(type) type##_MIN
+#define MIN_VAL(type) MIN_VAL_EXPAND(type)
+#define MINVAL MIN_VAL(DATA_TYPE)
+#define SELECT_DATA_TYPE EXPAND(DATA_TYPE)
+
+#else
+
+#define MAX_OP(x, y, type, size) max((x), (y))
+#define ADD_OP(x, y, type, size) ((x) + (y))
+#define SUB_OP(x, y, type, size) ((x) - (y))
+#define DIV_OP(x, y, type, size) ((x) / (y))
+#define EXP_OP(x, type, size) exp((x))
+
 #if defined USE_F16
-#define MINVAL HALF_MIN
+#define MINVAL -HALF_MAX
 #define SELECT_DATA_TYPE short
-#define DATA_TYPE half
 #else
-#define MINVAL FLT_MIN
+#define MINVAL -FLT_MAX
 #define SELECT_DATA_TYPE int
-#define DATA_TYPE float
+#endif
+
 #endif
 
 __constant VEC_DATA_TYPE(DATA_TYPE, 16) type_min = (VEC_DATA_TYPE(DATA_TYPE, 16))(MINVAL);
@@ -39,16 +61,16 @@ __constant uint16 idx16 = (uint16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
 /** Identifies the maximum value across the 1st dimension.
  *
  * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note In case F16 is used -DUSE_HALF must be passed otherwise the kernel will default to used F32.
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
  * @note In case the input is not multiple of 16 -DNON_MULTIPLE_OF_16 must be passed.
  *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
@@ -74,7 +96,7 @@ __kernel void softmax_layer_max(
     {
         VEC_DATA_TYPE(DATA_TYPE, 16)
         data = vload16(0, (__global DATA_TYPE *)offset(&src, i << 4, 0));
-        max_val = max(data, max_val);
+        max_val = MAX_OP(data, max_val, DATA_TYPE, 16);
     }
 
 #if defined NON_MULTIPLE_OF_16
@@ -83,14 +105,14 @@
     data = vload16(0, (__global DATA_TYPE *)offset(&src, width4 << 4, 0));
     VEC_DATA_TYPE(SELECT_DATA_TYPE, 16)
     widx    = CONVERT(((uint16)(width4 << 4) + idx16) < width, VEC_DATA_TYPE(SELECT_DATA_TYPE, 16));
-    max_val = max(max_val, select(type_min, data, widx));
+    max_val = MAX_OP(max_val, select(type_min, data, widx), DATA_TYPE, 16);
 #endif
 
     // Perform max reduction
-    max_val.s01234567 = max(max_val.s01234567, max_val.s89ABCDEF);
-    max_val.s0123     = max(max_val.s0123, max_val.s4567);
-    max_val.s01       = max(max_val.s01, max_val.s23);
-    max_val.s0        = max(max_val.s0, max_val.s1);
+    max_val.s01234567 = MAX_OP(max_val.s01234567, max_val.s89ABCDEF, DATA_TYPE, 8);
+    max_val.s0123     = MAX_OP(max_val.s0123, max_val.s4567, DATA_TYPE, 4);
+    max_val.s01       = MAX_OP(max_val.s01, max_val.s23, DATA_TYPE, 2);
+    max_val.s0        = MAX_OP(max_val.s0, max_val.s1, DATA_TYPE, 1);
 
     // Store result
     *((__global DATA_TYPE *)dst.ptr) = max_val.s0;
@@ -100,28 +122,28 @@
  * then gets the exponent of each element as sums all elements across each row.
 *
 * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note In case F16 is used -DUSE_HALF must be passed otherwise the kernel will default to used F32.
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
 * @note In case the input is not multiple of 16 -DNON_MULTIPLE_OF_16 must be passed.
 *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in]  max_ptr                           Pointer to the max values tensor slice. Supported data types: F16, F32
+ * @param[in]  max_ptr                           Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  max_stride_x                      Stride of the max values tensor in X dimension (in bytes)
 * @param[in]  max_step_x                        max_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  max_stride_y                      Stride of the max values tensor in Y dimension (in bytes)
 * @param[in]  max_step_y                        max_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  max_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr                           Pointer to the sum values tensor slice. Supported data types: F16, F32
+ * @param[out] sum_ptr                           Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  sum_stride_x                      Stride of the sum values tensor in X dimension (in bytes)
 * @param[in]  sum_step_x                        sum_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  sum_stride_y                      Stride of the sum values tensor in Y dimension (in bytes)
@@ -154,28 +176,30 @@ __kernel void softmax_layer_shift_exp_sum(
     {
         VEC_DATA_TYPE(DATA_TYPE, 16)
         data = vload16(0, (__global DATA_TYPE *)offset(&src, i << 4, 0));
-        data = exp(data - max_val);
+        data = SUB_OP(data, max_val, DATA_TYPE, 16);
+        data = EXP_OP(data, DATA_TYPE, 16);
         vstore16(data, 0, (__global DATA_TYPE *)offset(&dst, i << 4, 0));
-        sum1D += data;
+        sum1D = ADD_OP(sum1D, data, DATA_TYPE, 16);
     }
 
 #if defined NON_MULTIPLE_OF_16
     // Handle non multiple of 16
     VEC_DATA_TYPE(DATA_TYPE, 16)
     data = vload16(0, (__global DATA_TYPE *)offset(&src, width4 << 4, 0));
-    data = exp(data - max_val);
+    data = SUB_OP(data, max_val, DATA_TYPE, 16);
+    data = EXP_OP(data, DATA_TYPE, 16);
     VEC_DATA_TYPE(SELECT_DATA_TYPE, 16)
     widx = CONVERT(((uint16)(width4 << 4) + idx16) < width, VEC_DATA_TYPE(SELECT_DATA_TYPE, 16));
     data = select(0, data, widx);
     vstore16(data, 0, (__global DATA_TYPE *)offset(&dst, width4 << 4, 0));
-    sum1D += data;
+    sum1D = ADD_OP(sum1D, data, DATA_TYPE, 16);
 #endif
 
     // Perform min/max reduction
-    sum1D.s01234567 = sum1D.s01234567 + sum1D.s89ABCDEF;
-    sum1D.s0123     = sum1D.s0123 + sum1D.s4567;
-    sum1D.s01       = sum1D.s01 + sum1D.s23;
-    sum1D.s0        = sum1D.s0 + sum1D.s1;
+    sum1D.s01234567 = ADD_OP(sum1D.s01234567, sum1D.s89ABCDEF, DATA_TYPE, 8);
+    sum1D.s0123     = ADD_OP(sum1D.s0123, sum1D.s4567, DATA_TYPE, 4);
+    sum1D.s01       = ADD_OP(sum1D.s01, sum1D.s23, DATA_TYPE, 2);
+    sum1D.s0        = ADD_OP(sum1D.s0, sum1D.s1, DATA_TYPE, 1);
 
     // Calculate and store result
     *((__global DATA_TYPE *)sum.ptr) = sum1D.s0;
@@ -184,20 +208,21 @@
 /** Divides all the values of the input tensor by the sum calculated from softmax_layer_shift_exp_sum kernel.
 *
 * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
 *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in]  sum_ptr                           Pointer to the sum values tensor slice. Supported data types: F16, F32
+ * @param[in]  sum_ptr                           Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  sum_stride_x                      Stride of the sum values tensor in X dimension (in bytes)
 * @param[in]  sum_step_x                        sum_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  sum_stride_y                      Stride of the sum values tensor in Y dimension (in bytes)
 * @param[in]  sum_step_y                        sum_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
@@ -217,5 +242,5 @@ __kernel void softmax_layer_norm(
     DATA_TYPE sum_val = *((__global DATA_TYPE *)offset(&sum, 0, get_global_id(1)));
     VEC_DATA_TYPE(DATA_TYPE, 16)
     data = vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0));
-    vstore16(data / sum_val, 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
+    vstore16(DIV_OP(data, sum_val, DATA_TYPE, 16), 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
 }
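Taken together, the three kernels implement the numerically stable softmax: subtracting the row maximum before exponentiation keeps the exp argument non-positive, which matters even more at 8-bit precision where the representable range is tiny. A plain C++ reference of the pipeline on one row (a sketch for orientation, not the library's reference implementation):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Reference model of the three-kernel pipeline on a single row:
// 1) softmax_layer_max, 2) softmax_layer_shift_exp_sum, 3) softmax_layer_norm.
std::vector<float> softmax_row(const std::vector<float> &src)
{
    // Stage 1: row maximum (the kernels seed this with MINVAL = -FLT_MAX)
    const float max_val = *std::max_element(src.begin(), src.end());

    // Stage 2: shift by the max, exponentiate, and accumulate the sum
    std::vector<float> dst(src.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        dst[i] = std::exp(src[i] - max_val);
        sum += dst[i];
    }

    // Stage 3: normalise every element by the sum
    for(float &v : dst)
    {
        v /= sum;
    }
    return dst;
}
```

Note that the diff also fixes a latent bug in stage 1: the old MINVAL seeds (HALF_MIN, FLT_MIN) are the smallest positive normal values, not the most negative ones; -HALF_MAX and -FLT_MAX are the correct identities for a max reduction.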
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
index 0470d5243e..c488f90b91 100644
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
@@ -41,9 +41,9 @@ using namespace arm_compute;
 
 void CLLogits1DMaxKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
 
     _input  = input;
     _output = output;
@@ -52,7 +52,12 @@ void CLLogits1DMaxKernel::configure(const ICLTensor *input, ICLTensor *output)
     const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);
 
     // Set build options
-    std::set<std::string> build_opts{ "-DUSE_" + string_from_data_type(input->info()->data_type()) };
+    std::set<std::string> build_opts;
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Tell the kernel that the width is not a multiple of 16
     if((input->info()->dimension(0) % max_cl_vector_width) != 0)
@@ -88,11 +93,10 @@ CLLogits1DShiftExpSumKernel::CLLogits1DShiftExpSumKernel()
 
 void CLLogits1DShiftExpSumKernel::configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(max, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, max, sum);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, max, sum);
 
     _input  = input;
     _max    = max;
@@ -103,7 +107,12 @@ void CLLogits1DShiftExpSumKernel::configure(const ICLTensor *input, const ICLTen
     const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);
 
     // Set build options
-    std::set<std::string> build_opts{ "-DUSE_" + string_from_data_type(input->info()->data_type()) };
+    std::set<std::string> build_opts;
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Tell the kernel that the width is not a multiple of 16
     if((input->info()->dimension(0) % max_cl_vector_width) != 0)
@@ -161,10 +170,10 @@ CLLogits1DNormKernel::CLLogits1DNormKernel()
 
 void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, sum);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, sum, output);
 
     _input  = input;
     _sum    = sum;
@@ -172,7 +181,11 @@ void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *su
 
     // Set build options
     std::set<std::string> build_opts;
-    build_opts.emplace(("-DUSE_" + string_from_data_type(input->info()->data_type())));
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Create kernel
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("softmax_layer_norm", build_opts));
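In each configure() above, the element type and the fixed point position travel to the kernel as preprocessor definitions; this is what selects the #if defined(FIXED_POINT_POSITION) branch in softmax_layer.cl at build time. A condensed model of that plumbing (illustrative only; the patch uses the library's get_cl_type_from_data_type and val_to_string helpers as shown in the diff):

```cpp
#include <set>
#include <string>

// Sketch: assemble the -D options that pick the kernel variant. Passing
// -DFIXED_POINT_POSITION enables the fixed_point.h code path; omitting it
// falls through to the float implementation.
std::set<std::string> make_softmax_build_opts(const std::string &cl_type, int fixed_point_position)
{
    std::set<std::string> build_opts;
    build_opts.emplace("-DDATA_TYPE=" + cl_type); // e.g. "qs8", "half", "float"
    if(cl_type == "qs8")
    {
        build_opts.emplace("-DFIXED_POINT_POSITION=" + std::to_string(fixed_point_position));
    }
    return build_opts;
}
```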
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 2a78c58053..32dc8ff107 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -36,15 +36,15 @@ CLSoftmaxLayer::CLSoftmaxLayer()
 
 void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
 
     // Create intermediate tensors shapes
-    _tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type()));
+    _tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
 
     TensorShape shape = input->info()->tensor_shape();
     shape.set(0, 1);
-    TensorInfo tensor_info_max_sum(shape, input->info()->num_channels(), input->info()->data_type());
+    TensorInfo tensor_info_max_sum(shape, input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
     _max.allocator()->init(tensor_info_max_sum);
     _sum.allocator()->init(tensor_info_max_sum);
diff --git a/tests/benchmark/main.cpp b/tests/benchmark/main.cpp
index 0120aaad6c..7ce6a26acd 100644
--- a/tests/benchmark/main.cpp
+++ b/tests/benchmark/main.cpp
@@ -90,7 +90,7 @@ int main(int argc, char **argv)
 #endif
     std::cout << "Using " << user_config.threads << " CPU " << (user_config.threads == 1 ? "thread" : "threads") << "\n";
-    std::cout << "Seed: " << library->seed();
+    std::cout << "Seed: " << library->seed() << "\n";
 
     arm_compute::Scheduler::get().set_num_threads(user_config.threads);
 
     ::benchmark::RunSpecifiedBenchmarks();
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
new file mode 100644
index 0000000000..aa80e506ed
--- /dev/null
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "CL/CLAccessor.h"
+#include "CL/Helper.h"
+#include "Globals.h"
+#include "PaddingCalculator.h"
+#include "TensorLibrary.h"
+#include "TypePrinter.h"
+#include "Utils.h"
+#include "validation/Datasets.h"
+#include "validation/Reference.h"
+#include "validation/Validation.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
+
+#include "boost_wrapper.h"
+
+#include <random>
+#include <string>
+
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::cl;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+/** Tolerance for float operations */
+const float tolerance = 0.000001f;
+/** Tolerance for fixed point operations */
+const float tolerance_fixed_point = 2.f;
+
+/** Compute OpenCL softmax layer function.
+ *
+ * @param[in] shape                Shape of the input and output tensors.
+ * @param[in] dt                   Data type of tensors.
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
+ *
+ * @return Computed output tensor.
+ */
+CLTensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0)
+{
+    // Create tensors
+    CLTensor src = create_tensor(shape, dt, 1, fixed_point_position);
+    CLTensor dst = create_tensor(shape, dt, 1, fixed_point_position);
+
+    // Create and configure function
+    CLSoftmaxLayer smx_layer;
+    smx_layer.configure(&src, &dst);
+
+    // Allocate tensors
+    src.allocator()->allocate();
+    dst.allocator()->allocate();
+
+    BOOST_TEST(!src.info()->is_resizable());
+    BOOST_TEST(!dst.info()->is_resizable());
+
+    // Fill tensors
+    if(arm_compute::is_data_type_float(dt))
+    {
+        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
+        library->fill(CLAccessor(src), distribution, 0);
+    }
+    else
+    {
+        int                             one_fixed = 1 << fixed_point_position;
+        std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
+        library->fill(CLAccessor(src), distribution, 0);
+    }
+
+    // Compute function
+    smx_layer.run();
+
+    return dst;
+}
+} // namespace
+
+#ifndef DOXYGEN_SKIP_THIS
+BOOST_AUTO_TEST_SUITE(CL)
+BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
+{
+    // Set fixed point position data type allowed
+    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
+
+    // Create tensors
+    CLTensor src = create_tensor(shape, dt, 1, fixed_point_position);
+    CLTensor dst = create_tensor(shape, dt, 1, fixed_point_position);
+
+    BOOST_TEST(src.info()->is_resizable());
+    BOOST_TEST(dst.info()->is_resizable());
+
+    // Create and configure function
+    CLSoftmaxLayer smx_layer;
+    smx_layer.configure(&src, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(src.info()->valid_region(), valid_region);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    validate(src.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+BOOST_AUTO_TEST_SUITE(Float)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
+{
+    // Compute function
+    CLTensor dst = compute_softmax_layer(shape, dt);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
+
+    // Validate output
+    validate(CLAccessor(dst), ref_dst, tolerance);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
+{
+    // Compute function
+    CLTensor dst = compute_softmax_layer(shape, dt);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
+
+    // Validate output
+    validate(CLAccessor(dst), ref_dst, tolerance);
+}
+BOOST_AUTO_TEST_SUITE_END()
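Note the two tolerances declared above: float results are checked to 1e-6, while fixed-point results are allowed an absolute error of tolerance_fixed_point = 2. Assuming validation compares raw quantized values, that budget corresponds to two quantization steps, i.e. 2 / 2^pos in real terms (an illustrative helper, not part of the patch):

```cpp
// Real-valued error corresponding to a 2-LSB tolerance on raw QS8 values.
float fixed_point_tolerance_as_real(int fixed_point_position)
{
    return 2.f / static_cast<float>(1 << fixed_point_position); // e.g. 0.125 at pos = 4
}
```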
+
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFixedPointDataTypes() * boost::unit_test::data::xrange(1, 6),
+                     shape, dt, fixed_point_position)
+{
+    // Compute function
+    CLTensor dst = compute_softmax_layer(shape, dt, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt, fixed_point_position);
+
+    // Validate output
+    validate(CLAccessor(dst), ref_dst, tolerance_fixed_point);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFixedPointDataTypes() * boost::unit_test::data::xrange(1, 6),
+                     shape, dt, fixed_point_position)
+{
+    // Compute function
+    CLTensor dst = compute_softmax_layer(shape, dt, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt, fixed_point_position);
+
+    // Validate output
+    validate(CLAccessor(dst), ref_dst, tolerance_fixed_point);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+#endif
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index 519cac3f29..66a9e03ace 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -83,7 +83,7 @@ Tensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_po
     // Fill tensors
     if(arm_compute::is_data_type_float(dt))
     {
-        std::uniform_real_distribution<> distribution(-10, 10);
+        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
         library->fill(NEAccessor(src), distribution, 0);
     }
     else
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 8a2b73ecef..be6ddba633 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -629,7 +629,7 @@ RawTensor Reference::compute_reference_softmax_layer(const TensorShape &shape, D
     // Fill reference
     if(arm_compute::is_data_type_float(dt))
     {
-        std::uniform_real_distribution<> distribution(-10, 10);
+        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
         library->fill(ref_src, distribution, 0);
     }
     else
diff --git a/tests/validation/main.cpp b/tests/validation/main.cpp
index 22a56d2251..c89372637f 100644
--- a/tests/validation/main.cpp
+++ b/tests/validation/main.cpp
@@ -62,7 +62,7 @@ struct GlobalFixture
             library = cpp14::make_unique<TensorLibrary>(user_config.path.get());
         }
 
-        std::cout << "Seed: " << library->seed();
+        std::cout << "Seed: " << library->seed() << "\n";
     }
 };