From 8cdfdb83c89178b5cf654a5b27471950ab1b997e Mon Sep 17 00:00:00 2001 From: Frank Lei Date: Tue, 2 Jan 2018 16:49:33 +0800 Subject: APPBROWSER-366: Add DepthwiseConvolutionLayer(fp16 only) support. Change-Id: I051b7e56b60bf1a55cdf014539ef71346d3aee26 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/114737 Reviewed-by: Anthony Barbier Tested-by: Jenkins --- arm_compute/core/GLES_COMPUTE/GCKernels.h | 1 + .../kernels/GCDepthwiseConvolutionLayer3x3Kernel.h | 76 +++++ arm_compute/runtime/GLES_COMPUTE/GCFunctions.h | 1 + .../functions/GCDepthwiseConvolutionLayer.h | 56 ++++ src/core/GLES_COMPUTE/GCKernelLibrary.cpp | 5 + .../cs_shaders/depthwise_convolution3x3.cs | 312 +++++++++++++++++++++ .../GCDepthwiseConvolutionLayer3x3Kernel.cpp | 260 +++++++++++++++++ .../functions/GCDepthwiseConvolutionLayer.cpp | 41 +++ .../GLES_COMPUTE/DepthwiseConvolutionLayer.cpp | 51 ++++ .../GLES_COMPUTE/DepthwiseConvolutionLayer.cpp | 78 ++++++ .../fixtures/DepthwiseConvolutionLayerFixture.h | 1 + .../reference/DepthwiseConvolutionLayer.cpp | 8 +- 12 files changed, 888 insertions(+), 2 deletions(-) create mode 100644 arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h create mode 100644 arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h create mode 100644 src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs create mode 100644 src/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.cpp create mode 100644 src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp create mode 100644 tests/benchmark/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp create mode 100644 tests/validation/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp diff --git a/arm_compute/core/GLES_COMPUTE/GCKernels.h b/arm_compute/core/GLES_COMPUTE/GCKernels.h index c6f4877fa1..5be44984b2 100644 --- a/arm_compute/core/GLES_COMPUTE/GCKernels.h +++ b/arm_compute/core/GLES_COMPUTE/GCKernels.h @@ -31,6 +31,7 @@ #include "arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h" #include "arm_compute/core/GLES_COMPUTE/kernels/GCCol2ImKernel.h" #include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h" +#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h" #include "arm_compute/core/GLES_COMPUTE/kernels/GCDirectConvolutionLayerKernel.h" #include "arm_compute/core/GLES_COMPUTE/kernels/GCDropoutLayerKernel.h" #include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h" diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h new file mode 100644 index 0000000000..e10769db5e --- /dev/null +++ b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_GCDEPTHWISECONVOLUTIONKERNEL3x3_H__ +#define __ARM_COMPUTE_GCDEPTHWISECONVOLUTIONKERNEL3x3_H__ + +#include "arm_compute/core/GLES_COMPUTE/IGCKernel.h" + +namespace arm_compute +{ +class IGCTensor; + +/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor. + */ +class GCDepthwiseConvolutionLayer3x3Kernel : public IGCKernel +{ +public: + /** Default constructor */ + GCDepthwiseConvolutionLayer3x3Kernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GCDepthwiseConvolutionLayer3x3Kernel(const GCDepthwiseConvolutionLayer3x3Kernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + GCDepthwiseConvolutionLayer3x3Kernel &operator=(const GCDepthwiseConvolutionLayer3x3Kernel &) = delete; + /** Default Move Constructor. */ + GCDepthwiseConvolutionLayer3x3Kernel(GCDepthwiseConvolutionLayer3x3Kernel &&) = default; + /** Default move assignment operator. */ + GCDepthwiseConvolutionLayer3x3Kernel &operator=(GCDepthwiseConvolutionLayer3x3Kernel &&) = default; + /** Initialize the function's source, destination, conv and border_size. + * + * @param[in] input Source tensor. DataType supported: F16. + * @param[in] weights Weights tensor. A 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input. + * @param[in] biases (Optional) Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed. + * Data type supported: Same as @p input. + * @param[out] output Destination tensor. Data type supported: Same as @p input. + * @param[in] conv_info Padding and stride information to use for the convolution. 
+ */ + void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info); + + // Inherited methods overridden: + void run(const Window &window) override; + BorderSize border_size() const override; + +private: + BorderSize _border_size; + const IGCTensor *_input; + IGCTensor *_output; + const IGCTensor *_weights; + const IGCTensor *_biases; + unsigned int _conv_stride_x; + unsigned int _conv_stride_y; + unsigned int _conv_pad_left; + unsigned int _conv_pad_top; + gles::NDRange _lws; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_GCDEPTHWISECONVOLUTIONKERNEL3x3_H__ */ diff --git a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h index faaf2f0edc..fa688dbfb6 100644 --- a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h +++ b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h @@ -30,6 +30,7 @@ #include "arm_compute/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCBatchNormalizationLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h" +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDropoutLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCFillBorder.h" diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h new file mode 100644 index 0000000000..7b99ea5645 --- /dev/null +++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_GCDEPTHWISECONVOLUTION_H__ +#define __ARM_COMPUTE_GCDEPTHWISECONVOLUTION_H__ + +#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/GLES_COMPUTE/IGCSimpleFunction.h" + +namespace arm_compute +{ +class IGCTensor; + +/** Basic function to execute a depthwise convolution for kernel size 3x3xC. 
This function calls the following OpenGLES kernels: + * + * -# @ref GCDepthwiseConvolutionLayer3x3Kernel + * -# @ref GCFillBorderKernel (if pad_x or pad_y > 0) + * + */ +class GCDepthwiseConvolutionLayer3x3 : public IGCSimpleFunction +{ +public: + /** Initialize the function's source, destination, conv and border_size. + * + * @param[in, out] input Source tensor. Data type supported: F16. (Written to only for border filling). + * @param[in] weights Weights tensor. A 3D tensor with shape [3, 3, IFM]. Data type supported: Same as @p input. + * @param[in] biases (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed. + * Data type supported: Same as @p input. + * @param[out] output Destination tensor. Data type supported: same as @p input. + * @param[in] conv_info Padding and stride information to use for the convolution. + */ + void configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info); +}; +} +#endif /*__ARM_COMPUTE_GCDEPTHWISECONVOLUTION_H__ */ diff --git a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp index 26b8aaafd6..7766f95bcc 100644 --- a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp +++ b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp @@ -223,6 +223,7 @@ const std::map GCKernelLibrary::_shader_program_map = { "normalize_planar_yuv_layer", "normalize_planar_yuv_layer.cs" }, { "scale_nearest_neighbour", "scale.cs" }, { "arithmetic_add", "arithmetic_add.cs" }, + { "depthwise_convolution_3x3", "depthwise_convolution3x3.cs" }, }; const std::map GCKernelLibrary::_program_source_map = @@ -304,6 +305,10 @@ const std::map GCKernelLibrary::_program_source_map = "arithmetic_add.cs", #include "./cs_shaders/arithmetic_add.csembed" }, + { + "depthwise_convolution3x3.cs", +#include "./cs_shaders/depthwise_convolution3x3.csembed" + }, #endif /* EMBEDDED_KERNELS */ }; diff --git a/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs b/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs new file mode 100644 index 0000000000..adfc126c95 --- /dev/null +++ b/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in; + +#include "helpers_cs.h" + +#if defined(DATA_TYPE_FP16) +precision mediump float; +#endif // DATA_TYPE_FP16 + +/** This kernel performs a depthwise convolution. + * + * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32" + * @note This kernel has multiple optimized depthwise convolution options for FP16. + * The depthwise convolution option must be passed at compile time using "#define PROCESS_nX_nY_nZ" e.g. "#define PROCESS_8X_1Y_1Z" + * @note The convolution stride x must be passed at compile time using "#define STRIDE_X n" e.g. "#define STRIDE_X 1" + * @note In case biases will be added to the convolution "#define HAS_BIAS" has to be passed to append the final matrix with 1 in each row. + * + * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16 + * @param[in] src_attrs The attributes of the source tensor + * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr + * @param[in] dst_attrs The attributes of the destination tensor + * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr + * @param[in] weights_attrs The attributes of the weights tensor + * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr + * @param[in] biases_attrs The attributes of the weights tensor + */ +SHADER_PARAMS_DECLARATION +{ + Tensor3DAttributes src_attrs; + Tensor3DAttributes dst_attrs; + Tensor3DAttributes weights_attrs; +#ifdef BIAS + VectorAttributes biases_attrs; +#endif /* BIAS */ +}; + +#if defined(DATA_TYPE_FP16) +#if defined(PROCESS_4X_3Y_1Z) +TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly); +TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly); +TENSOR_DECLARATION(3, weightsBuffer, uvec2, weights_ptr, weights_shift, 3, readonly); +#ifdef BIAS +TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly); +#endif /* BIAS */ + +#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride1(offset) + +vec4 convolve1x3(vec4 s[3], vec4 w) +{ + vec4 r; + + r = s[0] * w[0] + s[1] * w[1] + s[2] * w[2]; + + return r; +} + +vec4[3] load_unpack_swizzle_stride1(uint offset) +{ + vec4 s[2]; + s = VLOAD2_UNPACK8_HALF(src_ptr, offset); + + vec4 r[3]; + r[0] = s[0]; + r[1] = vec4(s[0].yzw, s[1].x); + r[2] = vec4(s[0].zw, s[1].xy); + + return r; +} + +void main() +{ + Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); + Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift); + Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); + +#ifdef BIAS + VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift); +#endif /* BIAS */ + + vec4 pixels[3]; + for(int i = 0; i < 3; i++) + { + pixels[i] = vec4(0); + } + + uint z_index = gl_GlobalInvocationID.z; + TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_attrs.stride_z); + + vec4 w[3]; + w[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(weights_ptr, weights_iter); + w[1] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0)); + w[2] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0)); + + vec4 s[3]; + vec4 r; + // first line + s = LOAD_UNPACK_SWIZZLE(CURRENT_ITEM_OFFSET(src_iter)); + + r = convolve1x3(s, w[0]); + pixels[0] += r; + + // second line + s 
= LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 1, 0)); + + r = convolve1x3(s, w[1]); + pixels[0] += r; + r = convolve1x3(s, w[0]); + pixels[1] += r; + + // third line + s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 2, 0)); + + r = convolve1x3(s, w[2]); + pixels[0] += r; + r = convolve1x3(s, w[1]); + pixels[1] += r; + r = convolve1x3(s, w[0]); + pixels[2] += r; + + // forth line + s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 3, 0)); + + r = convolve1x3(s, w[2]); + pixels[1] += r; + r = convolve1x3(s, w[1]); + pixels[2] += r; + + // fifth line + s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 4, 0)); + + r = convolve1x3(s, w[2]); + pixels[2] += r; + +#ifdef BIAS + vec2 vec2_b; + float b; + + vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index)); + + if(z_index % uint(2) == uint(0)) + { + b = vec2_b.x; + } + else + { + b = vec2_b.y; + } + + for(int i = 0; i < 3; i++) + { + pixels[i] += vec4(b); + } +#endif /* BIAS */ + + STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]); + STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]); + STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]); +} +#elif defined(PROCESS_4X_1Y_1Z) +TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly); +TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly); +TENSOR_DECLARATION(3, weightsBuffer, uvec2, weights_ptr, weights_shift, 3, readonly); +#ifdef BIAS +TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly); +#endif /* BIAS */ + +#if STRIDE_X == 3 +#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride3(offset) +#elif STRIDE_X == 2 +#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride2(offset) +#elif STRIDE_X == 1 /* STRIDE_X == 1 */ +#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride1(offset) +#else /* STRIDE_X not equals 1 or 2 */ +#error STRIDE_X larger than 2 is not supported +#endif /* STRIDE_X == 2 */ + +vec4 convolve1x3(vec4 s[3], vec4 w) +{ + vec4 r; + + r = s[0] * w[0] + s[1] * w[1] + s[2] * w[2]; + + return r; +} + +vec4[3] load_unpack_swizzle_stride1(uint offset) +{ + vec4 s[2]; + s = VLOAD2_UNPACK8_HALF(src_ptr, offset); + + vec4 r[3]; + r[0] = s[0]; + r[1] = vec4(s[0].yzw, s[1].x); + r[2] = vec4(s[0].zw, s[1].xy); + + return r; +} + +vec4[3] load_unpack_swizzle_stride2(uint offset) +{ + vec4 s[3]; + s[0] = LOAD_UNPACK4_HALF(src_ptr, offset); + s[1] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(1)); + s[2] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(2)); + + vec4 r[3]; + r[0] = vec4(s[0].xz, s[1].xz); + r[1] = vec4(s[0].yw, s[1].yw); + r[2] = vec4(s[0].z, s[1].xz, s[2].x); + + return r; +} + +vec4[3] load_unpack_swizzle_stride3(uint offset) +{ + vec4 s[3]; + s[0] = LOAD_UNPACK4_HALF(src_ptr, offset); + s[1] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(1)); + s[2] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(2)); + + vec4 r[3]; + r[0] = vec4(s[0].xw, s[1].z, s[2].y); + r[1] = vec4(s[0].y, s[1].xw, s[2].z); + r[2] = vec4(s[0].z, s[1].y, s[2].xw); + + return r; +} + +void main() +{ + Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); + Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift); + Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); + +#ifdef BIAS + VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift); +#endif /* BIAS */ + + vec4 pixels = vec4(0.f); + + uint z_index = 
gl_GlobalInvocationID.z; + TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_attrs.stride_z); + + vec4 w[3]; + w[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(weights_ptr, weights_iter); + w[1] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0)); + w[2] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0)); + + vec4 s[3]; + vec4 r; + // first line + s = LOAD_UNPACK_SWIZZLE(CURRENT_ITEM_OFFSET(src_iter)); + + r = convolve1x3(s, w[0]); + pixels += r; + + // second line + s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 1, 0)); + + r = convolve1x3(s, w[1]); + pixels += r; + + // third line + s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 2, 0)); + + r = convolve1x3(s, w[2]); + pixels += r; + +#ifdef BIAS + vec2 vec2_b; + float b; + + vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index)); + + if(z_index % uint(2) == uint(0)) + { + b = vec2_b.x; + } + else + { + b = vec2_b.y; + } + + pixels += vec4(b); +#endif /* BIAS */ + + STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels); +} +#endif /* PROCESS_4X_3Y_1Z */ +#endif /* DATA_TYPE_FP16 */ diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.cpp new file mode 100644 index 0000000000..28b5bd2d62 --- /dev/null +++ b/src/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.cpp @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthwiseConvolutionLayer3x3Kernel.h" + +#include "arm_compute/core/AccessWindowStatic.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/GLES_COMPUTE/GCHelpers.h" +#include "arm_compute/core/GLES_COMPUTE/GCKernelLibrary.h" +#include "arm_compute/core/GLES_COMPUTE/IGCKernel.h" +#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" + +using namespace arm_compute; + +namespace +{ +/** Calculates expected output shape dimension + * + * @param[in] Input shape + * + * @return Expected output shape + */ +TensorShape get_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info) +{ + unsigned int output_width = 0; + unsigned int output_height = 0; + + std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info); + + TensorShape output_shape = input_shape; + output_shape.set(0, output_width); + output_shape.set(1, output_height); + + return output_shape; +} +} // namespace + +GCDepthwiseConvolutionLayer3x3Kernel::GCDepthwiseConvolutionLayer3x3Kernel() + : _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_x(0), _conv_stride_y(0), _conv_pad_left(0), _conv_pad_top(0), _lws(gles::NDRange(1U, 1U, 1U)) +{ +} + +BorderSize GCDepthwiseConvolutionLayer3x3Kernel::border_size() const +{ + return _border_size; +} + +void GCDepthwiseConvolutionLayer3x3Kernel::configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info) +{ + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16); + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != 3 || weights->info()->dimension(1) != 3); + + if(biases != nullptr) + { + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); + ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(2)); + ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1); + } + + // Get convolved dimensions + TensorShape output_shape = get_output_shape(input->info()->tensor_shape(), weights->info()->tensor_shape(), conv_info); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output->info(), + output_shape, + 1, + input->info()->data_type(), + input->info()->fixed_point_position()); + + ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape); + + _input = input; + _output = output; + _weights = weights; + _biases = biases; + _conv_stride_x = conv_info.stride().first; + _conv_stride_y = conv_info.stride().second; + _conv_pad_left = conv_info.pad_left(); + _conv_pad_top = conv_info.pad_top(); + _border_size = BorderSize(_conv_pad_top, conv_info.pad_right(), conv_info.pad_bottom(), _conv_pad_left); + + // Set build options + ARM_COMPUTE_ERROR_ON(_conv_stride_x < 1 || _conv_stride_x > 3); + std::set options; + + options.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(_lws[0])); + options.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(_lws[1])); + options.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(_lws[2])); + options.emplace("#define STRIDE_X " + support::cpp11::to_string(_conv_stride_x)); + options.emplace("#define STRIDE_Y " + 
support::cpp11::to_string(_conv_stride_y)); + + std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16"; + options.emplace(("#define " + dt_name)); + + unsigned int num_elems_read_per_iteration_x = 8; + unsigned int num_elems_read_per_iteration_y = 1; + unsigned int num_elems_written_per_iteration_x = 4; + unsigned int num_elems_written_per_iteration_y = 1; + unsigned int num_elems_written_per_iteration_z = 1; + + if((_conv_stride_x == 1) && (_conv_stride_y == 1)) + { + switch(input->info()->data_type()) + { +#define PROCESS_4X_3Y_1Z + + case DataType::F16: +#if defined(PROCESS_4X_3Y_1Z) + options.emplace("#define PROCESS_4X_3Y_1Z"); + num_elems_read_per_iteration_y = 5; + num_elems_written_per_iteration_y = 3; +#endif /* PROCESS_4X_3Y_1Z */ +#undef PROCESS_4X_3Y_1Z + break; + + default: + ARM_COMPUTE_ERROR("Current data type is not supported"); + break; + } + } + else + { + switch(input->info()->data_type()) + { + case DataType::F16: + options.emplace("#define PROCESS_4X_1Y_1Z"); + break; + + default: + ARM_COMPUTE_ERROR("Current data type is not supported"); + break; + } + } + + if(_biases != nullptr) + { + options.emplace("#define BIAS"); + } + + // Create kernel + std::string kernel_name = "depthwise_convolution_3x3"; + _kernel = static_cast(GCKernelLibrary::get().create_kernel(kernel_name, options)); + + // Calculate output right and bottom border + const int output_width = output->info()->dimension(0); + const int output_height = output->info()->dimension(1); + const int output_padding_right = ceil_to_multiple(output_width, num_elems_written_per_iteration_x * _lws[0]) - output_width; + const int output_padding_bottom = ceil_to_multiple(output_height, num_elems_written_per_iteration_y * _lws[1]) - output_height; + + // Calculate input right and bottom border + const int input_width = input->info()->dimension(0); + const int input_height = input->info()->dimension(1); + const int padding_right = ceil_to_multiple(((output_width + output_padding_right) * _conv_stride_x + 2), num_elems_read_per_iteration_x * _lws[0]) - _conv_pad_left - input_width; + const int padding_bottom = ceil_to_multiple(((output_height + output_padding_bottom) * _conv_stride_y + 2), num_elems_read_per_iteration_y * _lws[1]) - _conv_pad_top - input_height; + + BorderSize border = BorderSize(0, output_padding_right, output_padding_bottom, 0); + + Window win = calculate_max_enlarged_window(*output->info(), Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y, num_elems_written_per_iteration_z), border); + + AccessWindowStatic input_access(input->info(), -_conv_pad_left, -_conv_pad_top, input_width + padding_right, input_height + padding_bottom); + AccessWindowStatic weights_access = AccessWindowStatic(nullptr, 0, 0, 0, 0); + AccessWindowStatic bias_access = AccessWindowStatic(nullptr, 0, 0, 0, 1); + + switch(weights->info()->data_type()) + { + case DataType::F16: + weights_access = AccessWindowStatic(weights->info(), 0, 0, 4, 3); + if(_biases != nullptr) + { + bias_access = AccessWindowStatic(_biases->info(), 0, 0, _biases->info()->dimension(0) + 1, 1); + } + break; + + default: + ARM_COMPUTE_ERROR("Current data type is not supported"); + break; + } + + AccessWindowStatic output_access(output->info(), 0, 0, output_width + output_padding_right, output_height + output_padding_bottom); + + if(_biases != nullptr) + { + update_window_and_padding(win, input_access, weights_access, bias_access, output_access); + } + else + { + 
update_window_and_padding(win, input_access, weights_access, output_access); + } + + output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape())); + + IGCKernel::configure(win); +} + +void GCDepthwiseConvolutionLayer3x3Kernel::run(const Window &window) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + + _kernel.use(); + + // Create input window and adjust + Window win_in = window; + win_in.adjust(Window::DimX, -_conv_pad_left, true); + win_in.adjust(Window::DimY, -_conv_pad_top, true); + win_in.set_dimension_step(Window::DimX, window.x().step() * _conv_stride_x); + win_in.set_dimension_step(Window::DimY, window.y().step() * _conv_stride_y); + + Window slice_in = win_in.first_slice_window_3D(); + Window slice_out = window.first_slice_window_3D(); + Window slice_weights = window.first_slice_window_3D(); + slice_weights.set_dimension_step(Window::DimX, 0); + slice_weights.set_dimension_step(Window::DimY, 0); + + // Set biases + if(_biases != nullptr) + { + unsigned int idx = 3 * num_arguments_per_3D_tensor(); + Window slice_biases; + slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape()); + add_1D_tensor_argument(idx, _biases, 4, slice_biases); + } + + do + { + unsigned int idx = 0; + add_3D_tensor_argument(idx, _input, 1, slice_in); + add_3D_tensor_argument(idx, _output, 2, slice_out); + add_3D_tensor_argument(idx, _weights, 3, slice_weights); + + _kernel.update_shader_params(); + enqueue(*this, slice_out, _lws); + } + while(window.slide_window_slice_3D(slice_out) && win_in.slide_window_slice_3D(slice_in)); +} diff --git a/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp new file mode 100644 index 0000000000..ef65989f40 --- /dev/null +++ b/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h" + +#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h" +#include "arm_compute/core/PixelValue.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h" +#include "support/ToolchainSupport.h" + +using namespace arm_compute; + +void GCDepthwiseConvolutionLayer3x3::configure(IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, weights, biases, output, conv_info); + _kernel = std::move(k); + + // Configure border handler + _border_handler.configure(input, _kernel->border_size(), BorderMode::CONSTANT, PixelValue(0)); +} diff --git a/tests/benchmark/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp b/tests/benchmark/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp new file mode 100644 index 0000000000..05e82d03b3 --- /dev/null +++ b/tests/benchmark/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h" +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h" +#include "tests/GLES_COMPUTE/GCAccessor.h" +#include "tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h" +#include "tests/datasets/MobileNetDepthwiseConvolutionLayerDataset.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "utils/TypePrinter.h" + +namespace arm_compute +{ +namespace test +{ +const auto data_types = framework::dataset::make("DataType", { DataType::F16 }); +using GCDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerFixture; + +TEST_SUITE(GC) + +REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetDepthwiseConvLayer, GCDepthwiseConvolutionLayerFixture, framework::DatasetMode::ALL, + framework::dataset::combine(framework::dataset::combine(datasets::MobileNetDepthwiseConvolutionLayerDataset(), data_types), + framework::dataset::make("Batches", { 1 }))); + +TEST_SUITE_END() +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp new file mode 100644 index 0000000000..cacf6962ee --- /dev/null +++ b/tests/validation/GLES_COMPUTE/DepthwiseConvolutionLayer.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONCLCTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h" +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h" +#include "tests/GLES_COMPUTE/GCAccessor.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets/DepthwiseConvolutionLayerDataset.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +RelativeTolerance tolerance_fp16(half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +constexpr float tolerance_num = 0.07f; /**< Tolerance number */ +} // namespace + +TEST_SUITE(GC) +TEST_SUITE(DepthwiseConvolutionLayer) + +template +using GCDepthwiseConvolutionLayerFixture3x3 = DepthwiseConvolutionLayerValidationFixture; + +TEST_SUITE(Float) +TEST_SUITE(FP16) +TEST_SUITE(W3x3) +FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthwiseConvolutionLayerFixture3x3, framework::DatasetMode::ALL, combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), + framework::dataset::make("DataType", + DataType::F16))) +{ + validate(GCAccessor(_target), _reference, tolerance_fp16, tolerance_num); +} +FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthwiseConvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(), + framework::dataset::make("DataType", + DataType::F16))) +{ + validate(GCAccessor(_target), _reference, tolerance_fp16, tolerance_num); +} +TEST_SUITE_END() +TEST_SUITE_END() +TEST_SUITE_END() + +TEST_SUITE_END() +TEST_SUITE_END() +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h index 3683f7214a..fc48bcec72 100644 --- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h @@ -74,6 +74,7 @@ protected: break; } case DataType::F32: + case DataType::F16: { std::uniform_real_distribution<> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp index 0e88d3dbd3..08caa8efb8 100644 --- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp +++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp @@ -89,14 +89,15 @@ SimpleTensor depthwise_convolution(const SimpleTensor &src, const SimpleTe Coordinates coords(static_cast(x), static_cast(y), static_cast(z), static_cast(r)); size_t filter_offset = filter_plane * z; - T val = 0; + T val(0); for(int j = y - filter_half_height; j <= static_cast(y + filter_half_height); ++j) { for(int i = x - filter_half_width; i <= static_cast(x + filter_half_width); ++i) { coords.set(0, i); coords.set(1, j); - val += *(weights.data() + filter_offset) * tensor_elem_at(src, coords, BorderMode::CONSTANT, 0.f); + T border_value(0); + val += *(weights.data() + filter_offset) * tensor_elem_at(src, coords, BorderMode::CONSTANT, border_value); ++filter_offset; } } @@ -189,6 +190,9 @@ SimpleTensor depthwise_convolution(const SimpleTensor &src, co template SimpleTensor 
depthwise_convolution(const SimpleTensor &src, const SimpleTensor &weights, const SimpleTensor &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info);
+
+template SimpleTensor<half> depthwise_convolution(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &biases, const TensorShape &dst_shape,
+                                                  const PadStrideInfo &conv_info);
 } // namespace reference
 } // namespace validation
 } // namespace test
-- cgit v1.2.1
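
Editor's note: as a quick orientation for reviewers, here is a minimal usage sketch of the new GCDepthwiseConvolutionLayer3x3 function introduced by this patch. It is not part of the change itself: the tensor shapes, the PadStrideInfo values and the GCScheduler initialisation call are assumptions picked purely for illustration, and real FP16 data would still have to be uploaded into the tensors before running.

    // Illustrative only: shapes and setup below are assumptions, not taken from the patch.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
    #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
    #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Create the GLES compute context used by all GC objects (assumed init helper,
        // mirroring the CL backend's scheduler setup).
        GCScheduler::get().default_init();

        // Hypothetical layer: 16 channels of 56x56 input, 3x3 depthwise kernel, stride 1, pad 1.
        GCTensor input, weights, biases, output;
        input.allocator()->init(TensorInfo(TensorShape(56U, 56U, 16U), 1, DataType::F16));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F16));
        biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F16));
        output.allocator()->init(TensorInfo(TensorShape(56U, 56U, 16U), 1, DataType::F16));

        // Configure before allocation so the kernel can request the input/output padding it needs.
        GCDepthwiseConvolutionLayer3x3 depthwise;
        depthwise.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 1, 1, 1));

        input.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        output.allocator()->allocate();

        // Fill input/weights/biases with FP16 data here (e.g. by mapping the tensors), then run.
        depthwise.run();

        return 0;
    }

Calling configure() before allocate() lets the kernel grow the tensors' padding regions; at run time the function fills that border with zeros through GCFillBorderKernel, matching the BorderMode::CONSTANT / PixelValue(0) setup in GCDepthwiseConvolutionLayer.cpp above.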