From bcf8a968da4b26926df8bb770df16d82146bcb54 Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Fri, 12 Oct 2018 10:51:31 +0100 Subject: COMPMID-1580 Implement ReduceMean in NEON Change-Id: Id974efad304c2513b8824a6561ad45ee60b9e7fb Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/153763 Reviewed-by: Giuseppe Rossini Reviewed-by: Isabella Gottardi Tested-by: bsgcomp --- .../core/NEON/kernels/NEReductionOperationKernel.h | 4 +- arm_compute/core/NEON/wrapper/intrinsics/gethigh.h | 53 +++ arm_compute/core/NEON/wrapper/intrinsics/getlane.h | 204 +++++++++ arm_compute/core/NEON/wrapper/intrinsics/getlow.h | 53 +++ .../core/NEON/wrapper/intrinsics/intrinsics.h | 6 + arm_compute/core/NEON/wrapper/intrinsics/load.h | 6 + arm_compute/core/NEON/wrapper/intrinsics/movl.h | 49 +++ arm_compute/core/NEON/wrapper/intrinsics/movn.h | 62 +++ arm_compute/core/NEON/wrapper/intrinsics/mul.h | 6 + arm_compute/core/NEON/wrapper/intrinsics/padd.h | 53 +++ arm_compute/core/NEON/wrapper/intrinsics/store.h | 6 + arm_compute/runtime/NEON/NEFunctions.h | 1 + arm_compute/runtime/NEON/functions/NEReduceMean.h | 79 ++++ .../runtime/NEON/functions/NEReductionOperation.h | 13 +- .../NEON/kernels/NEReductionOperationKernel.cpp | 481 ++++++++++++++++++++- src/runtime/NEON/functions/NEReduceMean.cpp | 117 +++++ .../NEON/functions/NEReductionOperation.cpp | 32 +- tests/validation/CL/ReductionOperation.cpp | 2 +- tests/validation/NEON/ReduceMean.cpp | 176 ++++++++ tests/validation/NEON/ReductionOperation.cpp | 36 +- .../fixtures/ReductionOperationFixture.h | 52 ++- tests/validation/reference/ReductionOperation.cpp | 10 +- 22 files changed, 1439 insertions(+), 62 deletions(-) create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/gethigh.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/getlane.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/getlow.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/movl.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/movn.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/padd.h create mode 100644 arm_compute/runtime/NEON/functions/NEReduceMean.h create mode 100644 src/runtime/NEON/functions/NEReduceMean.cpp create mode 100644 tests/validation/NEON/ReduceMean.cpp diff --git a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h index a20cd46434..a4cb330445 100644 --- a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h +++ b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h @@ -53,7 +53,7 @@ public: /** Set the source, destination of the kernel * - * @param[in] input Source tensor. Data type supported: F32. Data layouts supported: NCHW. + * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. * @param[out] output Destination tensor.Data types and data layouts supported: same as @p input. * Output will have the same number of dimensions as input. * @param[in] axis Axis along which to reduce. Supported reduction axis : 0 @@ -63,7 +63,7 @@ public: /** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperationKernel. * - * @param[in] input Source tensor info. Data type supported: F32. Data layouts supported: NCHW. + * @param[in] input Source tensor info. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. * @param[in] output Destination tensor info.Data types and data layouts supported: same as @p input. 
 *                    Output will have the same number of dimensions as input.
 * @param[in]  axis   Axis along which to reduce. Supported reduction axis : 0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/gethigh.h b/arm_compute/core/NEON/wrapper/intrinsics/gethigh.h
new file mode 100644
index 0000000000..47b0116b84
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/gethigh.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_GET_HIGH_H__
+#define __ARM_COMPUTE_WRAPPER_GET_HIGH_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETHIGH_IMPL(half_vtype, vtype, postfix) \
+    inline half_vtype vgethigh(const vtype val)   \
+    {                                             \
+        return vget_high_##postfix(val);          \
+    }
+
+VGETHIGH_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETHIGH_IMPL(int8x8_t, int8x16_t, s8)
+VGETHIGH_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETHIGH_IMPL(int16x4_t, int16x8_t, s16)
+VGETHIGH_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETHIGH_IMPL(int32x2_t, int32x4_t, s32)
+VGETHIGH_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETHIGH_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETHIGH_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_GET_HIGH_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/getlane.h b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
new file mode 100644
index 0000000000..107ce44e0c
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/getlane.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_GET_LANE_H__
+#define __ARM_COMPUTE_WRAPPER_GET_LANE_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLANE_IMPL_8(stype, vtype, postfix)            \
+    inline stype vgetlane(const vtype vector, const int lane) \
+    {                                                     \
+        switch(lane)                                      \
+        {                                                 \
+            case 0:                                       \
+                return vget_lane_##postfix(vector, 0);    \
+            case 1:                                       \
+                return vget_lane_##postfix(vector, 1);    \
+            case 2:                                       \
+                return vget_lane_##postfix(vector, 2);    \
+            case 3:                                       \
+                return vget_lane_##postfix(vector, 3);    \
+            case 4:                                       \
+                return vget_lane_##postfix(vector, 4);    \
+            case 5:                                       \
+                return vget_lane_##postfix(vector, 5);    \
+            case 6:                                       \
+                return vget_lane_##postfix(vector, 6);    \
+            case 7:                                       \
+                return vget_lane_##postfix(vector, 7);    \
+            default:                                      \
+                ARM_COMPUTE_ERROR("Invalid lane");        \
+        }                                                 \
+    }
+
+#define VGETLANE_IMPL_4(stype, vtype, postfix)            \
+    inline stype vgetlane(const vtype vector, const int lane) \
+    {                                                     \
+        switch(lane)                                      \
+        {                                                 \
+            case 0:                                       \
+                return vget_lane_##postfix(vector, 0);    \
+            case 1:                                       \
+                return vget_lane_##postfix(vector, 1);    \
+            case 2:                                       \
+                return vget_lane_##postfix(vector, 2);    \
+            case 3:                                       \
+                return vget_lane_##postfix(vector, 3);    \
+            default:                                      \
+                ARM_COMPUTE_ERROR("Invalid lane");        \
+        }                                                 \
+    }
+
+#define VGETLANE_IMPL_2(stype, vtype, postfix)            \
+    inline stype vgetlane(const vtype vector, const int lane) \
+    {                                                     \
+        switch(lane)                                      \
+        {                                                 \
+            case 0:                                       \
+                return vget_lane_##postfix(vector, 0);    \
+            case 1:                                       \
+                return vget_lane_##postfix(vector, 1);    \
+            default:                                      \
+                ARM_COMPUTE_ERROR("Invalid lane");        \
+        }                                                 \
+    }
+
+VGETLANE_IMPL_8(uint8_t, uint8x8_t, u8)
+VGETLANE_IMPL_8(int8_t, int8x8_t, s8)
+VGETLANE_IMPL_4(uint16_t, uint16x4_t, u16)
+VGETLANE_IMPL_4(int16_t, int16x4_t, s16)
+VGETLANE_IMPL_2(uint32_t, uint32x2_t, u32)
+VGETLANE_IMPL_2(int32_t, int32x2_t, s32)
+VGETLANE_IMPL_2(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VGETQLANE_IMPL_16(stype, vtype, postfix)          \
+    inline stype vgetqlane(const vtype vector, const int lane) \
+    {                                                     \
+        switch(lane)                                      \
+        {                                                 \
+            case 0:                                       \
+                return vgetq_lane_##postfix(vector, 0);   \
+            case 1:                                       \
+                return vgetq_lane_##postfix(vector, 1);   \
+            case 2:                                       \
+                return vgetq_lane_##postfix(vector, 2);   \
+            case 3:                                       \
+                return vgetq_lane_##postfix(vector, 3);   \
+            case 4:                                       \
+                return vgetq_lane_##postfix(vector, 4);   \
+            case 5:                                       \
+                return vgetq_lane_##postfix(vector, 5);   \
+            case 6:                                       \
+                return vgetq_lane_##postfix(vector, 6);   \
+            case 7:                                       \
+                return vgetq_lane_##postfix(vector, 7);   \
+            case 8:                                       \
+                return vgetq_lane_##postfix(vector, 8);   \
+            case 9:                                       \
+                return vgetq_lane_##postfix(vector, 9);   \
+            case 10:                                      \
+                return vgetq_lane_##postfix(vector, 10);  \
+            case 11:                                      \
+                return vgetq_lane_##postfix(vector, 11);  \
+            case 12:                                      \
+                return vgetq_lane_##postfix(vector, 12);  \
+            case 13:                                      \
+                return vgetq_lane_##postfix(vector, 13);  \
+            case 14:                                      \
+                return vgetq_lane_##postfix(vector, 14);  \
+            case 15:                                      \
+                return vgetq_lane_##postfix(vector, 15);  \
+            default:                                      \
ARM_COMPUTE_ERROR("Invalid lane"); \ + } \ + } + +#define VGETQLANE_IMPL_8(stype, vtype, postfix) \ + inline stype vgetqlane(const vtype vector, const int lane) \ + { \ + switch(lane) \ + { \ + case 0: \ + return vgetq_lane_##postfix(vector, 0); \ + case 1: \ + return vgetq_lane_##postfix(vector, 1); \ + case 2: \ + return vgetq_lane_##postfix(vector, 2); \ + case 3: \ + return vgetq_lane_##postfix(vector, 3); \ + case 4: \ + return vgetq_lane_##postfix(vector, 4); \ + case 5: \ + return vgetq_lane_##postfix(vector, 5); \ + case 6: \ + return vgetq_lane_##postfix(vector, 6); \ + case 7: \ + return vgetq_lane_##postfix(vector, 7); \ + default: \ + ARM_COMPUTE_ERROR("Invalid lane"); \ + } \ + } + +#define VGETQLANE_IMPL_4(stype, vtype, postfix) \ + inline stype vgetqlane(const vtype vector, const int lane) \ + { \ + switch(lane) \ + { \ + case 0: \ + return vgetq_lane_##postfix(vector, 0); \ + case 1: \ + return vgetq_lane_##postfix(vector, 1); \ + case 2: \ + return vgetq_lane_##postfix(vector, 2); \ + case 3: \ + return vgetq_lane_##postfix(vector, 3); \ + default: \ + ARM_COMPUTE_ERROR("Invalid lane"); \ + } \ + } + +VGETQLANE_IMPL_16(uint8_t, uint8x16_t, u8) +VGETQLANE_IMPL_16(int8_t, int8x16_t, s8) +VGETQLANE_IMPL_8(uint16_t, uint16x8_t, u16) +VGETQLANE_IMPL_8(int16_t, int16x8_t, s16) +VGETQLANE_IMPL_4(uint32_t, uint32x4_t, u32) +VGETQLANE_IMPL_4(int32_t, int32x4_t, s32) +VGETQLANE_IMPL_4(float, float32x4_t, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VGETQLANE_IMPL_8(float16_t, float16x8_t, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VGETLANE_IMPL_8 +#undef VGETLANE_IMPL_4 +#undef VGETLANE_IMPL_2 +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_GET_LANE_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/getlow.h b/arm_compute/core/NEON/wrapper/intrinsics/getlow.h new file mode 100644 index 0000000000..cc5d8bb2f2 --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/getlow.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_GET_LOW_H__
+#define __ARM_COMPUTE_WRAPPER_GET_LOW_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLOW_IMPL(half_vtype, vtype, postfix) \
+    inline half_vtype vgetlow(const vtype val)   \
+    {                                            \
+        return vget_low_##postfix(val);          \
+    }
+
+VGETLOW_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETLOW_IMPL(int8x8_t, int8x16_t, s8)
+VGETLOW_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETLOW_IMPL(int16x4_t, int16x8_t, s16)
+VGETLOW_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETLOW_IMPL(int32x2_t, int32x4_t, s32)
+VGETLOW_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLOW_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETLOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_GET_LOW_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 58bfba9645..2e6fd75005 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -28,13 +28,19 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/exp.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/gethigh.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/getlane.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/getlow.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/inv.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/min.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/mla.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/movl.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/movn.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/mul.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
 
 #endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/load.h b/arm_compute/core/NEON/wrapper/intrinsics/load.h
index 442d857497..b5d9ed2a35 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/load.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/load.h
@@ -45,6 +45,9 @@ VLOAD_IMPL(int32_t, int32x2_t, s32)
 //VLOAD_IMPL(uint64_t, uint64x1_t, u64)
 //VLOAD_IMPL(int64_t, int64x1_t, s64)
 VLOAD_IMPL(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOAD_IMPL(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #define VLOADQ_IMPL(stype, vtype, postfix) \
     inline vtype vloadq(const stype *ptr)  \
@@ -61,6 +64,9 @@ VLOADQ_IMPL(int32_t, int32x4_t, s32)
 //VLOAD_IMPL(uint64_t, uint64x1_t, u64)
 //VLOAD_IMPL(int64_t, int64x1_t, s64)
 VLOADQ_IMPL(float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOADQ_IMPL(float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #undef VLOAD_IMPL
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/movl.h b/arm_compute/core/NEON/wrapper/intrinsics/movl.h
new file mode 100644
index 0000000000..728fe4e097
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/movl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MOVL_H__
+#define __ARM_COMPUTE_WRAPPER_MOVL_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVL_IMPL(ptype, vtype, prefix, postfix) \
+    inline ptype vmovl(const vtype &a)            \
+    {                                             \
+        return prefix##_##postfix(a);             \
+    }
+
+VMOVL_IMPL(uint16x8_t, uint8x8_t, vmovl, u8)
+VMOVL_IMPL(int16x8_t, int8x8_t, vmovl, s8)
+VMOVL_IMPL(uint32x4_t, uint16x4_t, vmovl, u16)
+VMOVL_IMPL(int32x4_t, int16x4_t, vmovl, s16)
+VMOVL_IMPL(uint64x2_t, uint32x2_t, vmovl, u32)
+VMOVL_IMPL(int64x2_t, int32x2_t, vmovl, s32)
+
+#undef VMOVL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MOVL_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/movn.h b/arm_compute/core/NEON/wrapper/intrinsics/movn.h
new file mode 100644
index 0000000000..6ed262edb6
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/movn.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MOVN_H__
+#define __ARM_COMPUTE_WRAPPER_MOVN_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVN_IMPL(dtype, vtype, prefix, postfix) \
+    inline dtype vmovn(const vtype &a)            \
+    {                                             \
+        return prefix##_##postfix(a);             \
+    }
+
+VMOVN_IMPL(uint32x2_t, uint64x2_t, vmovn, u64)
+VMOVN_IMPL(int32x2_t, int64x2_t, vmovn, s64)
+VMOVN_IMPL(uint16x4_t, uint32x4_t, vmovn, u32)
+VMOVN_IMPL(int16x4_t, int32x4_t, vmovn, s32)
+VMOVN_IMPL(uint8x8_t, uint16x8_t, vmovn, u16)
+VMOVN_IMPL(int8x8_t, int16x8_t, vmovn, s16)
+
+#define VQMOVN_IMPL(dtype, vtype, prefix, postfix) \
+    inline dtype vqmovn(const vtype &a)            \
+    {                                              \
+        return prefix##_##postfix(a);              \
+    }
+
+VQMOVN_IMPL(uint32x2_t, uint64x2_t, vqmovn, u64)
+VQMOVN_IMPL(int32x2_t, int64x2_t, vqmovn, s64)
+VQMOVN_IMPL(uint16x4_t, uint32x4_t, vqmovn, u32)
+VQMOVN_IMPL(int16x4_t, int32x4_t, vqmovn, s32)
+VQMOVN_IMPL(uint8x8_t, uint16x8_t, vqmovn, u16)
+VQMOVN_IMPL(int8x8_t, int16x8_t, vqmovn, s16)
+
+#undef VMOVN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MOVN_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/mul.h b/arm_compute/core/NEON/wrapper/intrinsics/mul.h
index c1908fc7b3..932b746965 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/mul.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/mul.h
@@ -43,6 +43,9 @@ VMUL_IMPL(int16x4_t, int16x4_t, vmul, s16)
 VMUL_IMPL(uint32x2_t, uint32x2_t, vmul, u32)
 VMUL_IMPL(int32x2_t, int32x2_t, vmul, s32)
 VMUL_IMPL(float32x2_t, float32x2_t, vmul, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x4_t, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VMUL_IMPL(uint8_t, uint8x16_t, vmulq, u8)
 VMUL_IMPL(int8_t, int8x16_t, vmulq, s8)
@@ -51,6 +54,9 @@ VMUL_IMPL(int16_t, int16x8_t, vmulq, s16)
 VMUL_IMPL(uint32_t, uint32x4_t, vmulq, u32)
 VMUL_IMPL(int32_t, int32x4_t, vmulq, s32)
 VMUL_IMPL(float32x4_t, float32x4_t, vmulq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x8_t, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #undef VMUL_IMPL
 } // namespace wrapper
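The `vmovl`/`vmovn`/`vqmovn` wrappers above exist so that the QASYMM8 reduction kernel further down can widen 8-bit lanes before accumulating and narrow the result back afterwards. A minimal sketch of that widen/accumulate/narrow pattern, written with plain NEON intrinsics so it stands alone (the averaging function is illustrative, not part of the patch):

```cpp
#include <arm_neon.h>

// Average two vectors of eight uint8 lanes without overflow:
// widen to 16 bits, add, halve, then saturating-narrow back to 8 bits.
uint8x8_t average_u8(uint8x8_t a, uint8x8_t b)
{
    const uint16x8_t sum = vaddq_u16(vmovl_u8(a), vmovl_u8(b));
    return vqmovn_u16(vshrq_n_u16(sum, 1));
}
```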
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/padd.h b/arm_compute/core/NEON/wrapper/intrinsics/padd.h
new file mode 100644
index 0000000000..5ee2173df8
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/padd.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_PADD_H__
+#define __ARM_COMPUTE_WRAPPER_PADD_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPADD_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vpadd(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_PADD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index be89602c09..35c427902e 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -45,6 +45,9 @@ VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
 //VSTORE_IMPL(uint64_t, 1, vst1, u64)
 //VSTORE_IMPL(int64_t, 1, vst1, s64)
 VSTORE_IMPL(float, float32x2_t, vst1, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VSTORE_IMPL(uint8_t, uint8x16_t, vst1q, u8)
 VSTORE_IMPL(int8_t, int8x16_t, vst1q, s8)
@@ -55,6 +58,9 @@ VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
 //VSTORE_IMPL(uint64_t, 2, vst1q, u64)
 //VSTORE_IMPL(int64_t, 2, vst1q, s64)
 VSTORE_IMPL(float, float32x4_t, vst1q, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #undef VSTORE_IMPL
 } // namespace wrapper
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 2bf8bcd515..57bd5859f9 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -101,6 +101,7 @@
 #include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NERNNLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
 #include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
 #include "arm_compute/runtime/NEON/functions/NERemap.h"
 #include "arm_compute/runtime/NEON/functions/NEReorgLayer.h"
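Together, `vgethigh`, `vgetlow`, `vpadd` and `vgetlane` compose into the horizontal reduction that the reworked kernel performs generically for any element type. A minimal sketch for the F32 case (the function name is invented here for illustration; it assumes the library headers are on the include path):

```cpp
#include <arm_neon.h>
#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

float horizontal_sum_f32(float32x4_t v)
{
    using namespace arm_compute::wrapper;
    auto pair = vpadd(vgethigh(v), vgetlow(v)); // {v2 + v3, v0 + v1}
    pair      = vpadd(pair, pair);              // {v0 + v1 + v2 + v3, ...}
    return vgetlane(pair, 0);
}
```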
diff --git a/arm_compute/runtime/NEON/functions/NEReduceMean.h b/arm_compute/runtime/NEON/functions/NEReduceMean.h
new file mode 100644
index 0000000000..b20ca9cc1b
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEReduceMean.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEON_REDUCE_MEAN_H__
+#define __ARM_COMPUTE_NEON_REDUCE_MEAN_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
+#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to perform reduce operation */
+class NEReduceMean : public IFunction
+{
+public:
+    /** Constructor */
+    NEReduceMean(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Configure kernel
+     *
+     * @note Supported tensor rank: up to 4
+     *
+     * @param[in]  input          Source tensor. Data type supported: QASYMM8/F16/F32
+     * @param[in]  reduction_axis Reduction axis vector.
+     * @param[in]  keep_dims      If positive, retains reduced dimensions with length 1.
+     * @param[out] output         Destination tensor. Data type supported: Same as @p input
+     */
+    void configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref NEReduceMean
+     *
+     * @param[in] input          Source tensor. Data type supported: QASYMM8/F16/F32
+     * @param[in] reduction_axis Reduction axis vector.
+     * @param[in] keep_dims      If positive, retains reduced dimensions with length 1.
+     * @param[in] output         Destination tensor. Data type supported: Same as @p input
+     *
+     * @return A status
+     */
+    static Status validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    MemoryGroup                             _memory_group;
+    std::unique_ptr<NEReductionOperation[]> _reduction_kernels{ nullptr };
+    std::unique_ptr<Tensor[]>               _reduced_outs{ nullptr };
+    NEReshapeLayer                          _reshape;
+    unsigned int                            _reduction_ops;
+    bool                                    _keep_dims;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEON_REDUCE_MEAN_H__ */
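Usage follows the library's usual configure/allocate/run flow. A sketch (shapes, axes and the function name are illustrative; error handling omitted):

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_reduce_mean_example()
{
    Tensor src{};
    Tensor dst{};
    src.allocator()->init(TensorInfo(TensorShape(32U, 16U, 8U), 1, DataType::F32));

    NEReduceMean reduce_mean{};
    const Coordinates axes(1, 2);                  // reduce dimensions 1 and 2
    reduce_mean.configure(&src, axes, true, &dst); // keep_dims = true -> dst is 32x1x1

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src ...
    reduce_mean.run();
}
```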
diff --git a/arm_compute/runtime/NEON/functions/NEReductionOperation.h b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
index 02b29fb64e..5bc7059b62 100644
--- a/arm_compute/runtime/NEON/functions/NEReductionOperation.h
+++ b/arm_compute/runtime/NEON/functions/NEReductionOperation.h
@@ -47,16 +47,16 @@ public:
     NEReductionOperation();
 
     /** Set the input and output tensors.
      *
-     * @param[in, out] input  Source tensor. Data type supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
-     * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
-     * @param[in]      axis   Dimension along which to reduce. Supported reduction axis : 0
-     * @param[in]      op     Reduction operation to perform.
+     * @param[in]  input  Source tensor. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+     * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+     * @param[in]  axis   Dimension along which to reduce. Supported reduction axis : 0
+     * @param[in]  op     Reduction operation to perform.
      */
     void configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op);
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperation.
      *
-     * @param[in] input  Source tensor info. Data type supported: F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
+     * @param[in] input  Source tensor info. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW. (Written to only for border_size != 0)
      * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
      * @param[in] axis   Dimension along which to reduce. Supported reduction axis : 0
      * @param[in] op     Reduction operation to perform.
@@ -72,6 +72,7 @@ private:
     NEReductionOperationKernel _reduction_kernel;
     NEFillBorderKernel         _fill_border_kernel;
     size_t                     _window_split;
+    int                        _reduction_axis;
 };
-}
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_NEREDUCTIONOPERATION_H__ */
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index 30f21bbf33..b77219cd79 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -32,10 +32,11 @@
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
 
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include <arm_neon.h>
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 namespace
 {
 template <class F>
@@ -57,31 +58,281 @@ public:
             Iterator in(input, in_slice);
             Iterator out(output, out_slice);
 
-            f(in, out, in_slice, out_slice);
+            f(in, out, in_slice, out_slice, *input->info());
+        }
+        while(window.slide_window_slice_1D(in_slice) && out_window.slide_window_slice_1D(out_slice));
+    }
+    static void reduceY(const Window &window, const ITensor *input, ITensor *output, F f)
+    {
+        // Set in window
+        Window in_window(window);
+
+        in_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+        // Get first input and output slices
+        Window in_slice  = in_window.first_slice_window_2D();
+        Window out_slice = window.first_slice_window_2D();
+
+        do
+        {
+            Iterator in(input, in_slice);
+            Iterator out(output, out_slice);
+
+            f(in, out, in_slice, out_slice, *input->info(), 1);
+        }
+        while(in_window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+    }
+    static void reduceZ(const Window &window, const ITensor *input, ITensor *output, F f)
+    {
+        // Set in window
+        Window in_window(window);
+
+        in_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
+
+        // Get first input and output slices
+        Window in_slice  = in_window.first_slice_window_3D();
+        Window out_slice = window.first_slice_window_3D();
+
+        do
+        {
+            Iterator in(input, in_slice);
+            Iterator out(output, out_slice);
+
+            f(in, out, in_slice, out_slice, *input->info(), 2);
         }
-        while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(out_slice));
+        while(in_window.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
+    }
+    static void reduceW(const Window &window, const ITensor *input, ITensor *output, F f)
+    {
+        // Set in/out window
+        Window in_window(window);
+        Window out_window(window);
+
+        in_window.set(3, Window::Dimension(0, 1, 1));
+        out_window.set(3, Window::Dimension(0, 1, 1));
+
+        // Get first input and output slices
+        Window in_slice  = in_window.first_slice_window_4D();
+        Window out_slice = out_window.first_slice_window_4D();
+
+        do
+        {
+            Iterator in(input, in_slice);
+            Iterator out(output, out_slice);
+
+            f(in, out, in_slice, out_slice, *input->info(), 3);
+        }
+        while(in_window.slide_window_slice_4D(in_slice) && out_window.slide_window_slice_4D(out_slice));
     }
 };
 
-struct SumsqOpX
+template <typename T, int S, ReductionOperation op>
+struct RedOpX
 {
+    /** NEON vector tag type. */
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info)
     {
         ARM_COMPUTE_UNUSED(out_slice);
-        float32x4_t vec_sum_value = vdupq_n_f32(0.f);
+        auto vec_sum_value = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
 
         execute_window_loop(in_slice, [&](const Coordinates & id)
         {
-            const auto        in_ptr       = reinterpret_cast<const float *>(input.ptr());
-            const float32x4_t vec_elements = vld1q_f32(in_ptr);
-            vec_sum_value                  = vaddq_f32(vmulq_f32(vec_elements, vec_elements), vec_sum_value);
+            const auto in_ptr       = reinterpret_cast<const T *>(input.ptr());
+            const auto vec_elements = wrapper::vloadq(in_ptr);
+
+            if(op == ReductionOperation::SUM_SQUARE)
+            {
+                vec_sum_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_sum_value);
+            }
+            else
+            {
+                vec_sum_value = wrapper::vadd(vec_elements, vec_sum_value);
+            }
         },
         input);
 
-        float32x2_t carry_addition = vpadd_f32(vget_high_f32(vec_sum_value), vget_low_f32(vec_sum_value));
-        carry_addition             = vpadd_f32(carry_addition, carry_addition);
+        auto carry_addition = wrapper::vpadd(wrapper::vgethigh(vec_sum_value), wrapper::vgetlow(vec_sum_value));
+        carry_addition      = wrapper::vpadd(carry_addition, carry_addition);
+
+        auto res = wrapper::vgetlane(carry_addition, 0);
+        if(op == ReductionOperation::MEAN_SUM)
+        {
+            res /= in_info.dimension(0);
+        }
 
-        *(reinterpret_cast<float *>(output.ptr())) = vget_lane_f32(carry_addition, 0);
+        *(reinterpret_cast<T *>(output.ptr())) = res;
     }
 };
+
+template <ReductionOperation op>
+struct RedOpX_qasymm8
+{
+    inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info)
+    {
+        ARM_COMPUTE_UNUSED(out_slice);
+        auto vec_sum_value1 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+        auto vec_sum_value2 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+        auto vec_sum_value3 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+        auto vec_sum_value4 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+
+        execute_window_loop(in_slice, [&](const Coordinates & id)
+        {
+            const auto vec_elements = wrapper::vloadq(input.ptr());
+
+            const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+            const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+            const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+            const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+            const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+            const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+            vec_sum_value1 = wrapper::vadd(temp32x4t_1, vec_sum_value1);
+            vec_sum_value2 = wrapper::vadd(temp32x4t_2, vec_sum_value2);
+            vec_sum_value3 = wrapper::vadd(temp32x4t_3, vec_sum_value3);
+            vec_sum_value4 = wrapper::vadd(temp32x4t_4, vec_sum_value4);
+        },
+        input);
+
+        auto carry_addition = wrapper::vadd(vec_sum_value1, vec_sum_value2);
+        carry_addition      = wrapper::vadd(carry_addition, vec_sum_value3);
+        carry_addition      = wrapper::vadd(carry_addition, vec_sum_value4);
+
+        auto carry_paddition = wrapper::vpadd(wrapper::vgethigh(carry_addition), wrapper::vgetlow(carry_addition));
+        carry_paddition      = wrapper::vpadd(carry_paddition, carry_paddition);
+        auto res             = wrapper::vgetlane(carry_paddition, 0);
+
+        if(op == ReductionOperation::MEAN_SUM)
+        {
+            res /= in_info.dimension(0);
+        }
+
+        *(output.ptr()) = static_cast<uint8_t>(res);
+    }
+};
+
+template <typename T, int S, ReductionOperation op>
+struct RedOpYZW
+{
+    /** NEON vector tag type. */
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info, int axis)
+    {
+        ARM_COMPUTE_UNUSED(out_slice);
+
+        execute_window_loop(in_slice, [&](const Coordinates & id)
+        {
+            auto vec_sum_value = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+            for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+            {
+                T *in_ptr;
+                switch(axis)
+                {
+                    case 1:
+                        in_ptr = reinterpret_cast<T *>(input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, dim)));
+                        break;
+                    case 2:
+                        in_ptr = reinterpret_cast<T *>(input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, 0, dim)));
+                        break;
+                    case 3:
+                        in_ptr = reinterpret_cast<T *>(input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, 0, 0, dim)));
+                        break;
+                    default:
+                        ARM_COMPUTE_ERROR("Not supported");
+                }
+                const auto vec_elements = wrapper::vloadq(in_ptr);
+
+                if(op == ReductionOperation::SUM_SQUARE)
+                {
+                    vec_sum_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_sum_value);
+                }
+                else
+                {
+                    vec_sum_value = wrapper::vadd(vec_elements, vec_sum_value);
+                }
+            }
+
+            if(op == ReductionOperation::MEAN_SUM)
+            {
+                auto vec_width_inv = wrapper::vinv(wrapper::vdup_n(static_cast<T>(in_info.dimension(axis)), ExactTagType{}));
+                vec_sum_value      = wrapper::vmul(vec_sum_value, vec_width_inv);
+            }
+
+            wrapper::vstore(reinterpret_cast<T *>(output.ptr()), vec_sum_value);
+        },
+        input, output);
+    }
+};
+
+template <ReductionOperation op>
+struct RedOpYZW_qasymm8
+{
+    inline void operator()(Iterator &input, Iterator &output, Window &in_slice, Window &out_slice, const TensorInfo &in_info, int axis)
+    {
+        ARM_COMPUTE_UNUSED(out_slice);
+
+        execute_window_loop(in_slice, [&](const Coordinates & id)
+        {
+            auto vec_sum_value1 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+            auto vec_sum_value2 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+            auto vec_sum_value3 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+            auto vec_sum_value4 = vdupq_n_u32(static_cast<uint32_t>(0.f));
+            for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+            {
+                uint8_t *in_ptr;
+                switch(axis)
+                {
+                    case 1:
+                        in_ptr = input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, dim));
+                        break;
+                    case 2:
+                        in_ptr = input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, 0, dim));
+                        break;
+                    case 3:
+                        in_ptr = input.ptr() + in_info.offset_element_in_bytes(Coordinates(0, 0, 0, dim));
+                        break;
+                    default:
+                        ARM_COMPUTE_ERROR("Not supported");
+                }
+                const auto vec_elements = wrapper::vloadq(in_ptr);
+
+                const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+                const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+                const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+                const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+                const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+                const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+                vec_sum_value1 = wrapper::vadd(temp32x4t_1, vec_sum_value1);
+                vec_sum_value2 = wrapper::vadd(temp32x4t_2, vec_sum_value2);
+                vec_sum_value3 = wrapper::vadd(temp32x4t_3, vec_sum_value3);
+                vec_sum_value4 = wrapper::vadd(temp32x4t_4, vec_sum_value4);
+            }
+
+            if(op == ReductionOperation::MEAN_SUM)
+            {
+                const auto vec_width_inv    = wrapper::vinv(vdupq_n_f32(in_info.dimension(axis)));
+                const auto vec_sum_value1_f = wrapper::vmul(vcvtq_f32_u32(vec_sum_value1), vec_width_inv);
+                const auto vec_sum_value2_f = wrapper::vmul(vcvtq_f32_u32(vec_sum_value2), vec_width_inv);
+                const auto vec_sum_value3_f = wrapper::vmul(vcvtq_f32_u32(vec_sum_value3), vec_width_inv);
+                const auto vec_sum_value4_f = wrapper::vmul(vcvtq_f32_u32(vec_sum_value4), vec_width_inv);
+
+                vec_sum_value1 = vcvtq_u32_f32(vec_sum_value1_f);
+                vec_sum_value2 = vcvtq_u32_f32(vec_sum_value2_f);
+                vec_sum_value3 = vcvtq_u32_f32(vec_sum_value3_f);
+                vec_sum_value4 = vcvtq_u32_f32(vec_sum_value4_f);
+            }
+
+            const auto temp16x8t_1 = vcombine_u16(wrapper::vqmovn(vec_sum_value1), wrapper::vqmovn(vec_sum_value2));
+            const auto temp16x8t_2 = vcombine_u16(wrapper::vqmovn(vec_sum_value3), wrapper::vqmovn(vec_sum_value4));
+            auto       res         = vcombine_u8(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));
+            wrapper::vstore(output.ptr(), res);
+        },
+        input, output);
+    }
+};
+
@@ -90,7 +341,186 @@ void reduce_sumsq(const Window &window, const ITensor *input, ITensor *output, u
     switch(axis)
     {
         case 0:
-            return Reducer<SumsqOpX>::reduceX(window, input, output, SumsqOpX());
+            switch(input->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpX<float16_t, 8, ReductionOperation::SUM_SQUARE>>::reduceX(window, input, output, RedOpX<float16_t, 8, ReductionOperation::SUM_SQUARE>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpX<float, 4, ReductionOperation::SUM_SQUARE>>::reduceX(window, input, output, RedOpX<float, 4, ReductionOperation::SUM_SQUARE>());
+                case DataType::QASYMM8:
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 1:
+            switch(input->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>>::reduceY(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>>::reduceY(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>());
+                case DataType::QASYMM8:
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 2:
+            switch(input->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>>::reduceZ(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>());
+                case DataType::QASYMM8:
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 3:
+            switch(input->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>>::reduceW(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM_SQUARE>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>>::reduceW(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM_SQUARE>());
+                case DataType::QASYMM8:
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        default:
+            ARM_COMPUTE_ERROR("Unsupported reduction axis");
+    }
+}
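The three dispatchers (`reduce_sumsq` above, `reduce_sum` and `reduce_mean_sum` below) are identical apart from the `ReductionOperation` they pin as a template argument. A condensed model of the pattern, shown for the F32 column only (a sketch using names from this patch; the patch itself deliberately spells every case out):

```cpp
template <ReductionOperation op>
void reduce_op(const Window &window, const ITensor *input, ITensor *output, unsigned int axis)
{
    switch(axis)
    {
        case 0:
            return Reducer<RedOpX<float, 4, op>>::reduceX(window, input, output, RedOpX<float, 4, op>());
        case 1:
            return Reducer<RedOpYZW<float, 4, op>>::reduceY(window, input, output, RedOpYZW<float, 4, op>());
        case 2:
            return Reducer<RedOpYZW<float, 4, op>>::reduceZ(window, input, output, RedOpYZW<float, 4, op>());
        case 3:
            return Reducer<RedOpYZW<float, 4, op>>::reduceW(window, input, output, RedOpYZW<float, 4, op>());
        default:
            ARM_COMPUTE_ERROR("Unsupported reduction axis");
    }
}
```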
+
+void reduce_sum(const Window &window, const ITensor *input, ITensor *output, unsigned int axis)
+{
+    switch(axis)
+    {
+        case 0:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpX_qasymm8<ReductionOperation::SUM>>::reduceX(window, input, output, RedOpX_qasymm8<ReductionOperation::SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpX<float16_t, 8, ReductionOperation::SUM>>::reduceX(window, input, output, RedOpX<float16_t, 8, ReductionOperation::SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpX<float, 4, ReductionOperation::SUM>>::reduceX(window, input, output, RedOpX<float, 4, ReductionOperation::SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 1:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::SUM>>::reduceY(window, input, output, RedOpYZW_qasymm8<ReductionOperation::SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM>>::reduceY(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM>>::reduceY(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 2:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::SUM>>::reduceZ(window, input, output, RedOpYZW_qasymm8<ReductionOperation::SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM>>::reduceZ(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 3:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::SUM>>::reduceW(window, input, output, RedOpYZW_qasymm8<ReductionOperation::SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::SUM>>::reduceW(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::SUM>>::reduceW(window, input, output, RedOpYZW<float, 4, ReductionOperation::SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        default:
+            ARM_COMPUTE_ERROR("Unsupported reduction axis");
+    }
+}
+void reduce_mean_sum(const Window &window, const ITensor *input, ITensor *output, unsigned int axis)
+{
+    switch(axis)
+    {
+        case 0:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpX_qasymm8<ReductionOperation::MEAN_SUM>>::reduceX(window, input, output, RedOpX_qasymm8<ReductionOperation::MEAN_SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpX<float16_t, 8, ReductionOperation::MEAN_SUM>>::reduceX(window, input, output, RedOpX<float16_t, 8, ReductionOperation::MEAN_SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpX<float, 4, ReductionOperation::MEAN_SUM>>::reduceX(window, input, output, RedOpX<float, 4, ReductionOperation::MEAN_SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 1:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>>::reduceY(window, input, output, RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>>::reduceY(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>>::reduceY(window, input, output, RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 2:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>>::reduceZ(window, input, output, RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>>::reduceZ(window, input, output, RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        case 3:
+            switch(input->info()->data_type())
+            {
+                case DataType::QASYMM8:
+                    return Reducer<RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>>::reduceW(window, input, output, RedOpYZW_qasymm8<ReductionOperation::MEAN_SUM>());
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                    return Reducer<RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>>::reduceW(window, input, output, RedOpYZW<float16_t, 8, ReductionOperation::MEAN_SUM>());
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F32:
+                    return Reducer<RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>>::reduceW(window, input, output, RedOpYZW<float, 4, ReductionOperation::MEAN_SUM>());
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+            }
+        default:
+            ARM_COMPUTE_ERROR("Unsupported reduction axis");
+    }
+}
@@ -109,16 +539,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
     ARM_COMPUTE_UNUSED(op);
 
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() != DataLayout::NCHW);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
 
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 0, "Unsupported reduction axis, Supported axis is 0");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
 
     if(output->total_size() != 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON(output->data_layout() != DataLayout::NCHW);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
 
         const TensorShape output_shape         = calculate_output_shape(input->tensor_shape(), axis);
         const TensorInfo  tensor_info_reshaped = input->clone()->set_tensor_shape(output_shape);
@@ -170,10 +599,11 @@
     unsigned int num_elems_processed_per_iteration = 16 / data_size_from_type(input->info()->data_type());
 
-    _input       = input;
-    _output      = output;
-    _border_size = (axis == 0) ? BorderSize(0, num_elems_processed_per_iteration - (input->info()->dimension(0) % num_elems_processed_per_iteration), 0, 0) : BorderSize();
-    _op          = op;
+    _input          = input;
+    _output         = output;
+    _border_size    = (axis == 0) ? BorderSize(0, num_elems_processed_per_iteration - (input->info()->dimension(0) % num_elems_processed_per_iteration), 0, 0) : BorderSize();
+    _op             = op;
+    _reduction_axis = axis;
 
     // Configure kernel window
     auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis);
@@ -202,7 +632,14 @@
         case ReductionOperation::SUM_SQUARE:
             reduce_sumsq(window, _input, _output, _reduction_axis);
             break;
+        case ReductionOperation::MEAN_SUM:
+            reduce_mean_sum(window, _input, _output, _reduction_axis);
+            break;
+        case ReductionOperation::SUM:
+            reduce_sum(window, _input, _output, _reduction_axis);
+            break;
         default:
             ARM_COMPUTE_ERROR("Unsupported reduction operation.");
     }
 }
+} // namespace arm_compute
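For QASYMM8, `MEAN_SUM` accumulates the raw quantized values in 32-bit lanes and performs the division in float (`vcvtq_f32_u32`, multiply by the reciprocal of the reduced dimension's size, `vcvtq_u32_f32`). A scalar model of one lane of that path (illustrative only; the quantization offset and scale are untouched, exactly as in the kernel):

```cpp
#include <cstdint>

// What RedOpX_qasymm8<ReductionOperation::MEAN_SUM> computes per lane:
// widen, accumulate in 32 bits, then divide by the reduced length in float.
uint8_t mean_of_quantized_values(const uint8_t *x, unsigned int n)
{
    uint32_t acc = 0; // wide accumulator, as in the vector code
    for(unsigned int i = 0; i < n; ++i)
    {
        acc += x[i];
    }
    return static_cast<uint8_t>(static_cast<float>(acc) * (1.f / n));
}
```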
diff --git a/src/runtime/NEON/functions/NEReduceMean.cpp b/src/runtime/NEON/functions/NEReduceMean.cpp
new file mode 100644
index 0000000000..0b022df729
--- /dev/null
+++ b/src/runtime/NEON/functions/NEReduceMean.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+using namespace arm_compute;
+
+NEReduceMean::NEReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
+{
+}
+
+Status NEReduceMean::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
+{
+    ARM_COMPUTE_UNUSED(keep_dims);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
+    ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());
+
+    for(unsigned int i = 0; i < reduction_axis.num_dimensions(); ++i)
+    {
+        if(output->total_size() > 0)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(reduction_axis[i]) != 1);
+            ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(reduction_axis[i]) > input->num_dimensions() - 1);
+        }
+
+        ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output, reduction_axis[i], ReductionOperation::MEAN_SUM));
+    }
+
+    return Status{};
+}
+
+void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
+    _reduction_ops     = reduction_axis.num_dimensions();
+    _reduction_kernels = arm_compute::support::cpp14::make_unique<NEReductionOperation[]>(_reduction_ops);
+    _reduced_outs      = arm_compute::support::cpp14::make_unique<Tensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
+    _keep_dims         = keep_dims;
+
+    // Perform reduction for every axis
+    for(unsigned int i = 0; i < _reduction_ops; ++i)
+    {
+        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+        out_shape.set(reduction_axis[i], 1);
+        auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+
+        if(i == _reduction_ops - 1 && keep_dims)
+        {
+            _reduction_kernels[i].configure(in, output, reduction_axis[i], ReductionOperation::MEAN_SUM);
+        }
+        else
+        {
+            _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type()));
+            _memory_group.manage(_reduced_outs.get() + i);
+            _reduction_kernels[i].configure(in, _reduced_outs.get() + i, reduction_axis[i], ReductionOperation::MEAN_SUM);
+        }
+    }
+
+    // Allocate intermediate tensors
+    for(unsigned int i = 0; i < _reduction_ops - (keep_dims ?
1 : 0); ++i) + { + _reduced_outs[i].allocator()->allocate(); + } + + // Configure reshape layer if we want to drop the dimensions + if(!keep_dims) + { + TensorShape out_shape = input->info()->tensor_shape(); + for(unsigned int i = 0; i < _reduction_ops; ++i) + { + out_shape.remove_dimension(reduction_axis[i]); + } + auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape)); + _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output); + } +} + +void NEReduceMean::run() +{ + _memory_group.acquire(); + + for(unsigned int i = 0; i < _reduction_ops; ++i) + { + _reduction_kernels[i].run(); + } + + if(!_keep_dims) + { + _reshape.run(); + } + _memory_group.release(); +} diff --git a/src/runtime/NEON/functions/NEReductionOperation.cpp b/src/runtime/NEON/functions/NEReductionOperation.cpp index cd0b42fbe3..188c2bbb18 100644 --- a/src/runtime/NEON/functions/NEReductionOperation.cpp +++ b/src/runtime/NEON/functions/NEReductionOperation.cpp @@ -26,8 +26,8 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/runtime/NEON/NEScheduler.h" -using namespace arm_compute; - +namespace arm_compute +{ namespace { /** Define dimension to split the window @@ -42,6 +42,10 @@ size_t reduction_window_split_dimension(unsigned int axis) { case 0: return Window::DimY; + case 1: + case 2: + case 3: + return Window::DimX; default: ARM_COMPUTE_ERROR("Unsupported reduction axis"); } @@ -59,7 +63,7 @@ BorderMode reduction_operation_border_mode(ReductionOperation op) } // namespace NEReductionOperation::NEReductionOperation() - : _reduction_kernel(), _fill_border_kernel(), _window_split(0) + : _reduction_kernel(), _fill_border_kernel(), _window_split(0), _reduction_axis() { } @@ -72,20 +76,28 @@ Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInf void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); // Configure reduction kernel _reduction_kernel.configure(input, output, axis, op); - _window_split = reduction_window_split_dimension(axis); + _window_split = reduction_window_split_dimension(axis); + _reduction_axis = axis; - // Configure fill border kernel - BorderSize fill_border_size = (axis == 0) ? _reduction_kernel.border_size() : BorderSize(); - BorderMode fill_border_mode = reduction_operation_border_mode(op); - _fill_border_kernel.configure(input, fill_border_size, fill_border_mode, PixelValue(static_cast(0.f))); + if(axis == 0) + { + // Configure fill border kernel + BorderSize fill_border_size = (axis == 0) ? 
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index 35cb0c51e1..516a1341cc 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -84,7 +84,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 // *INDENT-ON*
 
 template <typename T>
-using CLReductionOperationFixture = ReductionOperationValidationFixture<CLTensor, CLAccessor, CLReductionOperation, T>;
+using CLReductionOperationFixture = ReductionOperationFixture<CLTensor, CLAccessor, CLReductionOperation, T>;
 
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp
new file mode 100644
index 0000000000..3cd7ce362e
--- /dev/null
+++ b/tests/validation/NEON/ReduceMean.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SplitDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ReduceMeanFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+
+const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
+                               framework::dataset::make("KeepDims", { true }));
+const auto axis_drop = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1), Coordinates(3) }), framework::dataset::make("KeepDims", { false }));
+} // namespace
+TEST_SUITE(NEON)
+TEST_SUITE(ReduceMean)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid axis
+                                                TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid output shape
+                                                TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32)
+        }),
+        framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32)
+        })),
+        framework::dataset::make("Axis", { Coordinates(4), Coordinates(0,2), Coordinates(2) })),
+        framework::dataset::make("Expected", { false, false, true })),
+        input_info, output_info, axis, expected)
+{
+    const Status status = NEReduceMean::validate(&input_info.clone()->set_is_resizable(false), axis, true, &output_info.clone()->set_is_resizable(false));
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration,
+               framework::DatasetMode::ALL,
+               combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+               shape, data_type)
+{
+    // Create tensors
+    Tensor ref_src = create_tensor<Tensor>(shape, data_type);
+    Tensor dst;
+
+    Coordinates axis(1);
+
+    // Create and Configure function
+    NEReduceMean reduce_mean;
+    reduce_mean.configure(&ref_src, axis, true, &dst);
+
+    // Validate valid region
+    TensorShape output_shape = shape;
+    output_shape.set(1, 1);
+    const ValidRegion valid_region = shape_to_valid_region(output_shape);
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using NEReduceMeanFixture = ReduceMeanFixture<Tensor, Accessor, NEReduceMean, T>;
+
+TEST_SUITE(Float)
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEReduceMeanFixture<half>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEReduceMeanFixture<half>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEReduceMeanFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEReduceMeanFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+template <typename T>
+using NEReduceMeanQuantizedFixture = ReduceMeanQuantizedFixture<Tensor, Accessor, NEReduceMean, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEReduceMeanQuantizedFixture<uint8_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEReduceMeanQuantizedFixture<uint8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // ReduceMean
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
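Note: the multi-axis entries in axis_keep and axis_drop above implicitly rely on the fact
that chaining per-axis MEAN_SUM reductions (as NEReduceMean does) reproduces the joint mean,
since each stage divides by exactly one axis length. A minimal standalone check of that
identity, independent of the library:

    #include <cassert>

    int main()
    {
        // 2x3 input: the joint mean over both axes must equal the mean of per-row means.
        const float v[2][3] = { { 1.f, 2.f, 3.f }, { 4.f, 5.f, 6.f } };

        // Joint mean over all 6 elements.
        float joint = 0.f;
        for(const auto &row : v)
        {
            for(float x : row)
            {
                joint += x;
            }
        }
        joint /= 6.f;

        // Sequential reduction: mean along the inner axis, then mean of those results.
        const float row_means[2] = { (1.f + 2.f + 3.f) / 3.f, (4.f + 5.f + 6.f) / 3.f };
        const float sequential   = (row_means[0] + row_means[1]) / 2.f;

        assert(joint == sequential); // both are exactly 3.5f here
        return 0;
    }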
diff --git a/tests/validation/NEON/ReductionOperation.cpp b/tests/validation/NEON/ReductionOperation.cpp
index b0480b0bc6..2a381bfa58 100644
--- a/tests/validation/NEON/ReductionOperation.cpp
+++ b/tests/validation/NEON/ReductionOperation.cpp
@@ -45,6 +45,8 @@ namespace
 {
 /** Tolerance for float operations */
 RelativeTolerance<float> tolerance_f32(0.00001f);
+/** Tolerance for quantized operations */
+RelativeTolerance<float> tolerance_qasymm8(1);
 } // namespace
 
 TEST_SUITE(NEON)
@@ -81,25 +83,47 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 // *INDENT-ON*
 
 template <typename T>
-using NEReductionOperationFixture = ReductionOperationValidationFixture<Tensor, Accessor, NEReductionOperation, T>;
+using NEReductionOperationFixture = ReductionOperationFixture<Tensor, Accessor, NEReductionOperation, T>;
 
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Op", { ReductionOperation::SUM_SQUARE })))
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Op", { ReductionOperation::SUM_SQUARE })))
+                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
 }
-TEST_SUITE_END()
+TEST_SUITE_END() // FP32
 
-TEST_SUITE_END()
-TEST_SUITE_END()
+template <typename T>
+using NEReductionOperationQuantizedFixture = ReductionOperationQuantizedFixture<Tensor, Accessor, NEReductionOperation, T>;
+
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+                                       datasets::ReductionOperations()),
+                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+                                       datasets::ReductionOperations()),
+                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE_END() // ReductionOperation
+TEST_SUITE_END() // NEON
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/fixtures/ReductionOperationFixture.h b/tests/validation/fixtures/ReductionOperationFixture.h
index 0dee7eb707..9079b47cbb 100644
--- a/tests/validation/fixtures/ReductionOperationFixture.h
+++ b/tests/validation/fixtures/ReductionOperationFixture.h
@@ -45,26 +45,36 @@ class ReductionOperationValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op)
+    void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info)
     {
         const TensorShape output_shape = get_output_shape(shape, axis);
-        _target                        = compute_target(shape, output_shape, data_type, axis, op);
-        _reference                     = compute_reference(shape, output_shape, data_type, axis, op);
+        _target                        = compute_target(shape, output_shape, data_type, axis, op, quantization_info);
+        _reference                     = compute_reference(shape, output_shape, data_type, axis, op, quantization_info);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor)
     {
-        std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
-        library->fill(tensor, distribution, 0);
+        if(!is_data_type_quantized(tensor.data_type()))
+        {
+            std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+            library->fill(tensor, distribution, 0);
+        }
+        else
+        {
+            std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+            std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+
+            library->fill(tensor, distribution, 0);
+        }
     }
 
-    TensorType compute_target(const TensorShape &src_shape, const TensorShape &dst_shape, DataType data_type, unsigned int axis, ReductionOperation op)
+    TensorType compute_target(const TensorShape &src_shape, const TensorShape &dst_shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(src_shape, data_type);
-        TensorType dst = create_tensor<TensorType>(dst_shape, data_type);
+        TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, quantization_info);
+        TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType reduction_func;
@@ -89,10 +99,10 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &src_shape, const TensorShape &dst_shape, DataType data_type, unsigned int axis, ReductionOperation op)
+    SimpleTensor<T> compute_reference(const TensorShape &src_shape, const TensorShape &dst_shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ src_shape, data_type };
+        SimpleTensor<T> src{ src_shape, data_type, 1, quantization_info };
 
         // Fill reference
         fill(src);
@@ -111,6 +121,28 @@ private:
         return output_shape;
     }
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReductionOperationQuantizedFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info = QuantizationInfo())
+    {
+        ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, quantization_info);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReductionOperationFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op)
+    {
+        ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, QuantizationInfo());
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
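Note: the quantized fill path above uses get_quantized_bounds() to map the float sampling
range onto valid QASYMM8 values. A standalone sketch of that mapping; the rounding and
clamping details are an assumption here, not taken from the helper's implementation:

    #include <algorithm>
    #include <cmath>
    #include <utility>

    // Assumed semantics: quantize both ends of the requested float range with
    // q = round(x / scale) + offset, clamped to the 8-bit range [0, 255].
    std::pair<int, int> quantized_bounds(float scale, int offset, float min, float max)
    {
        const int qmin = std::max(0, std::min(255, static_cast<int>(std::lround(min / scale)) + offset));
        const int qmax = std::max(0, std::min(255, static_cast<int>(std::lround(max / scale)) + offset));
        return { qmin, qmax };
    }

    int main()
    {
        // With scale = 1/255 and offset = 0 (the QuantizationInfo used by these tests),
        // -1.0f maps to -255 and clamps to 0, while 1.0f maps exactly to 255.
        const auto bounds = quantized_bounds(1.f / 255, 0, -1.f, 1.f);
        return (bounds.first == 0 && bounds.second == 255) ? 0 : 1;
    }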
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 11947bd293..499263f11e 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -76,7 +76,7 @@ template <typename T>
 SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op)
 {
     // Create reference
-    SimpleTensor<T> dst{ dst_shape, src.data_type() };
+    SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
     const unsigned int src_width  = src.shape().x();
     const unsigned int src_height = src.shape().y();
     const unsigned int src_depth  = src.shape().z();
@@ -102,7 +102,7 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
             {
                 res /= src_width;
             }
-            dst[du] = static_cast<T>(res);
+            dst[du] = saturate_cast<T>(res);
         }
         else
         {
@@ -136,7 +136,7 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
             {
                 res /= src_height;
             }
-            dst[du * src_width + x] = static_cast<T>(res);
+            dst[du * src_width + x] = saturate_cast<T>(res);
         }
         else
         {
@@ -175,7 +175,7 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
             {
                 res /= src_depth;
             }
-            dst[du * src_width * src_height + y * src_width + x] = static_cast<T>(res);
+            dst[du * src_width * src_height + y * src_width + x] = saturate_cast<T>(res);
         }
         else
        {
@@ -218,7 +218,7 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
                 res /= src_batch;
             }
 
-            dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = static_cast<T>(res);
+            dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = saturate_cast<T>(res);
         }
         else
         {
-- 
cgit v1.2.1
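Note on the reference change above: swapping static_cast for saturate_cast only matters for
QASYMM8, where the sum accumulates in a wider type and the final value must clamp into
[0, 255] instead of wrapping modulo 256. A standalone illustration; the clamp shown is the
assumed behaviour of the library's saturate_cast for uint8_t, not its actual implementation:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Assumed saturating narrow: clamp to [0, 255] rather than truncating like static_cast.
    uint8_t saturate_to_u8(int v)
    {
        return static_cast<uint8_t>(std::min(255, std::max(0, v)));
    }

    int main()
    {
        const int acc = 300; // e.g. a MEAN_SUM accumulator before the final store
        std::cout << static_cast<int>(static_cast<uint8_t>(acc)) << '\n'; // 44: wrapped
        std::cout << static_cast<int>(saturate_to_u8(acc)) << '\n';       // 255: clamped
        return 0;
    }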