author     Manuel Bottini <manuel.bottini@arm.com>    2019-05-15 15:30:47 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>    2019-07-17 10:19:00 +0000
commit     ed753266948314922ee56b0d4a3e801264011a12 (patch)
tree       24c509710ed2a7082f6ccecdc7ed20c6ae314595
parent     2ea3761416aab259d9d84620dba2e011bcb5d880 (diff)
COMPMID-2283: Implement SIN operator for NEON
Change-Id: I31ee0e7c9a30540cfd2cad76993afb66abfccc4d
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1169
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
-rw-r--r--  arm_compute/core/NEON/NEMath.h                                |  25
-rw-r--r--  arm_compute/core/NEON/NEMath.inl                              | 123
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h         |   1
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/sin.h                |  48
-rw-r--r--  arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h  |  20
-rw-r--r--  src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp            |  14
-rw-r--r--  src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp        |  11
-rw-r--r--  tests/benchmark/NEON/ElementwiseSin.cpp                       |  73
-rw-r--r--  tests/benchmark/fixtures/ElementWiseUnaryFixture.h            | 144
-rw-r--r--  tests/validation/NEON/ElementwiseSin.cpp                      | 115
-rw-r--r--  tests/validation/fixtures/ElementWiseUnaryFixture.h           |   8
11 files changed, 575 insertions(+), 7 deletions(-)
diff --git a/arm_compute/core/NEON/NEMath.h b/arm_compute/core/NEON/NEMath.h
index 59a03c9d11..560abd6cdc 100644
--- a/arm_compute/core/NEON/NEMath.h
+++ b/arm_compute/core/NEON/NEMath.h
@@ -146,6 +146,22 @@ int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent);
*/
int32_t rounding_divide_by_pow2(int32_t x, int exponent);
+/** Calculate sine.
+ *
+ * @param[in] val Input vector value in radians, F32 format.
+ *
+ * @return The calculated sine.
+ */
+float32x4_t vsinq_f32(float32x4_t val);
+
+/** Calculate sine.
+ *
+ * @param[in] val Input vector value in radians, F32 format.
+ *
+ * @return The calculated sine.
+ */
+float32x2_t vsin_f32(float32x2_t val);
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Calculate hyperbolic tangent.
*
@@ -217,6 +233,15 @@ float16x8_t vexpq_f16(float16x8_t x);
* @return The calculated power.
*/
float16x8_t vpowq_f16(float16x8_t val, float16x8_t n);
+
+/** Calculate sine.
+ *
+ * @param[in] val Input vector value in radians, F16 format.
+ *
+ * @return The calculated sine.
+ */
+float16x8_t vsinq_f16(float16x8_t val);
+
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute
#include "arm_compute/core/NEON/NEMath.inl"
diff --git a/arm_compute/core/NEON/NEMath.inl b/arm_compute/core/NEON/NEMath.inl
index 2247c14f47..eebcdf864f 100644
--- a/arm_compute/core/NEON/NEMath.inl
+++ b/arm_compute/core/NEON/NEMath.inl
@@ -22,6 +22,8 @@
* SOFTWARE.
*/
+#include <cmath>
+
namespace arm_compute
{
/** Exponent polynomial coefficients */
@@ -54,6 +56,12 @@ const std::array<float32x4_t, 8> log_tab =
}
};
+/** Sin polynomial coefficients */
+constexpr float te_sin_coeff2 = 0.166666666666f; // 1/(2*3)
+constexpr float te_sin_coeff3 = 0.05f; // 1/(4*5)
+constexpr float te_sin_coeff4 = 0.023809523810f; // 1/(6*7)
+constexpr float te_sin_coeff5 = 0.013888888889f; // 1/(8*9)
+
#ifndef DOXYGEN_SKIP_THIS
inline float32x4_t vfloorq_f32(float32x4_t val)
{
@@ -190,6 +198,97 @@ inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
+
+inline float32x4_t vsinq_f32(float32x4_t val)
+{
+ const float32x4_t pi_v = vdupq_n_f32(M_PI);
+ const float32x4_t pio2_v = vdupq_n_f32(M_PI / 2);
+ const float32x4_t ipi_v = vdupq_n_f32(1 / M_PI);
+
+ //Find positive or negative
+ const int32x4_t c_v = vabsq_s32(vcvtq_s32_f32(vmulq_f32(val, ipi_v)));
+ const uint32x4_t sign_v = vcleq_f32(val, vdupq_n_f32(0));
+ const uint32x4_t odd_v = vandq_u32(vreinterpretq_u32_s32(c_v), vdupq_n_u32(1));
+
+ uint32x4_t neg_v = veorq_u32(odd_v, sign_v);
+
+ //Modulus a - (n * int(a*(1/n)))
+ float32x4_t ma = vsubq_f32(vabsq_f32(val), vmulq_f32(pi_v, vcvtq_f32_s32(c_v)));
+ const uint32x4_t reb_v = vcgeq_f32(ma, pio2_v);
+
+ //Rebase a between 0 and pi/2
+ ma = vbslq_f32(reb_v, vsubq_f32(pi_v, ma), ma);
+
+ //Taylor series
+ const float32x4_t ma2 = vmulq_f32(ma, ma);
+
+ //2nd elem: x^3 / 3!
+ float32x4_t elem = vmulq_f32(vmulq_f32(ma, ma2), vdupq_n_f32(te_sin_coeff2));
+ float32x4_t res = vsubq_f32(ma, elem);
+
+ //3rd elem: x^5 / 5!
+ elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff3));
+ res = vaddq_f32(res, elem);
+
+ //4th elem: x^7 / 7!
+ elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff4));
+ res = vsubq_f32(res, elem);
+
+ //5th elem: x^9 / 9!
+ elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff5));
+ res = vaddq_f32(res, elem);
+
+ //Change of sign
+ neg_v = vshlq_n_u32(neg_v, 31);
+ res = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(res), neg_v));
+ return res;
+}
+
+inline float32x2_t vsin_f32(float32x2_t val)
+{
+ const float32x2_t pi_v = vdup_n_f32(M_PI);
+ const float32x2_t pio2_v = vdup_n_f32(M_PI / 2);
+ const float32x2_t ipi_v = vdup_n_f32(1 / M_PI);
+
+ //Find positive or negative
+ const int32x2_t c_v = vabs_s32(vcvt_s32_f32(vmul_f32(val, ipi_v)));
+ const uint32x2_t sign_v = vcle_f32(val, vdup_n_f32(0));
+ const uint32x2_t odd_v = vand_u32(vreinterpret_u32_s32(c_v), vdup_n_u32(1));
+
+ uint32x2_t neg_v = veor_u32(odd_v, sign_v);
+
+ //Modulus a - (n * int(a*(1/n)))
+ float32x2_t ma = vsub_f32(vabs_f32(val), vmul_f32(pi_v, vcvt_f32_s32(c_v)));
+ const uint32x2_t reb_v = vcge_f32(ma, pio2_v);
+
+ //Rebase a between 0 and pi/2
+ ma = vbsl_f32(reb_v, vsub_f32(pi_v, ma), ma);
+
+ //Taylor series
+ const float32x2_t ma2 = vmul_f32(ma, ma);
+
+ //2nd elem: x^3 / 3!
+ float32x2_t elem = vmul_f32(vmul_f32(ma, ma2), vdup_n_f32(te_sin_coeff2));
+ float32x2_t res = vsub_f32(ma, elem);
+
+ //3rd elem: x^5 / 5!
+ elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff3));
+ res = vadd_f32(res, elem);
+
+ //4th elem: x^7 / 7!
+ elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff4));
+ res = vsub_f32(res, elem);
+
+ //5th elem: x^9 / 9!
+ elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff5));
+ res = vadd_f32(res, elem);
+
+ //Change of sign
+ neg_v = vshl_n_u32(neg_v, 31);
+ res = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(res), neg_v));
+ return res;
+}
+
#endif /* DOXYGEN_SKIP_THIS */
inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent)
@@ -318,6 +417,30 @@ inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
}
+
+inline float16x8_t vsinq_f16(float16x8_t val)
+{
+ const float32x4_t val_high = vcvt_f32_f16(vget_high_f16(val));
+ const float32x4_t val_low = vcvt_f32_f16(vget_low_f16(val));
+
+ const float32x4_t res_high = vsinq_f32(val_high);
+ const float32x4_t res_low = vsinq_f32(val_low);
+
+ return vcombine_f16(vcvt_f16_f32(res_low), vcvt_f16_f32(res_high));
+}
+
+inline float16x4_t vsin_f16(float16x4_t val)
+{
+ const float32x4_t val_f32 = vcvt_f32_f16(val);
+ const float32x2_t val_high = vget_high_f32(val_f32);
+ const float32x2_t val_low = vget_low_f32(val_f32);
+
+ const float32x2_t res_high = vsin_f32(val_high);
+ const float32x2_t res_low = vsin_f32(val_low);
+
+ return vcvt_f16_f32(vcombine_f32(res_low, res_high));
+}
+
#endif /* DOXYGEN_SKIP_THIS */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute
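The routines above all share the same scheme: fold |x| into [0, pi/2] via the quotient with pi, evaluate a four-term odd Taylor polynomial, then restore the sign from the quadrant parity and the input sign. A scalar sketch of that scheme, written here only to make the lane-wise arithmetic easier to follow (not part of the patch):

    #include <cmath>

    // Scalar mirror of vsinq_f32/vsin_f32: same range reduction, same coefficients.
    inline float sin_taylor_approx(float x)
    {
        const int  c   = static_cast<int>(std::fabs(x) * static_cast<float>(1.0 / M_PI)); // quotient with pi
        const bool neg = ((c & 1) != 0) != (x <= 0.0f);                                   // quadrant parity XOR input sign

        float ma = std::fabs(x) - static_cast<float>(M_PI) * static_cast<float>(c);       // modulus by pi
        if(ma >= static_cast<float>(M_PI / 2))
        {
            ma = static_cast<float>(M_PI) - ma;                                           // rebase into [0, pi/2]
        }

        const float ma2  = ma * ma;
        float       elem = ma * ma2 * 0.166666666666f; // x^3 / 3!
        float       res  = ma - elem;
        elem *= ma2 * 0.05f;                           // x^5 / 5!
        res += elem;
        elem *= ma2 * 0.023809523810f;                 // x^7 / 7!
        res -= elem;
        elem *= ma2 * 0.013888888889f;                 // x^9 / 9!
        res += elem;

        return neg ? -res : res;
    }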
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index c9dbb2fa81..0362ca125f 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -59,6 +59,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/rev64.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/round.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/setlane.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/sin.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/tanh.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sin.h b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
new file mode 100644
index 0000000000..da98876e11
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_SIN_H__
+#define __ARM_COMPUTE_WRAPPER_SIN_H__
+
+#include "arm_compute/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSIN_IMPL(vtype, prefix, postfix) \
+ inline vtype vsin(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VSIN_IMPL(float32x4_t, vsinq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSIN_IMPL(float16x8_t, vsinq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_SIN_H__ */
\ No newline at end of file
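For illustration, a sketch of how the wrapper overload keeps templated kernel code type-agnostic; the helper below is an assumption, not part of the patch:

    #include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

    #include <arm_neon.h>

    // wrapper::vsin resolves to vsinq_f32 for float32x4_t (and to vsinq_f16 for
    // float16x8_t when __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is defined), so the
    // kernel templates never need to name the concrete intrinsic.
    template <typename VectorType>
    VectorType apply_sin(const VectorType &v)
    {
        return arm_compute::wrapper::vsin(v);
    }

    // Usage: apply_sin(vdupq_n_f32(0.5f)) yields four lanes of sin(0.5f).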
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h b/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h
index 085e42d06d..22b3bbeeba 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h
@@ -149,5 +149,25 @@ public:
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
+
+/** Basic function to compute the sine of an input tensor. */
+class NESinLayer : public INESimpleFunction
+{
+public:
+ /** Initialize the function
+ *
+ * @param[in] input Input tensor. Data types supported: F16/F32.
+ * @param[out] output Output tensor. Data types supported: same as @p input.
+ */
+ void configure(const ITensor *input, ITensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref NESinLayer
+ *
+ * @param[in] input First tensor input info. Data types supported: F16/F32.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEELEMENTWISEUNARYLAYER_H__ */
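For illustration, a minimal usage sketch of the new function, following the configure/allocate/run flow used elsewhere in the library; the shape and fill step are placeholders, not taken from the patch:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void sin_layer_example()
    {
        Tensor src{};
        Tensor dst{};

        // 2D F32 tensors; F16 is also supported where FP16 vector arithmetic is available.
        src.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));

        // Configure before allocation, as with the other NEON simple functions.
        NESinLayer sin_layer;
        sin_layer.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src with input values in radians ...

        sin_layer.run();
    }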
diff --git a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
index 45f0fedebb..5d3af3b03d 100644
--- a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
@@ -65,13 +65,15 @@ inline ScalarType elementwise_op_scalar(const ScalarType &a)
return std::abs(a);
case ElementWiseUnary::ROUND:
return support::cpp11::nearbyint(a);
+ case ElementWiseUnary::SIN:
+ return std::sin(a);
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
}
}
/* Elementwise operations that are supported for float */
-template <ElementWiseUnary op, bool is_float, typename VectorType, typename std::enable_if<is_float, int>::type = 0>
+template <ElementWiseUnary op, typename ScalarType, bool is_float, typename VectorType, typename std::enable_if<is_float, int>::type = 0>
inline VectorType elementwise_op(const VectorType &a)
{
switch(op)
@@ -88,13 +90,15 @@ inline VectorType elementwise_op(const VectorType &a)
return wrapper::vabs(a);
case ElementWiseUnary::ROUND:
return wrapper::vround(a);
+ case ElementWiseUnary::SIN:
+ return wrapper::vsin(a);
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
}
}
/* Elementwise operations that are supported for non floats */
-template < ElementWiseUnary op, bool is_float, typename VectorType, typename std::enable_if < !is_float, int >::type = 0 >
+template < ElementWiseUnary op, typename ScalarType, bool is_float, typename VectorType, typename std::enable_if < !is_float, int >::type = 0 >
inline VectorType elementwise_op(const VectorType &a)
{
switch(op)
@@ -129,7 +133,7 @@ void elementwise_op(const ITensor *in, ITensor *out, const Window &window)
int x = window_start_x;
for(; x <= window_end_x - window_step_x; x += window_step_x)
{
- wrapper::vstore(output_ptr + x, elementwise_op<op, is_float>(wrapper::vloadq(input_ptr + x)));
+ wrapper::vstore(output_ptr + x, elementwise_op<op, ScalarType, is_float>(wrapper::vloadq(input_ptr + x)));
}
for(; x < window_end_x; ++x)
{
@@ -215,6 +219,9 @@ void NEElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensor *inp
case ElementWiseUnary::ROUND:
_function = configure_func<ElementWiseUnary::ROUND>(input, output);
break;
+ case ElementWiseUnary::SIN:
+ _function = configure_func<ElementWiseUnary::SIN>(input, output);
+ break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
}
@@ -229,6 +236,7 @@ Status NEElementwiseUnaryKernel::validate_arguments(ElementWiseUnary op, const I
case ElementWiseUnary::RSQRT:
case ElementWiseUnary::LOG:
case ElementWiseUnary::ROUND:
+ case ElementWiseUnary::SIN:
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32);
break;
case ElementWiseUnary::NEG:
diff --git a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
index e4c9101274..4b44a05a64 100644
--- a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
@@ -96,4 +96,15 @@ Status NERoundLayer::validate(const ITensorInfo *input, const ITensorInfo *outpu
return NEElementwiseUnaryKernel::validate(ElementWiseUnary::ROUND, input, output);
}
+void NESinLayer::configure(const ITensor *input, ITensor *output)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ k->configure(ElementWiseUnary::SIN, input, output);
+ _kernel = std::move(k);
+}
+Status NESinLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+{
+ return NEElementwiseUnaryKernel::validate(ElementWiseUnary::SIN, input, output);
+}
+
} // namespace arm_compute
diff --git a/tests/benchmark/NEON/ElementwiseSin.cpp b/tests/benchmark/NEON/ElementwiseSin.cpp
new file mode 100644
index 0000000000..58b58dc1bd
--- /dev/null
+++ b/tests/benchmark/NEON/ElementwiseSin.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/benchmark/fixtures/ElementWiseUnaryFixture.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+TEST_SUITE(NEON)
+TEST_SUITE(SinLayer)
+
+template <typename T>
+using NESinLayerFixture = SinBenchmarkFixture<Tensor, Accessor, NESinLayer, T>;
+
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+REGISTER_FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+ DataType::F16)));
+
+REGISTER_FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+ DataType::F16)));
+
+TEST_SUITE_END() // FP16
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+TEST_SUITE(FP32)
+REGISTER_FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+ DataType::F32)));
+
+REGISTER_FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+ DataType::F32)));
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // SinLayer
+TEST_SUITE_END() // NEON
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/benchmark/fixtures/ElementWiseUnaryFixture.h b/tests/benchmark/fixtures/ElementWiseUnaryFixture.h
new file mode 100644
index 0000000000..e4f76a441f
--- /dev/null
+++ b/tests/benchmark/fixtures/ElementWiseUnaryFixture.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ElementWiseUnary.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementWiseUnaryBenchmarkFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, DataType input_data_type, ElementWiseUnary op)
+ {
+ src = create_tensor<TensorType>(input_shape, input_data_type);
+ dst = create_tensor<TensorType>(input_shape, input_data_type);
+
+ elwiseunary_layer.configure(&src, &dst);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+ }
+
+ void run()
+ {
+ elwiseunary_layer.run();
+ }
+
+ void sync()
+ {
+ sync_if_necessary<TensorType>();
+ sync_tensor_if_necessary<TensorType>(dst);
+ }
+
+private:
+ TensorType src{};
+ TensorType dst{};
+ FunctionType elwiseunary_layer{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RsqrtBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::RSQRT);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::EXP);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::NEG);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::LOG);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::ABS);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinBenchmarkFixture : public ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type)
+ {
+ ElementWiseUnaryBenchmarkFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, ElementWiseUnary::SIN);
+ }
+};
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */
diff --git a/tests/validation/NEON/ElementwiseSin.cpp b/tests/validation/NEON/ElementwiseSin.cpp
new file mode 100644
index 0000000000..c68d1e5612
--- /dev/null
+++ b/tests/validation/NEON/ElementwiseSin.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ElementWiseUnaryFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+AbsoluteTolerance<float> tolerance_fp32(0.00001f);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+AbsoluteTolerance<float> tolerance_fp16(0.0005f);
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+} // namespace
+TEST_SUITE(NEON)
+TEST_SUITE(SinLayer)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), shape, data_type)
+{
+ // Create tensors
+ Tensor src = create_tensor<Tensor>(shape, data_type);
+ Tensor dst = create_tensor<Tensor>(shape, data_type);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NESinLayer sin_layer;
+ sin_layer.configure(&src, &dst);
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(src.info()->valid_region(), valid_region);
+ validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using NESinLayerFixture = SinValidationFixture<Tensor, Accessor, NESinLayer, T>;
+
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+ DataType::F16)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+ DataType::F16)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+
+TEST_SUITE_END() // FP16
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // SinLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ElementWiseUnaryFixture.h b/tests/validation/fixtures/ElementWiseUnaryFixture.h
index fd66f630ba..3f6d5b3cb3 100644
--- a/tests/validation/fixtures/ElementWiseUnaryFixture.h
+++ b/tests/validation/fixtures/ElementWiseUnaryFixture.h
@@ -44,11 +44,11 @@ class ElementWiseUnaryValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, ElementWiseUnary op)
+ void setup(TensorShape input_shape, DataType input_data_type, ElementWiseUnary op)
{
_op = op;
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
+ _target = compute_target(input_shape, input_data_type);
+ _reference = compute_reference(input_shape, input_data_type);
}
protected:
@@ -100,7 +100,7 @@ protected:
}
case ElementWiseUnary::SIN:
{
- std::uniform_real_distribution<> distribution(100.0f, -100.0f);
+ std::uniform_real_distribution<> distribution(-100.00f, 100.00f);
library->fill(tensor, distribution, i);
break;
}
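The swapped bounds in the fill above matter because std::uniform_real_distribution expects (min, max); passing (100.0f, -100.0f) violates its precondition a <= b and is undefined behaviour. A minimal illustration, not part of the patch:

    #include <random>

    float sample_sin_input()
    {
        static std::mt19937 gen(0);
        // Lower bound first: the pre-fix call with (100.0f, -100.0f) violated the
        // distribution's precondition (a <= b).
        std::uniform_real_distribution<float> distribution(-100.0f, 100.0f);
        return distribution(gen);
    }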