From f6e475c9a092bc6e0fb53f484fbf2832183a9c44 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Fri, 10 May 2019 12:06:28 +0100 Subject: COMPMID-2268: Implement NEG for NEON. Change-Id: I90c023dbea8ea12e9af677294ba576b2bfcc02a4 Signed-off-by: Usama Arif Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/184216 Tested-by: bsgcomp Comments-Addressed: bsgcomp Reviewed-by: Pablo Tello Reviewed-on: https://review.mlplatform.org/c/1099 Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez Comments-Addressed: Arm Jenkins --- .../core/NEON/kernels/NEElementwiseUnaryKernel.h | 2 +- arm_compute/core/Types.h | 1 + .../NEON/functions/NEElementwiseUnaryLayer.h | 20 ++++ src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp | 53 ++++++-- .../NEON/functions/NEElementwiseUnaryLayer.cpp | 12 ++ tests/validation/NEON/ElementwiseNegation.cpp | 133 +++++++++++++++++++++ .../validation/fixtures/ElementWiseUnaryFixture.h | 41 ++++++- tests/validation/reference/ElementWiseUnary.cpp | 6 +- 8 files changed, 251 insertions(+), 17 deletions(-) create mode 100644 tests/validation/NEON/ElementwiseNegation.cpp diff --git a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h index f632b1a93a..2c019b52f0 100644 --- a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h +++ b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h @@ -88,7 +88,7 @@ public: protected: // Inherited methods overridden: - static Status validate_arguments(const ITensorInfo &input, const ITensorInfo &output); + static Status validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output); /** Function to use for the particular tensor types passed to configure() */ std::function _function; diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h index a2dfbb7d82..544ebff410 100644 --- a/arm_compute/core/Types.h +++ b/arm_compute/core/Types.h @@ -578,6 +578,7 @@ enum class ElementWiseUnary { RSQRT, /**< Reverse square root */ EXP, /**< Exponential */ + NEG, /**< Negate */ }; /** The normalization type used for the normalization layer */ diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h b/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h index 76827903fa..f4b7e89889 100644 --- a/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h +++ b/arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h @@ -69,5 +69,25 @@ public: */ static Status validate(const ITensorInfo *input, const ITensorInfo *output); }; + +/** Basic function to negate an input tensor. */ +class NENegLayer : public INESimpleFunction +{ +public: + /** Initialize the function + * + * @param[in] input Input tensor. Data types supported: F16/F32/S32. + * @param[out] output Output tensor. Data types supported: same as @p input. + */ + void configure(const ITensor *input, ITensor *output); + /** Static function to check if given info will lead to a valid configuration of @ref NENegLayer + * + * @param[in] input First tensor input info. Data types supported: F16/F32/S32. + * @param[in] output Output tensor info. Data types supported: Same as @p input. 
+ * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output); +}; } // namespace arm_compute #endif /* __ARM_COMPUTE_NEELEMENTWISEUNARYLAYER_H__ */ diff --git a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp index 34696d872a..d62b165727 100644 --- a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp +++ b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp @@ -55,12 +55,15 @@ inline ScalarType elementwise_op_scalar(const ScalarType &a) return 1 / sqrt(a); case ElementWiseUnary::EXP: return std::exp(a); + case ElementWiseUnary::NEG: + return -a; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } } -template +/* Elementwise operations that are supported for float */ +template ::type = 0> inline VectorType elementwise_op(const VectorType &a) { switch(op) @@ -69,12 +72,27 @@ inline VectorType elementwise_op(const VectorType &a) return wrapper::vinvsqrt(a); case ElementWiseUnary::EXP: return wrapper::vexpq(a); + case ElementWiseUnary::NEG: + return wrapper::vneg(a); default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } } -template +/* Elementwise operations that are supported for non floats */ +template ::type = 0> +inline VectorType elementwise_op(const VectorType &a) +{ + switch(op) + { + case ElementWiseUnary::NEG: + return wrapper::vneg(a); + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } +} + +template void elementwise_op(const ITensor *in, ITensor *out, const Window &window) { const int window_step_x = 16 / sizeof(ScalarType); @@ -95,7 +113,7 @@ void elementwise_op(const ITensor *in, ITensor *out, const Window &window) int x = window_start_x; for(; x <= window_end_x - window_step_x; x += window_step_x) { - wrapper::vstore(output_ptr + x, elementwise_op(wrapper::vloadq(input_ptr + x))); + wrapper::vstore(output_ptr + x, elementwise_op(wrapper::vloadq(input_ptr + x))); } for(; x < window_end_x; ++x) { @@ -115,10 +133,11 @@ configure_func(const ITensor *input, ITensor *output) static std::map map_function = { - { "op_F32_F32", &elementwise_op } + { "op_F32_F32", &elementwise_op }, + { "op_S32_S32", &elementwise_op }, }; #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - map_function["op_F16_F16"] = &elementwise_op; + map_function["op_F16_F16"] = &elementwise_op; #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ auto it = map_function.find(function_to_call); @@ -142,7 +161,7 @@ NEElementwiseUnaryKernel::NEElementwiseUnaryKernel() void NEElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensor *input, ITensor *output) { - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input->info(), *output->info())); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(op, *input->info(), *output->info())); ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); // Configure kernel window @@ -168,16 +187,29 @@ void NEElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensor *inp case ElementWiseUnary::EXP: _function = configure_func(input, output); break; + case ElementWiseUnary::NEG: + _function = configure_func(input, output); + break; default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } } -Status NEElementwiseUnaryKernel::validate_arguments(const ITensorInfo &input, const ITensorInfo &output) +Status NEElementwiseUnaryKernel::validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output) { ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32); - + switch(op) + { + case 
ElementWiseUnary::EXP: + case ElementWiseUnary::RSQRT: + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32); + break; + case ElementWiseUnary::NEG: + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32, DataType::S32); + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } // Validate in case of configured output if(output.total_size() > 0) { @@ -189,9 +221,8 @@ Status NEElementwiseUnaryKernel::validate_arguments(const ITensorInfo &input, co Status NEElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInfo *input, const ITensorInfo *output) { - ARM_COMPUTE_UNUSED(op); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(op, *input, *output)); return Status{}; } diff --git a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp index 10142c732f..48f4975b1a 100644 --- a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp +++ b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp @@ -51,4 +51,16 @@ Status NEExpLayer::validate(const ITensorInfo *input, const ITensorInfo *output) { return NEElementwiseUnaryKernel::validate(ElementWiseUnary::EXP, input, output); } + +void NENegLayer::configure(const ITensor *input, ITensor *output) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(ElementWiseUnary::NEG, input, output); + _kernel = std::move(k); +} +Status NENegLayer::validate(const ITensorInfo *input, const ITensorInfo *output) +{ + return NEElementwiseUnaryKernel::validate(ElementWiseUnary::NEG, input, output); +} + } // namespace arm_compute diff --git a/tests/validation/NEON/ElementwiseNegation.cpp b/tests/validation/NEON/ElementwiseNegation.cpp new file mode 100644 index 0000000000..7e7c838472 --- /dev/null +++ b/tests/validation/NEON/ElementwiseNegation.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h" +#include "arm_compute/runtime/Tensor.h" +#include "arm_compute/runtime/TensorAllocator.h" +#include "tests/NEON/Accessor.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/ElementWiseUnaryFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +RelativeTolerance tolerance_fp32(0.000001f); +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +RelativeTolerance tolerance_fp16(0.01f); +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +} // namespace +TEST_SUITE(NEON) +TEST_SUITE(NegLayer) + +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), shape, data_type) +{ + // Create tensors + Tensor src = create_tensor(shape, data_type); + Tensor dst = create_tensor(shape, data_type); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Create and configure function + NENegLayer neg_layer; + neg_layer.configure(&src, &dst); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(src.info()->valid_region(), valid_region); + validate(dst.info()->valid_region(), valid_region); +} + +template +using NENegLayerFixture = NegValidationFixture; + +TEST_SUITE(Float) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::F16))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp16); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", + DataType::F16))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp16); +} + +TEST_SUITE_END() // FP16 +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::F32))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", + DataType::F32))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32); +} +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // Float + +TEST_SUITE(Integer) +TEST_SUITE(S32) +FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::S32))) +{ + // Validate output + validate(Accessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", + DataType::S32))) +{ + // Validate output + validate(Accessor(_target), _reference); +} +TEST_SUITE_END() // S32 +TEST_SUITE_END() // Integer + 
+TEST_SUITE_END() // NegLayer +TEST_SUITE_END() // NEON +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/fixtures/ElementWiseUnaryFixture.h b/tests/validation/fixtures/ElementWiseUnaryFixture.h index f508bc1d34..ba131630a3 100644 --- a/tests/validation/fixtures/ElementWiseUnaryFixture.h +++ b/tests/validation/fixtures/ElementWiseUnaryFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ public: protected: template - void fill(U &&tensor, int i) + void fill(U &&tensor, int i, DataType data_type) { switch(_op) { @@ -69,6 +69,28 @@ protected: library->fill(tensor, distribution, i); break; } + case ElementWiseUnary::NEG: + { + switch(data_type) + { + case DataType::F32: + case DataType::F16: + { + std::uniform_real_distribution<> distribution(-2.0f, 2.0f); + library->fill(tensor, distribution, i); + break; + } + case DataType::S32: + { + std::uniform_int_distribution distribution(-100, 100); + library->fill(tensor, distribution, i); + break; + } + default: + ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented"); + } + break; + } default: ARM_COMPUTE_ERROR("Not implemented"); } @@ -95,7 +117,7 @@ protected: ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors - fill(AccessorType(src), 0); + fill(AccessorType(src), 0, data_type); // Compute function elwiseunary_layer.run(); @@ -109,7 +131,7 @@ protected: SimpleTensor src{ shape, data_type }; // Fill reference - fill(src, 0); + fill(src, 0, data_type); return reference::elementwise_unary(src, _op); } @@ -140,6 +162,17 @@ public: ElementWiseUnaryValidationFixture::setup(shape, data_type, ElementWiseUnary::EXP); } }; + +template +class NegValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, ElementWiseUnary::NEG); + } +}; } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/reference/ElementWiseUnary.cpp b/tests/validation/reference/ElementWiseUnary.cpp index ae7f256339..79310eae0f 100644 --- a/tests/validation/reference/ElementWiseUnary.cpp +++ b/tests/validation/reference/ElementWiseUnary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -46,6 +46,9 @@ SimpleTensor elementwise_unary(const SimpleTensor &src, ElementWiseUnary o case ElementWiseUnary::EXP: dst[i] = std::exp(src[i]); break; + case ElementWiseUnary::NEG: + dst[i] = -src[i]; + break; default: ARM_COMPUTE_ERROR("Not implemented"); } @@ -56,6 +59,7 @@ SimpleTensor elementwise_unary(const SimpleTensor &src, ElementWiseUnary o template SimpleTensor elementwise_unary(const SimpleTensor &src, ElementWiseUnary op); template SimpleTensor elementwise_unary(const SimpleTensor &src, ElementWiseUnary op); +template SimpleTensor elementwise_unary(const SimpleTensor &src, ElementWiseUnary op); } // namespace reference } // namespace validation } // namespace test -- cgit v1.2.1
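For reference, the snippet below is a minimal usage sketch of the NENegLayer function added by this patch. The 16x16 shape, the F32 data type, the main() harness and the fill placeholder are illustrative assumptions; configure(), validate() and run() are the entry points this change introduces or reuses.

/*
 * Minimal usage sketch of NENegLayer (not part of the patch).
 * The tensor shape, data type and the fill step are illustrative
 * assumptions; configure(), validate() and run() are the APIs
 * touched by this change.
 */
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    const TensorShape shape(16U, 16U);

    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    // Optional up-front check, mirroring what the validation tests exercise.
    if(!bool(NENegLayer::validate(src.info(), dst.info())))
    {
        return 1;
    }

    NENegLayer neg;
    neg.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with input data here ...

    neg.run(); // dst[i] = -src[i]

    return 0;
}

S32 input (and F16 where __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is available) follows the same call sequence, since NEElementwiseUnaryKernel selects the per-type implementation from its internal function map at configure() time.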