From a1e7e2818ab282e4d3b707feb5783b4bd4fbe45b Mon Sep 17 00:00:00 2001
From: George Wort
Date: Tue, 15 Jan 2019 11:00:29 +0000
Subject: COMPMID-1768: NEON: Implement RealDiv

Change-Id: I0868669f7b733df141794fba1d79436e7581bd3a
Reviewed-on: https://review.mlplatform.org/426
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 .../NEON/kernels/NEElementwiseOperationKernel.h    |  41 ++++--
 arm_compute/core/NEON/wrapper/intrinsics/div.h     |  73 ++++++++++
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |   1 +
 .../NEON/functions/NEElementwiseOperations.h       |  28 +++-
 .../NEON/kernels/NEElementwiseOperationKernel.cpp  |  42 +++++-
 .../NEON/functions/NEElementwiseOperators.cpp      |  12 ++
 tests/validation/NEON/ElementwiseDivision.cpp      | 155 +++++++++++++++++++++
 .../fixtures/ElementwiseOperationsFixture.h        |  57 ++++++++
 .../validation/reference/ElementwiseOperations.cpp |   4 +-
 9 files changed, 401 insertions(+), 12 deletions(-)
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/div.h
 create mode 100644 tests/validation/NEON/ElementwiseDivision.cpp

diff --git a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
index f02f71b50e..1271da75a5 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
@@ -94,10 +94,8 @@ protected:
 class NEArithmeticOperationKernel : public NEElementwiseOperationKernel
 {
 public:
-    NEArithmeticOperationKernel()
-        : NEElementwiseOperationKernel()
-    {
-    }
+    /** Default constructor */
+    NEArithmeticOperationKernel() = default;
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
      *
@@ -126,13 +124,40 @@ protected:
     static Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output);
 };
 
+class NEDivisionOperationKernel : public NEArithmeticOperationKernel
+{
+public:
+    /** Default constructor */
+    NEDivisionOperationKernel() = default;
+
+    /** Initialise the kernel's inputs and output
+     *
+     * @param[in]  input1 First tensor input. Data types supported: F16/F32.
+     * @param[in]  input2 Second tensor input. Data types supported: Same as @p input1.
+     * @param[out] output Output tensor. Data types supported: Same as @p input1.
+     */
+    void configure(const ITensor *input1, const ITensor *input2, ITensor *output);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+ * + * @return a Status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output); + +protected: + // Inherited methods overridden: + static Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output); +}; + class NEComparisonOperationKernel : public NEElementwiseOperationKernel { public: - NEComparisonOperationKernel() - : NEElementwiseOperationKernel() - { - } + /** Default constructor */ + NEComparisonOperationKernel() = default; /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel * diff --git a/arm_compute/core/NEON/wrapper/intrinsics/div.h b/arm_compute/core/NEON/wrapper/intrinsics/div.h new file mode 100644 index 0000000000..d9f80d061f --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/div.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_WRAPPER_DIV_H__ +#define __ARM_COMPUTE_WRAPPER_DIV_H__ + +#include "arm_compute/core/NEON/NEMath.h" +#include + +namespace arm_compute +{ +namespace wrapper +{ +#ifdef __aarch64__ + +#define VDIV_IMPL(stype, vtype, prefix, postfix) \ + inline vtype vdiv(const vtype &a, const vtype &b) \ + { \ + return prefix##_##postfix(a, b); \ + } +VDIV_IMPL(float32x2_t, float32x2_t, vdiv, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VDIV_IMPL(float16x4_t, float16x4_t, vdiv, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +VDIV_IMPL(float32x4_t, float32x4_t, vdivq, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VDIV_IMPL(float16x8_t, float16x8_t, vdivq, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#else // __aarch64__ + +#define VDIV_IMPL(stype, vtype, mul_prefix, inv_prefix, postfix) \ + inline vtype vdiv(const vtype &a, const vtype &b) \ + { \ + return mul_prefix##_##postfix(a, inv_prefix##_##postfix(b)); \ + } +VDIV_IMPL(float32x2_t, float32x2_t, vmul, vinv, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VDIV_IMPL(float16x4_t, float16x4_t, vmul, vinv, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +VDIV_IMPL(float32x4_t, float32x4_t, vmulq, vinvq, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VDIV_IMPL(float16x8_t, float16x8_t, vmulq, vinvq, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#endif // __aarch64__ + +#undef VDIV_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_DIV_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h index c8f4a6e041..012f6868d1 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h @@ -33,6 +33,7 @@ #include "arm_compute/core/NEON/wrapper/intrinsics/cgt.h" #include "arm_compute/core/NEON/wrapper/intrinsics/clt.h" #include "arm_compute/core/NEON/wrapper/intrinsics/combine.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/div.h" #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h" #include "arm_compute/core/NEON/wrapper/intrinsics/exp.h" #include "arm_compute/core/NEON/wrapper/intrinsics/gethigh.h" diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h index cd9ed24bee..ca3717a709 100644 --- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h +++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h @@ -109,7 +109,33 @@ public: static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output); }; -/** Basic function to run @ref NEComparisonOperationKernel +/** Basic function to run @ref NEArithmeticOperationKernel for division + * + * @note The tensor data type for the inputs must be F16/F32. + * @note The function performs a squared different operation between two tensors (i.e., out[i] = in1[i] / in2[i]) + */ +class NEElementwiseDivision : public INESimpleFunction +{ +public: + /** Initialise the kernel's inputs, output and conversion policy. + * + * @param[in, out] input1 First tensor input. Data types supported: F16/F32. + * @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1. + * @param[out] output Output tensor. Data types supported: Same as @p input1. 
+ */ + void configure(ITensor *input1, ITensor *input2, ITensor *output); + /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for division + * + * @param[in] input1 First tensor input info. Data types supported: F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output); +}; + +/** Basic function to run @ref NEComparisonOperationKernel. * * @note The tensor data type for the inputs must be QASYMM8/S16/F16/S32/F32. * @note The function performs a comparison operation between two tensors. diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp index 88fd730554..99a3b5ac66 100644 --- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp +++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp @@ -123,6 +123,11 @@ inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const Scalar res = (a - b) * (a - b); break; } + case ArithmeticOperation::DIV: + { + res = a / b; + break; + } default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -154,7 +159,6 @@ inline VectorType elementwise_arithm_op(const VectorType &a, const VectorType &b res = wrapper::vmul(tmp, tmp); break; } - default: ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } @@ -162,6 +166,20 @@ inline VectorType elementwise_arithm_op(const VectorType &a, const VectorType &b return res; } +template <> +inline float32x4_t elementwise_arithm_op(const float32x4_t &a, const float32x4_t &b) +{ + return wrapper::vdiv(a, b); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template <> +inline float16x8_t elementwise_arithm_op(const float16x8_t &a, const float16x8_t &b) +{ + return wrapper::vdiv(a, b); +} +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + template inline float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32x4x4_t &b) { @@ -833,6 +851,28 @@ Status NEArithmeticOperationKernel::validate(ArithmeticOperation op, const ITens return Status{}; } +/** The division operator */ + +void NEDivisionOperationKernel::configure(const ITensor *input1, const ITensor *input2, ITensor *output) +{ + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info())); + configure_common(input1, input2, output); + _function = configure_arithm_func(input1, input2, output); +} + +Status NEDivisionOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::F16, DataType::F32); + return NEArithmeticOperationKernel::validate_arguments(input1, input2, output); +} + +Status NEDivisionOperationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output)); + return Status{}; +} + /** Comparison operators (equal, not equal, less than, greater than, less than or equal, greater than or equal) */ void NEComparisonOperationKernel::configure(ComparisonOperation op, const ITensor *input1, const ITensor *input2, ITensor *output) diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp 
index 711e99ea77..4c570685bc 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -67,6 +67,18 @@ Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITens
     return NEArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
 }
 
+void NEElementwiseDivision::configure(ITensor *input1, ITensor *input2, ITensor *output)
+{
+    auto k = arm_compute::support::cpp14::make_unique<NEDivisionOperationKernel>();
+    k->configure(input1, input2, output);
+    _kernel = std::move(k);
+}
+
+Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
+{
+    return NEDivisionOperationKernel::validate(input1, input2, output);
+}
+
 template <ComparisonOperation COP>
 void NEElementwiseComparisonStatic<COP>::configure(ITensor *input1, ITensor *input2, ITensor *output)
 {
diff --git a/tests/validation/NEON/ElementwiseDivision.cpp b/tests/validation/NEON/ElementwiseDivision.cpp
new file mode 100644
index 0000000000..86a2056ca8
--- /dev/null
+++ b/tests/validation/NEON/ElementwiseDivision.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" +#include "arm_compute/runtime/Tensor.h" +#include "arm_compute/runtime/TensorAllocator.h" +#include "tests/NEON/Accessor.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/ElementwiseOperationsFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +RelativeTolerance tolerance_fp32(0.000001f); +/** Input data sets **/ +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +RelativeTolerance tolerance_fp16(static_cast(0.01f)); +const auto ElementwiseDivisionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("DataType", DataType::F16)); +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +const auto ElementwiseDivisionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataType", DataType::F32)); +} // namespace + +TEST_SUITE(NEON) +TEST_SUITE(ElementwiseDivision) + +template +using NEElementwiseDivisionFixture = ElementwiseDivisionValidationFixture; + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid data type combination + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes + }), + framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32), + TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), + })), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), + })), + framework::dataset::make("Expected", { true, true, true, false, false})), + input1_info, input2_info, output_info, expected) +{ + ARM_COMPUTE_EXPECT(bool(NEElementwiseDivision::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +TEST_SUITE(Float) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +TEST_SUITE(F16) +FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseDivisionFP16Dataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp16, 0.01); +} +TEST_SUITE_END() // F16 +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + +TEST_SUITE(F32) +DATA_TEST_CASE(Configuration, 
+               shape)
+{
+    // Create tensors
+    Tensor ref_src1 = create_tensor<Tensor>(shape, DataType::F32);
+    Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::F32);
+    Tensor dst      = create_tensor<Tensor>(shape, DataType::F32);
+
+    // Create and Configure function
+    NEElementwiseDivision div;
+    div.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), ElementwiseDivisionFP32Dataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEElementwiseDivisionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), ElementwiseDivisionFP32Dataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
+}
+
+template <typename T>
+using NEElementwiseDivisionBroadcastFixture = ElementwiseDivisionBroadcastValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;
+
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEElementwiseDivisionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapesBroadcast(),
+                       ElementwiseDivisionFP32Dataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEElementwiseDivisionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapesBroadcast(),
+                       ElementwiseDivisionFP32Dataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // F32
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // ElementwiseDivision
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h
index 8b88f6a8fb..53030842e7 100644
--- a/tests/validation/fixtures/ElementwiseOperationsFixture.h
+++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h
@@ -325,6 +325,63 @@ public:
                                                                                              qinfo0, qinfo1, qinfo_out);
     }
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
+                                                                                             data_type0, data_type1, output_data_type,
+                                                                                             QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseDivisionValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+                                                                                             data_type0, data_type1, output_data_type,
+                                                                                             QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseDivisionValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+                                                                                             data_type0, data_type1, output_data_type,
+                                                                                             qinfo0, qinfo1, qinfo_out);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseDivisionQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+
+    {
+        ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
+                                                                                             data_type0, data_type1, output_data_type,
+                                                                                             qinfo0, qinfo1, qinfo_out);
+    }
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/ElementwiseOperations.cpp b/tests/validation/reference/ElementwiseOperations.cpp
index 1f0d42b26e..6d533edea5 100644
--- a/tests/validation/reference/ElementwiseOperations.cpp
+++ b/tests/validation/reference/ElementwiseOperations.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -74,7 +74,7 @@ T arithm_op(ArithmeticOperation op, T src1, T src2, ConvertPolicy convert_policy
     }
 
     T result;
-    if(op == ArithmeticOperation::ADD || op == ArithmeticOperation::SUB)
+    if(op == ArithmeticOperation::ADD || op == ArithmeticOperation::SUB || op == ArithmeticOperation::DIV)
    {
         result = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T>(val) : static_cast<T>(val);
     }
-- 
cgit v1.2.1
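
Usage sketch (illustrative only, not part of the patch): the new NEElementwiseDivision function is expected to follow the standard configure/allocate/run flow used by the other NEON elementwise functions; the tensor shape and variable names below are assumptions for the example.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shape; inputs must be F16/F32 and broadcast-compatible.
    const TensorShape shape(32U, 13U, 2U);

    Tensor input1, input2, output;
    input1.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    input2.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    output.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    // Configure the function before allocating the backing memory.
    NEElementwiseDivision div;
    div.configure(&input1, &input2, &output);

    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input1 and input2 with real data here ...

    div.run(); // output[i] = input1[i] / input2[i]
    return 0;
}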