author     morgolock <pablo.tello@arm.com>    2020-01-10 10:11:14 +0000
committer  Pablo Marquez <pablo.tello@arm.com>    2020-01-10 17:12:20 +0000
commit     781d727a741eb264f0e6614b41780c05050972e3 (patch)
tree       7a52013582f408a08e37c2b19bad781adcd96746
parent     7fdcfb113cc5651eeb5f35333a41434bc4c2223f (diff)
download   ComputeLibrary-781d727a741eb264f0e6614b41780c05050972e3.tar.gz
COMPMID-2994: Add support QASYMM8_SIGNED in NEElementwiseMin
Change-Id: Ic5b8c69b90fdad8e7585169c8d4eae449162121d
Signed-off-by: morgolock <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2570
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/runtime/NEON/functions/NEElementwiseOperations.h |  4
-rw-r--r--  src/runtime/NEON/functions/NEElementwiseOperators.cpp        | 11
-rw-r--r--  tests/validation/NEON/ElementwiseMin.cpp                     | 42
3 files changed, 47 insertions(+), 10 deletions(-)
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
index d2281f8bea..9499867f81 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
@@ -67,14 +67,14 @@ class NEElementwiseMin : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
* @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1.
* @param[out] output Output tensor. Data types supported: Same as @p input1.
*/
void configure(ITensor *input1, ITensor *input2, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for min
*
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
* @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
* @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
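
A minimal usage sketch of the configure()/run() path documented above with QASYMM8_SIGNED tensors (not part of the patch): the shape, the (0.5f, 10) quantization parameters and the tensor names are illustrative assumptions, and matching quantization info is used for all three tensors, as the validate() check in the implementation below requires.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shape and quantization parameters (assumptions, not taken from the patch)
    const TensorShape      shape(32U, 13U, 2U);
    const QuantizationInfo qinfo(0.5f, 10);

    // All three tensors are QASYMM8_SIGNED with the same quantization info
    Tensor input1, input2, output;
    input1.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8_SIGNED, qinfo));
    input2.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8_SIGNED, qinfo));
    output.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8_SIGNED, qinfo));

    // Configure the function first, then allocate the backing memory
    NEElementwiseMin min_op;
    min_op.configure(&input1, &input2, &output);

    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input1 and input2 with quantized int8 data ...

    min_op.run();
    return 0;
}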
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index ede8c20428..2c263d5f9a 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,9 +21,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include <arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h>
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/ITensor.h"
#include "support/ToolchainSupport.h"
@@ -41,7 +41,7 @@ void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *outp
Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- if( input1->data_type() == DataType::QASYMM8_SIGNED)
+ if(input1->data_type() == DataType::QASYMM8_SIGNED)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input1, input2, output);
@@ -58,6 +58,11 @@ void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *outp
Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
+ if(input1->data_type() == DataType::QASYMM8_SIGNED)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input1, input2, output);
+ }
return NEArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output);
}
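
A short sketch of what the added check means for callers (illustrative only; the shapes and quantization parameters are assumptions chosen to mirror the validation test added below): with QASYMM8_SIGNED inputs, validate() now rejects configurations whose tensors disagree on data type or quantization info.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"

using namespace arm_compute;

bool check_min_validation()
{
    // All QASYMM8_SIGNED with matching quantization info: accepted
    const TensorInfo in1(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 2));
    const TensorInfo in2(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 2));
    const TensorInfo out_ok(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 2));
    const bool ok = bool(NEElementwiseMin::validate(&in1, &in2, &out_ok));   // expected: true

    // Output quantization info differs from the inputs: rejected by the new check
    const TensorInfo out_bad(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 1));
    const bool bad = bool(NEElementwiseMin::validate(&in1, &in2, &out_bad)); // expected: false

    return ok && !bad;
}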
diff --git a/tests/validation/NEON/ElementwiseMin.cpp b/tests/validation/NEON/ElementwiseMin.cpp
index cda389aa38..9b950a06a7 100644
--- a/tests/validation/NEON/ElementwiseMin.cpp
+++ b/tests/validation/NEON/ElementwiseMin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,11 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
const auto ElementwiseMinQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataType",
DataType::QASYMM8));
+
+const auto ElementwiseMaxQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED));
+
const auto ElementwiseMinS32Dataset = combine(combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::S32)), framework::dataset::make("DataType",
DataType::S32));
const auto ElementwiseMinS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
@@ -70,25 +75,38 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32), // Invalid data type combination
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32), // Invalid data type combination
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED), // Ok
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED), // Mismatching types, cannot mix QASYMM8_SIGNED with QASYMM8
+ TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.1f, 2) ), // Mismatching qinfo
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.5f, 1) ),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
+ TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
+ TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.5f, 1) ),
})),
- framework::dataset::make("Expected", { true, true, true, false, false})),
+ framework::dataset::make("Expected", { true, true, true, false,
+ false,true,false,false})),
input1_info, input2_info, output_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(NEElementwiseMin::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(NEElementwiseMin::validate(
+ &input1_info.clone()->set_is_resizable(false),
+ &input2_info.clone()->set_is_resizable(false),
+ &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
@@ -139,6 +157,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseMinQuantizedFixture<uint8_t>, fram
validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}
TEST_SUITE_END()
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseMinQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ ElementwiseMaxQASYMM8SignedDataset),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+TEST_SUITE_END()
+
TEST_SUITE_END()
TEST_SUITE(Float)