author     Sadik Armagan <sadik.armagan@arm.com>    2019-04-09 14:20:12 +0100
committer  Sadik Armagan <sadik.armagan@arm.com>    2019-04-09 14:24:05 +0100
commit     2999a02f0c6a6f290ce45f28c998a1c000d48f67 (patch)
tree       f9d13cec08ab8c6c47e68df512cddc613552a7d2
parent     998517647d699d602e36f06b40d3f1d1ddaae7be (diff)
download   armnn-2999a02f0c6a6f290ce45f28c998a1c000d48f67.tar.gz
IVGCVSW-2862 Extend the Elementwise Workload to support QSymm16 Data Type
IVGCVSW-2863 Unit test per Elementwise operator with QSymm16 Data Type

* Added QSymm16 support for Elementwise Operators
* Added QSymm16 unit tests for Elementwise Operators

Change-Id: I4e4e2938f9ed2cbbb1f05fb0f7dc476768550277
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
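For context on the arithmetic below: QSymm16 is Arm NN's signed, symmetric 16-bit quantized data type, so the zero point (offset) is always 0 and stored values span [-32768, 32767]. The standalone sketch below illustrates the assumed quantize/dequantize mapping; inside Arm NN the equivalents are armnn::Quantize<int16_t> and armnn::Dequantize, and the round-and-clamp behaviour here is illustrative rather than a quote of the library implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>

int16_t QuantizeQSymm16(float value, float scale)
{
    // Symmetric quantization: the offset (zero point) is fixed at 0.
    float q = std::round(value / scale);
    q = std::max(-32768.0f, std::min(32767.0f, q));
    return static_cast<int16_t>(q);
}

float DequantizeQSymm16(int16_t value, float scale)
{
    return static_cast<float>(value) * scale;
}

// Example with the scale used by the new addition tests:
// QuantizeQSymm16(588.0f, 7.0f) == 84 and DequantizeQSymm16(84, 7.0f) == 588.0f.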
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                 |  18
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp              | 830
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp              |  72
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                   | 170
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp       |  32
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                |  21
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp            |  32
-rw-r--r--  src/backends/reference/workloads/RefElementwiseWorkload.cpp  |  22
8 files changed, 978 insertions(+), 219 deletions(-)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1360ac5d0c..528e1faefc 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -493,7 +493,8 @@ void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
@@ -524,7 +525,8 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
@@ -892,7 +894,8 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
@@ -922,7 +925,8 @@ void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
@@ -952,7 +956,8 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
@@ -1094,7 +1099,8 @@ void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
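Every Validate method touched above follows the same pattern: build the list of supported data types, then check each input and output tensor against it. A minimal sketch of that check, assuming a helper that throws on mismatch (the real ValidateDataTypes lives alongside these descriptors and raises armnn::InvalidArgumentException with a descriptor-specific message):

#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

enum class DataType { Float32, QuantisedAsymm8, QuantisedSymm16 };

// Hypothetical stand-in for the ValidateDataTypes helper used above.
void ValidateDataTypesSketch(DataType actual,
                             const std::vector<DataType>& supportedTypes,
                             const std::string& descriptorName)
{
    if (std::find(supportedTypes.begin(), supportedTypes.end(), actual) ==
        supportedTypes.end())
    {
        throw std::invalid_argument(descriptorName + ": data type is not in the supported list.");
    }
}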
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index f7c26b4009..74f3997133 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1442,6 +1442,14 @@ LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
workloadFactory, memoryManager, 2.f, 0);
}
+LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
+ workloadFactory, memoryManager, 2.f, 0);
+}
+
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -1458,6 +1466,14 @@ LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
workloadFactory, memoryManager, 0.1333333f, 128);
}
+LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
+ workloadFactory, memoryManager, 0.1333333f, 0);
+}
+
LayerTestResult<float,4> CompareAdditionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1527,7 +1543,7 @@ LayerTestResult<float,4> CompareAdditionTest(
}
namespace {
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1544,13 +1560,9 @@ LayerTestResult<T, 4> DivisionTestHelper(
float outScale,
int32_t outOffset)
{
- auto dataType = (std::is_same<T, uint8_t>::value ?
- armnn::DataType::QuantisedAsymm8 :
- armnn::DataType::Float32);
-
- armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
- armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
+ armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
+ armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
inputTensorInfo0.SetQuantizationScale(scale0);
inputTensorInfo0.SetQuantizationOffset(offset0);
@@ -1617,11 +1629,11 @@ LayerTestResult<float,4> DivisionByZeroTest(
INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
-INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
- return DivisionTestHelper<float>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape, input0, 1.0f, 0,
+ shape, input1, 1.0f, 0,
+ shape, output, 1.0f, 0);
}
LayerTestResult<float,4> DivisionTest(
@@ -1648,11 +1660,11 @@ LayerTestResult<float,4> DivisionTest(
1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
- return DivisionTestHelper<float>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape, input0, 1.0f, 0,
+ shape, input1, 1.0f, 0,
+ shape, output, 1.0f, 0);
}
LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
@@ -1668,11 +1680,11 @@ LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
- return DivisionTestHelper<float>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
@@ -1693,14 +1705,13 @@ LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18});
- return DivisionTestHelper<float>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
-
LayerTestResult<uint8_t,4> DivisionUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -1722,11 +1733,11 @@ LayerTestResult<uint8_t,4> DivisionUint8Test(
4, 4, 4, 4, 5, 5, 5, 5});
- return DivisionTestHelper<uint8_t>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 0.25f, 0);
+ return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape, input0, 1.0f, 0,
+ shape, input1, 1.0f, 0,
+ shape, output, 0.25f, 0);
}
LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
@@ -1741,11 +1752,11 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
- return DivisionTestHelper<uint8_t>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
@@ -1764,11 +1775,76 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18});
- return DivisionTestHelper<uint8_t>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<int16_t,4> DivisionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5 });
+
+ std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
+ 4, 4, 4, 4, 4, 4, 4, 4 });
+
+ std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
+ 4, 4, 4, 4, 5, 5, 5, 5});
+
+
+ return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape, input0, 1.0f, 0,
+ shape, input1, 1.0f, 0,
+ shape, output, 0.25f, 0);
+}
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape0[] = { 1, 2, 2, 2 };
+ std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+ unsigned int shape1[] = { 1, 1, 1, 1 };
+ std::vector<int16_t> input1({ 2 });
+
+ std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+ return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape0[] = { 1, 3, 3, 2 };
+ std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
+ 7, 16, 9, 20, 11, 24,
+ 13, 28, 15, 32, 17, 36});
+
+ unsigned int shape1[] = { 1, 1, 1, 2 };
+ std::vector<int16_t> input1({ 1, 2 });
+
+ std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18});
+
+ return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
template<typename DescriptorType>
@@ -2411,6 +2487,90 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
0);
}
+LayerTestResult<int16_t, 4> MaximumInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
+
+ std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5 });
+
+ std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
+ 4, 4, 4, 4, 5, 5, 5, 5 });
+
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<int16_t> input1({2});
+
+ std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<int16_t> input1({ 1, 10, 3});
+
+ std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
+ 7, 10, 9, 10, 11, 12 });
+
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -2486,6 +2646,90 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
0);
}
+LayerTestResult<int16_t, 4> MinimumInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
+
+ std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5 });
+
+ std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
+
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<int16_t> input1({2});
+
+ std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2 });
+
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<int16_t> input1({ 1, 10, 3});
+
+ std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
+ 1, 8, 3, 1, 10, 3 });
+
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
namespace {
LayerTestResult<float,4> MultiplicationTestHelper(
armnn::IWorkloadFactory& workloadFactory,
@@ -6304,74 +6548,67 @@ LayerTestResult<uint8_t, 3> MergerUint8Test(
return ret;
}
-LayerTestResult<uint8_t, 4> AdditionUint8Test(
+
+namespace
+{
+template <typename T>
+LayerTestResult<T, 4> AdditionQuantizeTestHelper(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const unsigned int shape0[4],
+ const std::vector<T>& values0,
+ float scale0,
+ int32_t offset0,
+ const unsigned int shape1[4],
+ const std::vector<T> & values1,
+ float scale1,
+ int32_t offset1,
+ const unsigned int outShape[4],
+ const std::vector<T> & outValues,
+ float outScale,
+ int32_t outOffset)
{
- unsigned int batchSize = 1;
- unsigned int channels = 2;
- unsigned int height = 2;
- unsigned int width = 3;
-
- const float scale = 7.0f;
- const int32_t offset = 3;
-
- armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
- armnn::TensorInfo outputTensorInfo;
+ auto dataType = (std::is_same<T, uint8_t>::value ?
+ armnn::DataType::QuantisedAsymm8 :
+ armnn::DataType::QuantisedSymm16);
- const unsigned int shape[] = { batchSize, channels, height, width };
- inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- inputTensorInfo1.SetQuantizationScale(scale);
- inputTensorInfo1.SetQuantizationOffset(offset);
+ armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
+ armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
+ armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
- inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- inputTensorInfo2.SetQuantizationScale(scale);
- inputTensorInfo2.SetQuantizationOffset(offset);
+ inputTensorInfo0.SetQuantizationScale(scale0);
+ inputTensorInfo0.SetQuantizationOffset(offset0);
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(scale);
- outputTensorInfo.SetQuantizationOffset(offset);
+ inputTensorInfo1.SetQuantizationScale(scale1);
+ inputTensorInfo1.SetQuantizationOffset(offset1);
- // See dequantized values to the right.
- auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
- {
- 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
- 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
- }));
+ outputTensorInfo.SetQuantizationScale(outScale);
+ outputTensorInfo.SetQuantizationOffset(outOffset);
- // See dequantized values to the right.
- auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
- {
- 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
- 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
- }));
+ auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
- // See dequantized values to the right.
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
- {
- 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
- 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
- }));
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::AdditionQueueDescriptor data;
armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+ AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+ inputHandle0->Allocate();
inputHandle1->Allocate();
- inputHandle2->Allocate();
outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
workload->Execute();
@@ -6379,28 +6616,94 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(
return result;
}
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> AdditionUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 2, 2, 3 };
+
+ std::vector<uint8_t> input0(
+ {
+ 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
+ 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
+ });
+
+ std::vector<uint8_t> input1(
+ {
+ 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
+ 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
+ });
+
+ std::vector<uint8_t> output(
+ {
+ 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
+ 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
+ });
+
+ return AdditionQuantizeTestHelper(workloadFactory,
+ memoryManager,
+ shape0, input0, 7.0f, 3,
+ shape1, input1, 7.0f, 3,
+ shape0, output, 7.0f, 3);
+}
+
+LayerTestResult<int16_t, 4> AdditionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 2, 2, 3 };
+
+ std::vector<int16_t> input0(
+ {
+ 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
+ 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
+ });
+
+ std::vector<int16_t> input1(
+ {
+ 21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
+ 126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
+ });
+
+ std::vector<int16_t> output(
+ {
+ 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
+ 329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
+ });
+
+ return AdditionQuantizeTestHelper(workloadFactory,
+ memoryManager,
+ shape0, input0, 7.0f, 0,
+ shape1, input1, 7.0f, 0,
+ shape0, output, 7.0f, 0);
+}
namespace
{
-LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const unsigned int shape0[4],
- const std::vector<uint8_t> & values0,
+ const std::vector<T> & values0,
float scale0,
int32_t offset0,
const unsigned int shape1[4],
- const std::vector<uint8_t> & values1,
+ const std::vector<T> & values1,
float scale1,
int32_t offset1,
const unsigned int outShape[4],
- const std::vector<uint8_t> & outValues,
+ const std::vector<T> & outValues,
float outScale,
int32_t outOffset)
{
- armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
- armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
- armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
+ armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
inputTensorInfo0.SetQuantizationScale(scale0);
inputTensorInfo0.SetQuantizationOffset(offset0);
@@ -6411,11 +6714,11 @@ LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
outputTensorInfo.SetQuantizationScale(outScale);
outputTensorInfo.SetQuantizationOffset(outOffset);
- auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
+ auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
- LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
@@ -6473,20 +6776,21 @@ LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
});
- return MultiplicationUint8TestHelper(workloadFactory,
- memoryManager,
- shape,
- input0,
- 4.0f,
- 1,
- shape,
- input1,
- 3.0f,
- -2,
- shape,
- output,
- 1366.255f, // Scale/offset chosen to have output values out of range.
- -5);
+ // Scale/offset chosen to have output values out of range.
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ 4.0f,
+ 1,
+ shape,
+ input1,
+ 3.0f,
+ -2,
+ shape,
+ output,
+ 1366.255f,
+ -5);
}
LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
@@ -6508,20 +6812,20 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
14, 16, 18, 20, 22, 24
});
- return MultiplicationUint8TestHelper(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
@@ -6543,25 +6847,139 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7, 16, 27, 10, 22, 36
});
- return MultiplicationUint8TestHelper(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape[] = { 1, 2, 2, 3 };
+
+ std::vector<int16_t> input0(
+ {
+ 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17
+ });
+
+ std::vector<int16_t> input1(
+ {
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12
+ });
+
+ std::vector<int16_t> output(
+ {
+ 6, 14, 24, 36, 50, 66,
+ 84, 104, 126, 150, 176, 204
+ });
+
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ 1.0f,
+ 0,
+ shape,
+ input1,
+ 1.0f,
+ 0,
+ shape,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0(
+ {
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12
+ });
+
+ std::vector<int16_t> input1({2});
+
+ std::vector<int16_t> output(
+ {
+ 2, 4, 6, 8, 10, 12,
+ 14, 16, 18, 20, 22, 24
+ });
+
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int16_t> input0(
+ {
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12
+ });
+
+ std::vector<int16_t> input1({1, 2, 3});
+
+ std::vector<int16_t> output(
+ {
+ 1, 4, 9, 4, 10, 18,
+ 7, 16, 27, 10, 22, 36
+ });
+
+ return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
namespace
{
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -6578,13 +6996,9 @@ LayerTestResult<T, 4> SubtractionTestHelper(
float outScale,
int32_t outOffset)
{
- auto dataType = (std::is_same<T, uint8_t>::value ?
- armnn::DataType::QuantisedAsymm8 :
- armnn::DataType::Float32);
-
- armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
- armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
+ armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
+ armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
inputTensorInfo0.SetQuantizationScale(scale0);
inputTensorInfo0.SetQuantizationOffset(offset0);
@@ -6639,11 +7053,11 @@ LayerTestResult<uint8_t, 4> SubtractionUint8Test(
std::vector<uint8_t> input1({ 1, 2, 1, 2 });
std::vector<uint8_t> output({ 3, 3, 5, 5 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 2,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0, input0, 0.5f, 2,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
@@ -6657,11 +7071,11 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
std::vector<uint8_t> input1({ 2 });
std::vector<uint8_t> output({ 5, 6, 7, 8 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 2,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 3);
+ return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0, input0, 0.5f, 2,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 3);
}
LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
@@ -6675,11 +7089,11 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
std::vector<uint8_t> input1({ 2, 1 });
std::vector<uint8_t> output({ 8, 11, 12, 15 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<float, 4> SubtractionTest(
@@ -6693,11 +7107,11 @@ LayerTestResult<float, 4> SubtractionTest(
std::vector<float> input1({ 1, -1, 0, 2 });
std::vector<float> output({ 0, 3, 3, 2 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
@@ -6711,11 +7125,11 @@ LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
std::vector<float> input1({ 10 });
std::vector<float> output({ -9, -8, -7, -6 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<float, 4> SubtractionBroadcastTest(
@@ -6729,11 +7143,65 @@ LayerTestResult<float, 4> SubtractionBroadcastTest(
std::vector<float> input1({ 10, -5 });
std::vector<float> output({ -9, 7, -7, 9 });
- return SubtractionTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
+ return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> SubtractionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+ std::vector<int16_t> input0({ 10, 12, 14, 16 });
+ std::vector<int16_t> input1({ 1, 2, 1, 2 });
+ std::vector<int16_t> output({ 3, 3, 5, 5 });
+
+ return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0, input0, 0.5f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0({ 10, 12, 14, 16 });
+ std::vector<int16_t> input1({ 2 });
+ std::vector<int16_t> output({ 3, 4, 5, 6 });
+
+ return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0, input0, 0.5f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 2, 1 };
+
+ std::vector<int16_t> input0({ 10, 12, 14, 16 });
+ std::vector<int16_t> input1({ 2, 1 });
+ std::vector<int16_t> output({ 8, 11, 12, 15 });
+
+ return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
+ memoryManager,
+ shape0, input0, 1.0f, 0,
+ shape1, input1, 1.0f, 0,
+ shape0, output, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
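A quick sanity check of the Int16 expectations above: AdditionInt16Test uses scale 7.0 and offset 0 on all tensors, so the quantized output is simply the sum of the quantized inputs (63 + 21 = 84, which dequantizes to 84 * 7 = 588), and unlike the Uint8 variant nothing clamps, because int16 only saturates at 32767. In DivisionInt16Test the output scale of 0.25 makes fractional quotients exactly representable: 3 / 2 = 1.5, which quantizes to 1.5 / 0.25 = 6.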
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index d589a40d76..c748be1fc6 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -781,6 +781,18 @@ LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> AdditionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> SubtractionUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -793,6 +805,18 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> SubtractionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -817,6 +841,18 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> MultiplicationInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> DivisionUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -829,6 +865,18 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> DivisionInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1076,6 +1124,18 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> MaximumInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1136,6 +1196,18 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager);
+LayerTestResult<int16_t, 4> MinimumInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
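The declarations above stay typed on int16_t while the definitions in LayerTests.cpp are templated on armnn::DataType; the bridge is armnn::ResolveType, which maps each DataType enumerator to its C++ storage type at compile time. A minimal sketch of that trait (the real one ships with Arm NN and covers more types):

#include <cstdint>

enum class DataType { Float32, QuantisedAsymm8, QuantisedSymm16 };

template<DataType DT> struct ResolveTypeImpl;
template<> struct ResolveTypeImpl<DataType::Float32>         { using Type = float;   };
template<> struct ResolveTypeImpl<DataType::QuantisedAsymm8> { using Type = uint8_t; };
template<> struct ResolveTypeImpl<DataType::QuantisedSymm16> { using Type = int16_t; };

template<DataType DT>
using ResolveType = typename ResolveTypeImpl<DT>::Type;

// A helper declared as
//     template<DataType DT, typename T = ResolveType<DT>>
//     LayerTestResult<T, 4> SomeTestHelper(...);
// then serves the Float32, Uint8 and Int16 variants from a single body.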
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index d2cf6f904a..3512d52acf 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -228,9 +228,10 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,2> supportedTypes = {
+ std::array<DataType,3> supportedTypes = {
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -432,12 +433,33 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input1);
- ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input0.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes = {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference division: input 0 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference division: input 1 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference division: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
+ "Reference division: input 0 and Input 1 types are mismatched");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference division: input and output types are mismatched");
+
+ supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
+ "Reference division: shapes are not suitable for implicit broadcast.");
+
+ return supported;
}
bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
@@ -606,12 +628,33 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input1);
- ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input0.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes = {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference maximum: input 0 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference maximum: input 1 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference maximum: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
+ "Reference maximum: input 0 and Input 1 types are mismatched");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference maximum: input and output types are mismatched");
+
+ supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
+ "Reference maximum: shapes are not suitable for implicit broadcast.");
+
+ return supported;
}
bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
@@ -659,12 +702,33 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input1);
- ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input0.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes = {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference minimum: input 0 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference minimum: input 1 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference minimum: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
+ "Reference minimum: input 0 and Input 1 types are mismatched");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference minimum: input and output types are mismatched");
+
+ supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
+ "Reference minimum: shapes are not suitable for implicit broadcast.");
+
+ return supported;
}
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
@@ -672,12 +736,33 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input1);
- ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input0.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes = {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference multiplication: input 0 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference multiplication: input 1 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference multiplication: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
+ "Reference multiplication: input 0 and Input 1 types are mismatched");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference multiplication: input and output types are mismatched");
+
+ supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
+ "Reference multiplication: shapes are not suitable for implicit broadcast.");
+
+ return supported;
}
bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -860,12 +945,33 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input1);
- ignore_unused(output);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input0.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ bool supported = true;
+
+ std::array<DataType,3> supportedTypes = {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference subtraction: input 0 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference subtraction: input 1 is not a supported type.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference subtraction: output is not a supported type.");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
+ "Reference subtraction: input 0 and Input 1 types are mismatched");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference subtraction: input and output types are mismatched");
+
+ supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
+ "Reference subtraction: shapes are not suitable for implicit broadcast.");
+
+ return supported;
}
} // namespace armnn
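All five IsXxxSupported rewrites above share one idiom: run every rule, AND the results together, and append a reason string for each failure, so a caller sees the complete list of problems rather than only the first. A simplified sketch of that accumulation (the real CheckSupportRule and rule types such as TypeAnyOf, TypesAreEqual and ShapesAreBroadcastCompatible live in backendsCommon; the shapes below are illustrative):

#include <string>

// Hypothetical minimal rule: the real rule types set m_Res in their
// constructors after inspecting the TensorInfos they are given.
struct Rule
{
    bool m_Res;
};

template<typename RuleType>
bool CheckSupportRuleSketch(const RuleType& rule,
                            std::string* reasonIfUnsupported,
                            const char* reason)
{
    if (!rule.m_Res && reasonIfUnsupported != nullptr)
    {
        reasonIfUnsupported->append(reason);
        reasonIfUnsupported->append("\n");
    }
    return rule.m_Res; // '&=' at the call site lets the remaining rules still run.
}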
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 09b0246895..4b4e5449b4 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -96,6 +96,14 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
armnn::DataType::QuantisedAsymm8>();
}
+BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
+ AdditionQueueDescriptor,
+ AdditionLayer,
+ armnn::DataType::QuantisedSymm16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
@@ -112,6 +120,14 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
armnn::DataType::QuantisedAsymm8>();
}
+BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ armnn::DataType::QuantisedSymm16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
@@ -128,6 +144,14 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
armnn::DataType::QuantisedAsymm8>();
}
+BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
+ MultiplicationQueueDescriptor,
+ MultiplicationLayer,
+ armnn::DataType::QuantisedSymm16>();
+}
+
BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
@@ -144,6 +168,14 @@ BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
armnn::DataType::QuantisedAsymm8>();
}
+BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
+{
+ RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
+ DivisionQueueDescriptor,
+ DivisionLayer,
+ armnn::DataType::QuantisedSymm16>();
+}
+
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 3206b762ff..cbc56d14b7 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -228,6 +228,10 @@ ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test)
ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test)
ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(AdditionInt16, AdditionInt16Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcastInt16, AdditionBroadcastInt16Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementInt16, AdditionBroadcast1ElementInt16Test)
+
// Sub
ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
@@ -237,6 +241,10 @@ ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test)
ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(SubtractionInt16, SubtractionInt16Test)
+ARMNN_AUTO_TEST_CASE(SubBroadcastInt16, SubtractionBroadcastInt16Test)
+ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementInt16, SubtractionBroadcast1ElementInt16Test)
+
// Div
ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
@@ -248,6 +256,10 @@ ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(DivisionInt16, DivisionInt16Test)
+ARMNN_AUTO_TEST_CASE(DivisionInt16Broadcast1Element, DivisionBroadcast1ElementInt16Test)
+ARMNN_AUTO_TEST_CASE(DivisionInt16Broadcast1DVector, DivisionBroadcast1DVectorInt16Test)
+
// Equal
ARMNN_AUTO_TEST_CASE(SimpleEqual, EqualSimpleTest)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
@@ -271,11 +283,17 @@ ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(MaximumInt16, MaximumInt16Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementInt16, MaximumBroadcast1ElementInt16Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorInt16, MaximumBroadcast1DVectorInt16Test)
// Min
ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1)
ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2)
ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(MinimumInt16, MinimumInt16Test)
+ARMNN_AUTO_TEST_CASE(MinimumBroadcast1ElementInt16, MinimumBroadcast1ElementInt16Test)
+ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorInt16, MinimumBroadcast1DVectorInt16Test)
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
@@ -284,6 +302,9 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationInt16, MultiplicationInt16Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementInt16, MultiplicationBroadcast1ElementInt16Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorInt16, MultiplicationBroadcast1DVectorInt16Test)
// Batch Norm
ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index cfa8ce7e91..95c75a576a 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -112,6 +112,22 @@ public:
}
};
+class QSymm16Decoder : public TypedIterator<const int16_t, Decoder>
+{
+public:
+ QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
class FloatEncoder : public TypedIterator<float, Encoder>
{
public:
@@ -152,4 +168,20 @@ public:
}
};
+class QSymm16Encoder : public TypedIterator<int16_t, Encoder>
+{
+public:
+ QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ void Set(const float& right) override
+ {
+ *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
} //namespace armnn
\ No newline at end of file
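The decoder/encoder pairs above are consumed by ElementwiseFunction, which walks the broadcast-compatible shapes, dequantizes each operand through Decoder::Get, applies the functor in float, and requantizes through Encoder::Set. A same-shape sketch of that flow, with the broadcast walk omitted and the Sketch names being illustrative:

#include <cmath>
#include <cstdint>
#include <functional>
#include <vector>

void ElementwiseQSymm16Sketch(const std::vector<int16_t>& in0, float scale0,
                              const std::vector<int16_t>& in1, float scale1,
                              std::vector<int16_t>& out, float outScale,
                              const std::function<float(float, float)>& op)
{
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        const float a = in0[i] * scale0; // what QSymm16Decoder::Get() computes (offset is 0)
        const float b = in1[i] * scale1;
        out[i] = static_cast<int16_t>(std::round(op(a, b) / outScale)); // QSymm16Encoder::Set()
    }
}

// e.g. ElementwiseQSymm16Sketch(in0, 7.0f, in1, 7.0f, out, 7.0f, std::plus<float>());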
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 6e6e1d5f21..1a30e7c9fb 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -64,6 +64,28 @@ void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() c
encodeIterator0);
break;
}
+ case armnn::DataType::QuantisedSymm16:
+ {
+ QSymm16Decoder decodeIterator0(GetInputTensorData<int16_t>(0, m_Data),
+ inputInfo0.GetQuantizationScale(),
+ inputInfo0.GetQuantizationOffset());
+
+ QSymm16Decoder decodeIterator1(GetInputTensorData<int16_t>(1, m_Data),
+ inputInfo1.GetQuantizationScale(),
+ inputInfo1.GetQuantizationOffset());
+
+ QSymm16Encoder encodeIterator0(GetOutputTensorData<int16_t>(0, m_Data),
+ outputInfo.GetQuantizationScale(),
+ outputInfo.GetQuantizationOffset());
+
+ ElementwiseFunction<Functor, Decoder, Encoder>(inShape0,
+ inShape1,
+ outShape,
+ decodeIterator0,
+ decodeIterator1,
+ encodeIterator0);
+ break;
+ }
default:
BOOST_ASSERT_MSG(false, "RefElementwiseWorkload: Not supported Data Type!");
break;
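Taken together, these changes let elementwise layers run on the reference backend with QuantisedSymm16 tensors end to end. A hedged construction sketch, assuming the Arm NN graph-building API of this period (optimization and runtime loading omitted):

#include <armnn/ArmNN.hpp>

void BuildQSymm16AdditionNetwork()
{
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::IConnectableLayer* input0 = net->AddInputLayer(0);
    armnn::IConnectableLayer* input1 = net->AddInputLayer(1);
    armnn::IConnectableLayer* add    = net->AddAdditionLayer();
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Same shape, scale and offset as AdditionInt16Test above.
    const unsigned int shape[] = { 1, 2, 2, 3 };
    armnn::TensorInfo info(4, shape, armnn::DataType::QuantisedSymm16);
    info.SetQuantizationScale(7.0f);
    info.SetQuantizationOffset(0);

    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);
}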