author    Nina Drozd <nina.drozd@arm.com>  2019-05-29 10:41:04 +0100
committer Nina Drozd <nina.drozd@arm.com>  2019-05-29 14:41:07 +0100
commit    8ed4b8c34df929d29952f6b3f835bf61d57466e7 (patch)
tree      a299751d347bd0d9523179ebfce926983e1b9135
parent    a70fe6049672710ffc06b708198ef44f3873eb89 (diff)
download  armnn-8ed4b8c34df929d29952f6b3f835bf61d57466e7.tar.gz
IVGCVSW-3172 Add QSymm16 support for reshape workload
* Added QSymm16 to supported types in WorkloadData
* Added QSymm16 to supported types in RefLayerSupport
* Created templated SimpleReshapeTest in LayerTests
* Updated ClLayerTests to use templated SimpleReshapeTest
* Updated NeonLayerTests to use templated SimpleReshapeTest
* Added test for QSymm16 to RefCreateWorkloadTests, RefLayerTests
* Removed ReshapeTestImpl as all reshape test methods were moved
* Added FloorTestImpl for existing SimpleFloorTest

Change-Id: I78efede8aab74c2d4cb0841dd426b8e06186133d
Signed-off-by: Nina Drozd <nina.drozd@arm.com>
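With the templated SimpleReshapeTest, a backend test file registers one case per
data type instead of keeping separate SimpleReshapeFloat32Test and
SimpleReshapeUint8Test functions. A minimal registration sketch, mirroring the
RefLayerTests.cpp hunk further down (the QuantisedSymm16 case is the one added
by this change):

    // One templated test now covers all supported reshape data types.
    ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32,          SimpleReshapeTest<armnn::DataType::Float32>)
    ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8,  SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
    ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16,  SimpleReshapeTest<armnn::DataType::QuantisedSymm16>)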
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp            |   3
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt         |   2
-rw-r--r--  src/backends/backendsCommon/test/FloorTestImpl.hpp      |  56
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp         |   2
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp         |  96
-rw-r--r--  src/backends/backendsCommon/test/ReshapeTestImpl.hpp    | 188
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                   |   4
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp               |   4
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp              |   5
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp  |   9
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp           |   5
11 files changed, 168 insertions, 206 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 9bb95f67af..96e83c21dd 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -878,7 +878,8 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 508fc77ba6..040835e7f2 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -41,7 +41,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
Pooling2dTestImpl.hpp
QuantizeHelper.hpp
QuantizeTestImpl.hpp
- ReshapeTestImpl.hpp
+ FloorTestImpl.hpp
RuntimeTestImpl.hpp
SoftmaxTestImpl.hpp
SplitterEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/FloorTestImpl.hpp b/src/backends/backendsCommon/test/FloorTestImpl.hpp
new file mode 100644
index 0000000000..419b1b7bd8
--- /dev/null
+++ b/src/backends/backendsCommon/test/FloorTestImpl.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+#include "WorkloadTestUtils.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+LayerTestResult<float, 4> SimpleFloorTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
+
+ auto input = MakeTensor<float, 4>(inputTensorInfo,
+ { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+ LayerTestResult<float, 4> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+ { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+ 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::FloorQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 402e86de00..bc8e13d1ae 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -30,7 +30,7 @@
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
-#include "ReshapeTestImpl.hpp"
+#include "FloorTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "GatherTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index f8f50366eb..edb70455d5 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -373,11 +373,8 @@ LayerTestResult<float, 4> SimpleSigmoidTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> SimpleReshapeFloat32Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleReshapeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1711,6 +1708,42 @@ std::vector<T> ConvertToDataType(const std::vector<float>& input,
return output;
}
+template<typename T>
+LayerTestResult<T, 4> SimpleReshapeTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::TensorInfo inputTensorInfo,
+ armnn::TensorInfo outputTensorInfo,
+ const std::vector<T>& inputData,
+ const std::vector<T>& outputExpectedData)
+{
+ auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+ LayerTestResult<T, 4> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ReshapeQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
+
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -1780,4 +1813,57 @@ LayerTestResult<T, 2> FullyConnectedTest(
}
return result;
+}
+
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleReshapeTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 2, 2, 3, 3 };
+ unsigned int outputShape[] = { 2, 2, 9, 1 };
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(1.0f);
+
+ auto input = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+
+ 9.0f, 10.0f, 11.0f,
+ 12.0f, 13.0f, 14.0f,
+ 15.0f, 16.0f, 17.0f,
+
+ 18.0f, 19.0f, 20.0f,
+ 21.0f, 22.0f, 23.0f,
+ 24.0f, 25.0f, 26.0f,
+
+ 27.0f, 28.0f, 29.0f,
+ 30.0f, 31.0f, 32.0f,
+ 33.0f, 34.0f, 35.0f,
+ },
+ inputTensorInfo);
+
+ auto outputExpected = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+
+ 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+
+ 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
+
+ 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+ },
+ outputTensorInfo);
+
+ return SimpleReshapeTestImpl<T>(
+ workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/ReshapeTestImpl.hpp b/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
deleted file mode 100644
index 49918c5cd1..0000000000
--- a/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
+++ /dev/null
@@ -1,188 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "QuantizeHelper.hpp"
-#include "WorkloadTestUtils.hpp"
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/IBackendInternal.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-template<typename T>
-LayerTestResult<T, 4> SimpleReshapeTestImpl(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::TensorInfo inputTensorInfo,
- armnn::TensorInfo outputTensorInfo,
- const std::vector<T>& inputData,
- const std::vector<T>& outputExpectedData)
-{
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::ReshapeQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-LayerTestResult<float, 4> SimpleReshapeFloat32Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo;
- armnn::TensorInfo outputTensorInfo;
-
- unsigned int inputShape[] = { 2, 2, 3, 3 };
- unsigned int outputShape[] = { 2, 2, 9, 1 };
-
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
-
- std::vector<float> input = std::vector<float>(
- {
- 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f,
-
- 9.0f, 10.0f, 11.0f,
- 12.0f, 13.0f, 14.0f,
- 15.0f, 16.0f, 17.0f,
-
- 18.0f, 19.0f, 20.0f,
- 21.0f, 22.0f, 23.0f,
- 24.0f, 25.0f, 26.0f,
-
- 27.0f, 28.0f, 29.0f,
- 30.0f, 31.0f, 32.0f,
- 33.0f, 34.0f, 35.0f,
- });
-
- std::vector<float> outputExpected = std::vector<float>(
- {
- 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
-
- 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
-
- 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
-
- 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
- });
-
- return SimpleReshapeTestImpl<float>(
- workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
-}
-
-LayerTestResult<float, 4> SimpleFloorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
-
- LayerTestResult<float, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
- 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::FloorQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- armnn::TensorInfo inputTensorInfo;
- armnn::TensorInfo outputTensorInfo;
-
- unsigned int inputShape[] = { 2, 2, 3, 3 };
- unsigned int outputShape[] = { 2, 2, 9, 1 };
-
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8);
- inputTensorInfo.SetQuantizationScale(1.0f);
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(1.0f);
-
- std::vector<uint8_t> input = std::vector<uint8_t>(
- {
- 0, 1, 2,
- 3, 4, 5,
- 6, 7, 8,
-
- 9, 10, 11,
- 12, 13, 14,
- 15, 16, 17,
-
- 18, 19, 20,
- 21, 22, 23,
- 24, 25, 26,
-
- 27, 28, 29,
- 30, 31, 32,
- 33, 34, 35,
- });
-
- std::vector<uint8_t> outputExpected = std::vector<uint8_t>(
- {
- 0, 1, 2, 3, 4, 5, 6, 7, 8,
-
- 9, 10, 11, 12, 13, 14, 15, 16, 17,
-
- 18, 19, 20, 21, 22, 23, 24, 25, 26,
-
- 27, 28, 29, 30, 31, 32, 33, 34, 35,
- });
-
- return SimpleReshapeTestImpl<uint8_t>(
- workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
-}
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 17be230271..23c975e668 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -308,8 +308,8 @@ ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShape
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
// Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
// Pad
ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1ea227a14a..d89011d3ae 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -410,8 +410,8 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint
ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test)
// Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
// Pad
ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 9a691a6fa7..a9cddfd1bb 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1021,11 +1021,12 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
// Define supported output types.
- std::array<DataType,3> supportedOutputTypes =
+ std::array<DataType,4> supportedOutputTypes =
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
"Reference reshape: input type not supported.");
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 2222a22cb3..fef2567d07 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -661,16 +661,21 @@ static void RefCreateReshapeWorkloadTest()
TensorInfo({ 1, 4 }, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
}
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+{
+ RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
template <typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
unsigned int concatAxis)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 1207c1d648..a78b5aae5d 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -449,8 +449,9 @@ ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShape
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
// Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<armnn::DataType::QuantisedSymm16>)
// Rsqrt
ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest)