[cgit page capture: navigation tabs — about / summary / refs / log / tree / commit / diff]
path: root/src/backends/backendsCommon/test
Diff options (as rendered by cgit):
  context: (default)
  space: (default)
  mode: (default)
Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r--src/backends/backendsCommon/test/ArithmeticTestImpl.hpp50
-rw-r--r--src/backends/backendsCommon/test/EndToEndTestImpl.hpp12
-rw-r--r--src/backends/backendsCommon/test/LayerTests.cpp194
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp14
-rw-r--r--src/backends/backendsCommon/test/MergerTestImpl.hpp2
5 files changed, 162 insertions, 110 deletions
diff --git a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
index 1d6cf1d99b..6f685ebb42 100644
--- a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
@@ -17,7 +17,7 @@
namespace
{
-template<typename armnn::DataType DataType>
+template<armnn::DataType ArmnnTypeInput, armnn::DataType ArmnnTypeOutput>
INetworkPtr CreateArithmeticNetwork(const std::vector<TensorShape>& inputShapes,
const TensorShape& outputShape,
const LayerType type,
@@ -39,22 +39,25 @@ INetworkPtr CreateArithmeticNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
- TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
Connect(input, arithmeticLayer, inputTensorInfo, 0, i);
}
- TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+ TensorInfo outputTensorInfo(outputShape, ArmnnTypeOutput, qScale, qOffset);
IConnectableLayer* output = net->AddOutputLayer(0, "output");
Connect(arithmeticLayer, output, outputTensorInfo, 0, 0);
return net;
}
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<armnn::DataType ArmnnInputType,
+ armnn::DataType ArmnnOutputType,
+ typename TInput = armnn::ResolveType<ArmnnInputType>,
+ typename TOutput = armnn::ResolveType<ArmnnOutputType>>
void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
- const std::vector<T> expectedOutput)
+ const std::vector<TOutput> expectedOutput)
{
using namespace armnn;
@@ -62,26 +65,29 @@ void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
const TensorShape& outputShape = { 2, 2, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnInputType, ArmnnOutputType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
- const std::vector<T> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
- 3, 3, 3, 3, 4, 4, 4, 4 });
+ const std::vector<TInput> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
- const std::vector<T> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
- 5, 5, 5, 5, 4, 4, 4, 4 });
+ const std::vector<TInput> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
+ 5, 5, 5, 5, 4, 4, 4, 4 });
- std::map<int, std::vector<T>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
- std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+ std::map<int, std::vector<TInput>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
+ std::map<int, std::vector<TOutput>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<TInput, TOutput>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<armnn::DataType ArmnnInputType,
+ armnn::DataType ArmnnOutputType,
+ typename TInput = armnn::ResolveType<ArmnnInputType>,
+ typename TOutput = armnn::ResolveType<ArmnnOutputType>>
void ArithmeticBroadcastEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
- const std::vector<T> expectedOutput)
+ const std::vector<TOutput> expectedOutput)
{
using namespace armnn;
@@ -89,19 +95,19 @@ void ArithmeticBroadcastEndToEnd(const std::vector<BackendId>& backends,
const TensorShape& outputShape = { 1, 2, 2, 3 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnInputType, ArmnnOutputType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
- const std::vector<T> input0({ 1, 2, 3, 1, 0, 6,
- 7, 8, 9, 10, 11, 12 });
+ const std::vector<TInput> input0({ 1, 2, 3, 1, 0, 6,
+ 7, 8, 9, 10, 11, 12 });
- const std::vector<T> input1({ 1, 1, 3 });
+ const std::vector<TInput> input1({ 1, 1, 3 });
- std::map<int, std::vector<T>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
- std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+ std::map<int, std::vector<TInput>> inputTensorData = {{ 0, input0 }, { 1, input1 }};
+ std::map<int, std::vector<TOutput>> expectedOutputData = {{ 0, expectedOutput }};
- EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<TInput, TOutput>(move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 15a3937aca..7d2b091e42 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -102,10 +102,10 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
);
}
-template<typename T>
+template<typename TInput, typename TOutput>
void EndToEndLayerTestImpl(INetworkPtr network,
- const std::map<int, std::vector<T>>& inputTensorData,
- const std::map<int, std::vector<T>>& expectedOutputData,
+ const std::map<int, std::vector<TInput>>& inputTensorData,
+ const std::map<int, std::vector<TOutput>>& expectedOutputData,
std::vector<BackendId> backends)
{
// Create runtime in which test will run
@@ -128,10 +128,10 @@ void EndToEndLayerTestImpl(INetworkPtr network,
}
OutputTensors outputTensors;
outputTensors.reserve(expectedOutputData.size());
- std::map<int, std::vector<T>> outputStorage;
+ std::map<int, std::vector<TOutput>> outputStorage;
for (auto&& it : expectedOutputData)
{
- std::vector<T> out(it.second.size());
+ std::vector<TOutput> out(it.second.size());
outputStorage.emplace(it.first, out);
outputTensors.push_back({it.first,
Tensor(runtime->GetOutputTensorInfo(netId, it.first),
@@ -144,7 +144,7 @@ void EndToEndLayerTestImpl(INetworkPtr network,
// Checks the results.
for (auto&& it : expectedOutputData)
{
- std::vector<T> out = outputStorage.at(it.first);
+ std::vector<TOutput> out = outputStorage.at(it.first);
BOOST_TEST(it.second == out);
}
}
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 95fa50b89c..6060b30928 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1783,66 +1783,98 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
}
namespace {
- template <typename Descriptor, armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
- LayerTestResult<T, 4> ElementwiseTestHelper
- (armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<T> values0,
- const unsigned int shape1[4], std::vector<T> values1,
- const unsigned int outShape[4], std::vector<T> outValues,
- float qScale = 0.0f, int qOffset = 0)
- {
- const size_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType};
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+template <typename Descriptor,
+ armnn::DataType ArmnnTypeInput,
+ armnn::DataType ArmnnTypeOutput,
+ typename TInput = armnn::ResolveType<ArmnnTypeInput>,
+ typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+LayerTestResult<TOutput, 4> ElementwiseTestHelper(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const unsigned int shape0[4], std::vector<TInput> values0,
+ const unsigned int shape1[4], std::vector<TInput> values1,
+ const unsigned int outShape[4], std::vector<TOutput> outValues,
+ float qScale = 0.0f, int qOffset = 0)
+{
+ const size_t dimensionCount = 4;
+ armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
+ armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
+ armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo0.SetQuantizationScale(qScale);
- inputTensorInfo0.SetQuantizationOffset(qOffset);
+ auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
- inputTensorInfo1.SetQuantizationScale(qScale);
- inputTensorInfo1.SetQuantizationOffset(qOffset);
+ if (armnn::IsQuantizedType<TInput>())
+ {
+ inputTensorInfo0.SetQuantizationScale(qScale);
+ inputTensorInfo0.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
- }
+ inputTensorInfo1.SetQuantizationScale(qScale);
+ inputTensorInfo1.SetQuantizationOffset(qOffset);
- LayerTestResult<T,4> ret(outputTensorInfo);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
+ }
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ LayerTestResult<TOutput,4> ret(outputTensorInfo);
- Descriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
+ if(ArmnnTypeOutput == armnn::DataType::Boolean)
+ {
+ ret.compareBoolean = true;
+ }
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
+ std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+ Descriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+ auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
+
+ inputHandle0->Allocate();
+ inputHandle1->Allocate();
+ outputHandle->Allocate();
- ExecuteWorkload(*workload, memoryManager);
+ CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ ExecuteWorkload(*workload, memoryManager);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
- return ret;
- }
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
+ return ret;
}
-LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
+LayerTestResult<T, 4> ElementwiseTestHelper(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const unsigned int shape0[4], std::vector<T> values0,
+ const unsigned int shape1[4], std::vector<T> values1,
+ const unsigned int outShape[4], std::vector<T> outValues,
+ float qScale = 0.0f, int qOffset = 0)
+{
+ return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
+ (workloadFactory,
+ memoryManager,
+ shape0,
+ values0,
+ shape1,
+ values1,
+ outShape,
+ outValues,
+ qScale,
+ qOffset);
+}
+}
+
+LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
const unsigned int width = 2;
const unsigned int height = 2;
@@ -1857,10 +1889,10 @@ LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto
std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
5, 5, 5, 5, 4, 4, 4, 4 });
- std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 1 });
+ std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape,
@@ -1871,7 +1903,7 @@ LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto
output);
}
-LayerTestResult<float, 4> EqualBroadcast1ElementTest(
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
@@ -1881,9 +1913,9 @@ LayerTestResult<float, 4> EqualBroadcast1ElementTest(
unsigned int shape1[] = { 1, 1, 1, 1 };
std::vector<float> input1({ 1 });
- std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
+ std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -1894,7 +1926,7 @@ LayerTestResult<float, 4> EqualBroadcast1ElementTest(
output);
}
-LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
@@ -1906,10 +1938,10 @@ LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
std::vector<float> input1({ 1, 2, 3});
- std::vector<float> output({ 1, 1, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 0 });
+ std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -1928,7 +1960,7 @@ LayerTestResult<uint8_t, 4> EqualUint8Test(
// See dequantized values to the right.
std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 5, 5, 5, 5 });
+ 3, 3, 3, 3, 7, 7, 7, 7 });
std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3, 3, 3, 3, 5, 5, 5, 5 });
@@ -1936,7 +1968,9 @@ LayerTestResult<uint8_t, 4> EqualUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape,
@@ -1964,7 +1998,9 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -1992,7 +2028,9 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -2005,7 +2043,7 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
0);
}
-LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
const unsigned int width = 2;
@@ -2021,10 +2059,10 @@ LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac
std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
5, 5, 5, 5, 4, 4, 4, 4 });
- std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
- 0, 0, 0, 0, 0, 0, 0, 0 });
+ std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape,
@@ -2035,7 +2073,7 @@ LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac
output);
}
-LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
@@ -2045,9 +2083,9 @@ LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
unsigned int shape1[] = { 1, 1, 1, 1 };
std::vector<float> input1({ 1 });
- std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
+ std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -2058,7 +2096,7 @@ LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
output);
}
-LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
@@ -2070,10 +2108,10 @@ LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
std::vector<float> input1({ 1, 3, 2});
- std::vector<float> output({ 0, 0, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1 });
+ std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -2100,7 +2138,9 @@ LayerTestResult<uint8_t, 4> GreaterUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape,
@@ -2128,7 +2168,9 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -2156,7 +2198,9 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
+ armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::Boolean>(
workloadFactory,
memoryManager,
shape0,
@@ -2235,7 +2279,7 @@ LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
std::vector<float> input1({ 1, 2, 3});
std::vector<float> output({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
+ 7, 8, 9, 10, 11, 12 });
return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
workloadFactory,
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 16fe43212b..05d510e78e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -47,11 +47,13 @@ struct LayerTestResult
output.resize(shape);
outputExpected.resize(shape);
supported = true;
+ compareBoolean = false;
}
boost::multi_array<T, n> output;
boost::multi_array<T, n> outputExpected;
bool supported;
+ bool compareBoolean;
};
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
@@ -909,15 +911,15 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor);
-LayerTestResult<float, 4> EqualSimpleTest(
+LayerTestResult<uint8_t, 4> EqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> EqualBroadcast1ElementTest(
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -933,15 +935,15 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> GreaterSimpleTest(
+LayerTestResult<uint8_t, 4> GreaterSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
index 2bdfe286c9..ec42b09ada 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -110,7 +110,7 @@ void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
- EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<T, T>(move(net), inputTensorData, expectedOutputData, backends);
}
template<armnn::DataType ArmnnType>