From 2b4d88e34ac1f965417fd236fd4786f26bae2042 Mon Sep 17 00:00:00 2001 From: kevmay01 Date: Thu, 24 Jan 2019 14:05:09 +0000 Subject: IVGCVSW-2503 Refactor RefElementwiseWorkload around Equal and Greater * Remove Equal and Greater from RefElementwiseWorkload * Create RefComparisonWorkload and add Equal and Greater * Update ElementwiseFunction for different input/output types * Update TfParser to create Equal/Greater with Boolean output * Update relevant tests to check for Boolean comparison Change-Id: I299b7f2121769c960ac0c6139764a5f3c89c9c32 --- src/backends/backendsCommon/MakeWorkloadHelper.hpp | 20 ++- src/backends/backendsCommon/Workload.hpp | 13 ++ src/backends/backendsCommon/WorkloadData.cpp | 10 ++ .../backendsCommon/test/ArithmeticTestImpl.hpp | 50 +++--- .../backendsCommon/test/EndToEndTestImpl.hpp | 12 +- src/backends/backendsCommon/test/LayerTests.cpp | 194 +++++++++++++-------- src/backends/backendsCommon/test/LayerTests.hpp | 14 +- .../backendsCommon/test/MergerTestImpl.hpp | 2 +- 8 files changed, 198 insertions(+), 117 deletions(-) (limited to 'src/backends/backendsCommon') diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp index 7784cc6d4d..2d54335355 100644 --- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp +++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp @@ -38,7 +38,7 @@ struct MakeWorkloadForType // Makes a workload for one the specified types based on the data type requirements of the tensorinfo. // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos. template + typename BooleanWorkload, typename QueueDescriptorType, typename... Args> std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... args) @@ -47,8 +47,10 @@ std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descrip info.m_InputTensorInfos[0].GetDataType() : info.m_OutputTensorInfos[0].GetDataType(); - BOOST_ASSERT(info.m_InputTensorInfos.empty() || info.m_OutputTensorInfos.empty() - || info.m_InputTensorInfos[0].GetDataType() == info.m_OutputTensorInfos[0].GetDataType()); + BOOST_ASSERT(info.m_InputTensorInfos.empty() || + info.m_OutputTensorInfos.empty() || + ((info.m_InputTensorInfos[0].GetDataType() == info.m_OutputTensorInfos[0].GetDataType()) || + info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Boolean)); switch (dataType) { @@ -60,6 +62,8 @@ std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descrip return MakeWorkloadForType::Func(descriptor, info, std::forward(args)...); case DataType::Signed32: return MakeWorkloadForType::Func(descriptor, info, std::forward(args)...); + case DataType::Boolean: + return MakeWorkloadForType::Func(descriptor, info, std::forward(args)...); default: BOOST_ASSERT_MSG(false, "Unknown DataType."); return nullptr; @@ -67,16 +71,18 @@ std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descrip } // Makes a workload for one the specified types based on the data type requirements of the tensorinfo. -// Calling this method is the equivalent of calling the three typed MakeWorkload method with . +// Calling this method is the equivalent of calling the five typed MakeWorkload method with . // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos. template std::unique_ptr MakeWorkloadHelper(const QueueDescriptorType& descriptor, const WorkloadInfo& info, Args&&... 
args) { - return MakeWorkloadHelper(descriptor, info, - std::forward(args)...); + return MakeWorkloadHelper( + descriptor, + info, + std::forward(args)...); } } //namespace diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp index 34d13635ba..4d14adbf54 100644 --- a/src/backends/backendsCommon/Workload.hpp +++ b/src/backends/backendsCommon/Workload.hpp @@ -164,6 +164,19 @@ using Uint8Workload = TypedWorkload using Int32Workload = TypedWorkload; +template +using BooleanWorkload = TypedWorkload; + +template +using BaseFloat32ComparisonWorkload = MultiTypedWorkload; + +template +using BaseUint8ComparisonWorkload = MultiTypedWorkload; + template using Float16ToFloat32Workload = MultiTypedWorkload +template INetworkPtr CreateArithmeticNetwork(const std::vector& inputShapes, const TensorShape& outputShape, const LayerType type, @@ -39,22 +39,25 @@ INetworkPtr CreateArithmeticNetwork(const std::vector& inputShapes, for (unsigned int i = 0; i < inputShapes.size(); ++i) { - TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset); IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast(i)); Connect(input, arithmeticLayer, inputTensorInfo, 0, i); } - TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + TensorInfo outputTensorInfo(outputShape, ArmnnTypeOutput, qScale, qOffset); IConnectableLayer* output = net->AddOutputLayer(0, "output"); Connect(arithmeticLayer, output, outputTensorInfo, 0, 0); return net; } -template> +template, + typename TOutput = armnn::ResolveType> void ArithmeticSimpleEndToEnd(const std::vector& backends, const LayerType type, - const std::vector expectedOutput) + const std::vector expectedOutput) { using namespace armnn; @@ -62,26 +65,29 @@ void ArithmeticSimpleEndToEnd(const std::vector& backends, const TensorShape& outputShape = { 2, 2, 2, 2 }; // Builds up the structure of the network - INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); + INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); BOOST_TEST_CHECKPOINT("create a network"); - const std::vector input0({ 1, 1, 1, 1, 5, 5, 5, 5, - 3, 3, 3, 3, 4, 4, 4, 4 }); + const std::vector input0({ 1, 1, 1, 1, 5, 5, 5, 5, + 3, 3, 3, 3, 4, 4, 4, 4 }); - const std::vector input1({ 1, 1, 1, 1, 3, 3, 3, 3, - 5, 5, 5, 5, 4, 4, 4, 4 }); + const std::vector input1({ 1, 1, 1, 1, 3, 3, 3, 3, + 5, 5, 5, 5, 4, 4, 4, 4 }); - std::map> inputTensorData = {{ 0, input0 }, { 1, input1 }}; - std::map> expectedOutputData = {{ 0, expectedOutput }}; + std::map> inputTensorData = {{ 0, input0 }, { 1, input1 }}; + std::map> expectedOutputData = {{ 0, expectedOutput }}; - EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); + EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); } -template> +template, + typename TOutput = armnn::ResolveType> void ArithmeticBroadcastEndToEnd(const std::vector& backends, const LayerType type, - const std::vector expectedOutput) + const std::vector expectedOutput) { using namespace armnn; @@ -89,19 +95,19 @@ void ArithmeticBroadcastEndToEnd(const std::vector& backends, const TensorShape& outputShape = { 1, 2, 2, 3 }; // Builds up the structure of the network - INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); + INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); BOOST_TEST_CHECKPOINT("create a network"); - const 
std::vector input0({ 1, 2, 3, 1, 0, 6, - 7, 8, 9, 10, 11, 12 }); + const std::vector input0({ 1, 2, 3, 1, 0, 6, + 7, 8, 9, 10, 11, 12 }); - const std::vector input1({ 1, 1, 3 }); + const std::vector input1({ 1, 1, 3 }); - std::map> inputTensorData = {{ 0, input0 }, { 1, input1 }}; - std::map> expectedOutputData = {{ 0, expectedOutput }}; + std::map> inputTensorData = {{ 0, input0 }, { 1, input1 }}; + std::map> expectedOutputData = {{ 0, expectedOutput }}; - EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); + EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 15a3937aca..7d2b091e42 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -102,10 +102,10 @@ inline bool ConstantUsageUint8Test(const std::vector& backends) ); } -template +template void EndToEndLayerTestImpl(INetworkPtr network, - const std::map>& inputTensorData, - const std::map>& expectedOutputData, + const std::map>& inputTensorData, + const std::map>& expectedOutputData, std::vector backends) { // Create runtime in which test will run @@ -128,10 +128,10 @@ void EndToEndLayerTestImpl(INetworkPtr network, } OutputTensors outputTensors; outputTensors.reserve(expectedOutputData.size()); - std::map> outputStorage; + std::map> outputStorage; for (auto&& it : expectedOutputData) { - std::vector out(it.second.size()); + std::vector out(it.second.size()); outputStorage.emplace(it.first, out); outputTensors.push_back({it.first, Tensor(runtime->GetOutputTensorInfo(netId, it.first), @@ -144,7 +144,7 @@ void EndToEndLayerTestImpl(INetworkPtr network, // Checks the results. 
for (auto&& it : expectedOutputData) { - std::vector out = outputStorage.at(it.first); + std::vector out = outputStorage.at(it.first); BOOST_TEST(it.second == out); } } diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index 95fa50b89c..6060b30928 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -1783,66 +1783,98 @@ std::unique_ptr CreateWorkload( } namespace { - template > - LayerTestResult ElementwiseTestHelper - (armnn::IWorkloadFactory & workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[4], std::vector values0, - const unsigned int shape1[4], std::vector values1, - const unsigned int outShape[4], std::vector outValues, - float qScale = 0.0f, int qOffset = 0) - { - const size_t dimensionCount = 4; - armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType}; - armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType}; - armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType}; - auto input0 = MakeTensor(inputTensorInfo0, values0); - auto input1 = MakeTensor(inputTensorInfo1, values1); +template , + typename TOutput = armnn::ResolveType> +LayerTestResult ElementwiseTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + const unsigned int shape0[4], std::vector values0, + const unsigned int shape1[4], std::vector values1, + const unsigned int outShape[4], std::vector outValues, + float qScale = 0.0f, int qOffset = 0) +{ + const size_t dimensionCount = 4; + armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput}; + armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput}; + armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput}; - if (armnn::IsQuantizedType()) - { - inputTensorInfo0.SetQuantizationScale(qScale); - inputTensorInfo0.SetQuantizationOffset(qOffset); + auto input0 = MakeTensor(inputTensorInfo0, values0); + auto input1 = MakeTensor(inputTensorInfo1, values1); - inputTensorInfo1.SetQuantizationScale(qScale); - inputTensorInfo1.SetQuantizationOffset(qOffset); + if (armnn::IsQuantizedType()) + { + inputTensorInfo0.SetQuantizationScale(qScale); + inputTensorInfo0.SetQuantizationOffset(qOffset); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); - } + inputTensorInfo1.SetQuantizationScale(qScale); + inputTensorInfo1.SetQuantizationOffset(qOffset); - LayerTestResult ret(outputTensorInfo); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } - std::unique_ptr inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0); - std::unique_ptr inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + LayerTestResult ret(outputTensorInfo); - Descriptor data; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get()); - AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - auto workload = CreateWorkload(workloadFactory, info, data); + if(ArmnnTypeOutput == armnn::DataType::Boolean) + { + ret.compareBoolean = true; + } - inputHandle0->Allocate(); - inputHandle1->Allocate(); - 
outputHandle->Allocate(); + std::unique_ptr inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0); + std::unique_ptr inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + Descriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get()); + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + auto workload = CreateWorkload(workloadFactory, info, data); + + inputHandle0->Allocate(); + inputHandle1->Allocate(); + outputHandle->Allocate(); - ExecuteWorkload(*workload, memoryManager); + CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + ExecuteWorkload(*workload, memoryManager); - ret.outputExpected = MakeTensor(outputTensorInfo, outValues); - return ret; - } + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + ret.outputExpected = MakeTensor(outputTensorInfo, outValues); + return ret; } -LayerTestResult EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +template > +LayerTestResult ElementwiseTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + const unsigned int shape0[4], std::vector values0, + const unsigned int shape1[4], std::vector values1, + const unsigned int outShape[4], std::vector outValues, + float qScale = 0.0f, int qOffset = 0) +{ + return ElementwiseTestHelper + (workloadFactory, + memoryManager, + shape0, + values0, + shape1, + values1, + outShape, + outValues, + qScale, + qOffset); +} +} + +LayerTestResult EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { const unsigned int width = 2; const unsigned int height = 2; @@ -1857,10 +1889,10 @@ LayerTestResult EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto std::vector input1({ 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 4, 4, 4, 4 }); - std::vector output({ 1, 1, 1, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 1, 1, 1 }); + std::vector output({ 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 1 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape, @@ -1871,7 +1903,7 @@ LayerTestResult EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto output); } -LayerTestResult EqualBroadcast1ElementTest( +LayerTestResult EqualBroadcast1ElementTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -1881,9 +1913,9 @@ LayerTestResult EqualBroadcast1ElementTest( unsigned int shape1[] = { 1, 1, 1, 1 }; std::vector input1({ 1 }); - std::vector output({ 1, 0, 0, 0, 0, 0, 0, 0}); + std::vector output({ 1, 0, 0, 0, 0, 0, 0, 0}); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -1894,7 +1926,7 @@ LayerTestResult EqualBroadcast1ElementTest( output); } -LayerTestResult EqualBroadcast1DVectorTest( +LayerTestResult EqualBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, 
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -1906,10 +1938,10 @@ LayerTestResult EqualBroadcast1DVectorTest( std::vector input1({ 1, 2, 3}); - std::vector output({ 1, 1, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0 }); + std::vector output({ 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -1928,7 +1960,7 @@ LayerTestResult EqualUint8Test( // See dequantized values to the right. std::vector input0({ 1, 1, 1, 1, 6, 6, 6, 6, - 3, 3, 3, 3, 5, 5, 5, 5 }); + 3, 3, 3, 3, 7, 7, 7, 7 }); std::vector input1({ 2, 2, 2, 2, 6, 6, 6, 6, 3, 3, 3, 3, 5, 5, 5, 5 }); @@ -1936,7 +1968,9 @@ LayerTestResult EqualUint8Test( std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape, @@ -1964,7 +1998,9 @@ LayerTestResult EqualBroadcast1ElementUint8Test( std::vector output({ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -1992,7 +2028,9 @@ LayerTestResult EqualBroadcast1DVectorUint8Test( std::vector output({ 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -2005,7 +2043,7 @@ LayerTestResult EqualBroadcast1DVectorUint8Test( 0); } -LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory, +LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { const unsigned int width = 2; @@ -2021,10 +2059,10 @@ LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac std::vector input1({ 1, 1, 1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 4, 4, 4, 4 }); - std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0 }); + std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape, @@ -2035,7 +2073,7 @@ LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac output); } -LayerTestResult GreaterBroadcast1ElementTest( +LayerTestResult GreaterBroadcast1ElementTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -2045,9 +2083,9 @@ LayerTestResult GreaterBroadcast1ElementTest( unsigned int shape1[] = { 1, 1, 1, 1 }; std::vector input1({ 1 }); - std::vector output({ 0, 1, 1, 1, 1, 1, 1, 1}); + std::vector output({ 0, 1, 1, 1, 1, 1, 1, 1}); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -2058,7 +2096,7 @@ LayerTestResult GreaterBroadcast1ElementTest( output); } -LayerTestResult GreaterBroadcast1DVectorTest( +LayerTestResult GreaterBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -2070,10 +2108,10 @@ LayerTestResult GreaterBroadcast1DVectorTest( std::vector input1({ 1, 3, 2}); - std::vector output({ 0, 0, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1 }); + std::vector output({ 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -2100,7 +2138,9 @@ LayerTestResult GreaterUint8Test( std::vector output({ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }); - return 
ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape, @@ -2128,7 +2168,9 @@ LayerTestResult GreaterBroadcast1ElementUint8Test( std::vector output({ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -2156,7 +2198,9 @@ LayerTestResult GreaterBroadcast1DVectorUint8Test( std::vector output({ 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper( + return ElementwiseTestHelper( workloadFactory, memoryManager, shape0, @@ -2235,7 +2279,7 @@ LayerTestResult MaximumBroadcast1DVectorTest( std::vector input1({ 1, 2, 3}); std::vector output({ 1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 11, 12 }); + 7, 8, 9, 10, 11, 12 }); return ElementwiseTestHelper( workloadFactory, diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 16fe43212b..05d510e78e 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -47,11 +47,13 @@ struct LayerTestResult output.resize(shape); outputExpected.resize(shape); supported = true; + compareBoolean = false; } boost::multi_array output; boost::multi_array outputExpected; bool supported; + bool compareBoolean; }; LayerTestResult SimpleConvolution2d3x5Test( @@ -909,15 +911,15 @@ LayerTestResult Concatenation3dDim2DiffInputDimsUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor); -LayerTestResult EqualSimpleTest( +LayerTestResult EqualSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult EqualBroadcast1ElementTest( +LayerTestResult EqualBroadcast1ElementTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult EqualBroadcast1DVectorTest( +LayerTestResult EqualBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -933,15 +935,15 @@ LayerTestResult EqualBroadcast1DVectorUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult GreaterSimpleTest( +LayerTestResult GreaterSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult GreaterBroadcast1ElementTest( +LayerTestResult GreaterBroadcast1ElementTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult GreaterBroadcast1DVectorTest( +LayerTestResult GreaterBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp index 2bdfe286c9..ec42b09ada 100644 --- a/src/backends/backendsCommon/test/MergerTestImpl.hpp +++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp @@ -110,7 +110,7 @@ void MergerDim0EndToEnd(const std::vector& backends) std::map> inputTensorData = {{ 0,inputData }, { 1,inputData }}; std::map> expectedOutputData = {{ 0,expectedOutput }}; - EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); + EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); } template -- cgit v1.2.1
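
The core of this change is that an elementwise functor may now consume one data type and produce another: Equal and Greater read Float32 or QAsymm8 inputs but write a Boolean tensor, while the arithmetic workloads keep matching input and output types. A minimal, self-contained C++ sketch of that typing pattern follows; ElementwiseCompare and the float/uint8_t pairing are illustrative stand-ins and are not code from the ArmNN tree.

// Illustrative sketch only: an elementwise comparison that reads one element
// type and writes another (here float -> uint8_t used as a boolean), which is
// the typing pattern this patch introduces for Equal and Greater.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// The functor's argument type (TIn) and result type (TOut) are allowed to differ.
template <typename TIn, typename TOut, typename Functor>
void ElementwiseCompare(const std::vector<TIn>& in0,
                        const std::vector<TIn>& in1,
                        std::vector<TOut>& out,
                        Functor f)
{
    out.resize(in0.size());
    std::transform(in0.begin(), in0.end(), in1.begin(), out.begin(),
                   [&f](TIn a, TIn b) { return static_cast<TOut>(f(a, b)); });
}

int main()
{
    const std::vector<float> a{ 1.f, 5.f, 3.f, 4.f };
    const std::vector<float> b{ 1.f, 3.f, 5.f, 4.f };

    std::vector<uint8_t> equal;
    std::vector<uint8_t> greater;

    // Equal and Greater both consume float inputs but produce boolean-like output.
    ElementwiseCompare(a, b, equal,   std::equal_to<float>());
    ElementwiseCompare(a, b, greater, std::greater<float>());

    for (std::size_t i = 0; i < a.size(); ++i)
    {
        std::cout << a[i]
                  << (equal[i] ? " == " : (greater[i] ? " > " : " <= "))
                  << b[i] << "\n";
    }
    return 0;
}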
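
The new comparison workloads in Workload.hpp sit on base types that pin different input and output data types (for example BaseFloat32ComparisonWorkload is a MultiTypedWorkload with Float32 input and Boolean output). The sketch below, with hypothetical names, shows how such a multi-typed base class can enforce that constraint at construction time; it mirrors the idea only and is not the ArmNN class hierarchy.

// Hypothetical sketch of a "multi-typed workload" constraint: the base class
// fixes the expected input and output data types, so a Float32 comparison
// workload is forced to produce Boolean output.
#include <cassert>
#include <vector>

enum class DataType { Float32, QAsymmU8, Boolean };

struct TensorInfo { DataType type; };

struct WorkloadInfo
{
    std::vector<TensorInfo> inputs;
    std::vector<TensorInfo> outputs;
};

template <DataType InputType, DataType OutputType>
class MultiTypedWorkloadSketch
{
public:
    explicit MultiTypedWorkloadSketch(const WorkloadInfo& info)
    {
        // Validate that every input/output tensor carries the pinned data type.
        for (const TensorInfo& in : info.inputs)   { assert(in.type == InputType); }
        for (const TensorInfo& out : info.outputs) { assert(out.type == OutputType); }
    }
    virtual ~MultiTypedWorkloadSketch() = default;
    virtual void Execute() const = 0;
};

// Analogue of BaseFloat32ComparisonWorkload: float in, boolean out.
using Float32ComparisonBase =
    MultiTypedWorkloadSketch<DataType::Float32, DataType::Boolean>;

class EqualWorkloadSketch : public Float32ComparisonBase
{
public:
    using Float32ComparisonBase::Float32ComparisonBase;
    void Execute() const override { /* run the comparison kernel here */ }
};

int main()
{
    WorkloadInfo info;
    info.inputs  = { {DataType::Float32}, {DataType::Float32} };
    info.outputs = { {DataType::Boolean} };

    EqualWorkloadSketch workload(info); // type check passes for Float32 -> Boolean
    workload.Execute();
    return 0;
}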