From 483c811ea6fd0e7801aac1afd979ed02a649064b Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 1 Jun 2021 09:24:52 +0100
Subject: IVGCVSW-5962 Remove boost::multi_array

* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensors function to compare flat vectors and their shapes.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan
Signed-off-by: Sadik Armagan
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
---
 src/armnn/test/TensorHelpers.hpp | 159 +-
 src/armnn/test/UnitTests.hpp | 25 +-
 .../test/ParserFlatbuffersSerializeFixture.hpp | 10 +-
 .../test/ParserFlatbuffersFixture.hpp | 19 +-
 src/armnnUtils/ParserPrototxtFixture.hpp | 11 +-
 src/backends/aclCommon/test/MemCopyTestImpl.hpp | 29 +-
 src/backends/aclCommon/test/MemCopyTests.cpp | 12 +-
 .../backendsCommon/test/ActivationFixture.hpp | 23 +-
 .../test/QuantizedLstmEndToEndTestImpl.cpp | 23 +-
 .../test/layerTests/ActivationTestImpl.cpp | 195 ++-
 .../test/layerTests/AdditionTestImpl.cpp | 129 +-
 .../test/layerTests/ArgMinMaxTestImpl.cpp | 15 +-
 .../test/layerTests/BatchNormalizationTestImpl.cpp | 90 +-
 .../test/layerTests/BatchToSpaceNdTestImpl.hpp | 16 +-
 .../test/layerTests/CastTestImpl.cpp | 14 +-
 .../test/layerTests/ComparisonTestImpl.cpp | 20 +-
 .../test/layerTests/ConcatTestImpl.cpp | 510 +++---
 .../test/layerTests/ConstantTestImpl.cpp | 94 +-
 .../test/layerTests/Conv2dTestImpl.cpp | 770 ++++-----
 .../test/layerTests/ConvertBf16ToFp32TestImpl.cpp | 23 +-
 .../test/layerTests/ConvertFp16ToFp32TestImpl.cpp | 28 +-
 .../test/layerTests/ConvertFp32ToBf16TestImpl.cpp | 28 +-
 .../test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 28 +-
 .../test/layerTests/DebugTestImpl.cpp | 15 +-
 .../test/layerTests/DepthToSpaceTestImpl.cpp | 18 +-
 .../test/layerTests/DequantizeTestImpl.cpp | 13 +-
 .../layerTests/DetectionPostProcessTestImpl.hpp | 57 +-
 .../test/layerTests/ElementwiseTestImpl.hpp | 260 ++-
 .../test/layerTests/ElementwiseUnaryTestImpl.hpp | 18 +-
 .../test/layerTests/FakeQuantizationTestImpl.cpp | 27 +-
 .../test/layerTests/FillTestImpl.cpp | 25 +-
 .../test/layerTests/FloorTestImpl.cpp | 33 +-
 .../test/layerTests/FullyConnectedTestImpl.cpp | 130 +-
 .../test/layerTests/GatherTestImpl.cpp | 16 +-
 .../layerTests/InstanceNormalizationTestImpl.cpp | 19 +-
 .../test/layerTests/L2NormalizationTestImpl.cpp | 50 +-
 .../test/layerTests/LayerTestResult.hpp | 68 +-
 .../test/layerTests/LogSoftmaxTestImpl.cpp | 19 +-
 .../test/layerTests/LogicalTestImpl.cpp | 35 +-
 .../test/layerTests/LstmTestImpl.cpp | 1710 +++++++++-----------
 .../test/layerTests/MeanTestImpl.hpp | 16 +-
 .../test/layerTests/MultiplicationTestImpl.cpp | 24 +-
 .../test/layerTests/NormalizationTestImpl.cpp | 244 ++-
 .../test/layerTests/NormalizationTestImpl.hpp | 5 +
 .../backendsCommon/test/layerTests/PadTestImpl.cpp | 58 +-
 .../test/layerTests/PermuteTestImpl.hpp | 13 +-
 .../test/layerTests/Pooling2dTestImpl.cpp | 330 ++--
 .../test/layerTests/PreluTestImpl.hpp | 33 +-
 .../test/layerTests/QuantizeTestImpl.cpp | 14 +-
 .../test/layerTests/RankTestImpl.cpp | 31 +-
 .../test/layerTests/RankTestImpl.hpp | 2 +-
 .../test/layerTests/ReduceSumTestImpl.cpp | 14 +-
 .../test/layerTests/ReductionTestImpl.cpp | 14 +-
 .../test/layerTests/ReshapeTestImpl.cpp | 14 +-
 .../test/layerTests/ResizeTestImpl.cpp | 23 +-
 .../test/layerTests/SliceTestImpl.cpp | 16 +-
 .../test/layerTests/SoftmaxTestImpl.cpp | 37 +-
 .../test/layerTests/SpaceToBatchNdTestImpl.cpp | 18 +-
 .../test/layerTests/SpaceToDepthTestImpl.cpp | 18 +-
 .../test/layerTests/SplitterTestImpl.cpp | 82 +-
 .../test/layerTests/StackTestImpl.cpp | 16 +-
 .../test/layerTests/StridedSliceTestImpl.cpp | 16 +-
 .../layerTests/TransposeConvolution2dTestImpl.cpp | 24 +-
 .../test/layerTests/TransposeTestImpl.hpp | 14 +-
 src/backends/cl/test/ClLayerTests.cpp | 1 +
 src/backends/cl/test/ClMemCopyTests.cpp | 12 +-
 src/backends/cl/test/OpenClTimerTest.cpp | 36 +-
 src/backends/neon/test/NeonLayerTests.cpp | 1 +
 src/backends/neon/test/NeonMemCopyTests.cpp | 12 +-
 src/backends/neon/test/NeonTimerTest.cpp | 6 +-
 src/backends/reference/test/RefLayerTests.cpp | 1 +
 71 files changed, 3044 insertions(+), 2885 deletions(-)

diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index ceb6d0f9d7..b8788e7826 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -5,19 +5,18 @@
 #pragma once
 
 #include "PredicateResult.hpp"
+
 #include
 #include
 #include
 #include
-#include <boost/multi_array.hpp>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_real_distribution.hpp>
 #include
 #include
 #include
+#include <random>
 #include
 
 constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
@@ -70,56 +69,91 @@ bool SelectiveCompareBoolean(T a, T b)
     return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
 };
 
-template <typename T, std::size_t n>
-armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
-                                      const boost::multi_array<T, n>& b,
-                                      bool compareBoolean = false,
-                                      bool isDynamic = false)
+template <typename T>
+armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
+                                      const std::vector<T>& expectedData,
+                                      const armnn::TensorShape& actualShape,
+                                      const armnn::TensorShape& expectedShape,
+                                      bool compareBoolean = false,
+                                      bool isDynamic = false)
 {
+    if (actualData.size() != expectedData.size())
+    {
+        armnn::PredicateResult res(false);
+        res.Message() << "Different data size ["
+                      << actualData.size()
+                      << "!="
+                      << expectedData.size()
+                      << "]";
+        return res;
+    }
+
+    if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
+    {
+        armnn::PredicateResult res(false);
+        res.Message() << "Different number of dimensions ["
+                      << actualShape.GetNumDimensions()
+                      << "!="
+                      << expectedShape.GetNumDimensions()
+                      << "]";
+        return res;
+    }
+
+    if (actualShape.GetNumElements() != expectedShape.GetNumElements())
+    {
+        armnn::PredicateResult res(false);
+        res.Message() << "Different number of elements ["
+                      << actualShape.GetNumElements()
+                      << "!="
+                      << expectedShape.GetNumElements()
+                      << "]";
+        return res;
+    }
+
+    unsigned int numberOfDimensions = actualShape.GetNumDimensions();
+
     if (!isDynamic)
     {
         // Checks they are same shape.
-        for (unsigned int i = 0;
-             i < n;
-             i++)
+        for (unsigned int i = 0; i < numberOfDimensions; ++i)
         {
-            if (a.shape()[i] != b.shape()[i])
+            if (actualShape[i] != expectedShape[i])
             {
                 armnn::PredicateResult res(false);
                 res.Message() << "Different shapes ["
-                              << a.shape()[i]
+                              << actualShape[i]
                               << "!="
-                              << b.shape()[i]
+                              << expectedShape[i]
                               << "]";
                 return res;
             }
         }
     }
 
-    // Now compares element-wise.
-    // Fun iteration over n dimensions.
-    std::array<unsigned int, n> indices;
-    for (unsigned int i = 0; i < n; i++)
+    std::vector<unsigned int> indices;
+    for (unsigned int i = 0; i < numberOfDimensions; i++)
     {
-        indices[i] = 0;
+        indices.emplace_back(0);
     }
 
     std::stringstream errorString;
    int numFailedElements = 0;
     constexpr int maxReportedDifferences = 3;
+    unsigned int index = 0;
 
+    // Compare data element by element.
     while (true)
     {
         bool comparison;
         // As true for uint8_t is non-zero (1-255) we must have a dedicated compare for Booleans.
         if(compareBoolean)
         {
-            comparison = SelectiveCompareBoolean(a(indices), b(indices));
+            comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
         }
         else
         {
-            comparison = SelectiveCompare(a(indices), b(indices));
+            comparison = SelectiveCompare(actualData[index], expectedData[index]);
         }
 
         if (!comparison)
@@ -133,34 +167,35 @@ armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
         {
             ++numFailedElements;
             if (numFailedElements <= maxReportedDifferences)
             {
                 if (numFailedElements >= 2)
                 {
                     errorString << ", ";
                 }
                 errorString << "[";
-                for (unsigned int i = 0; i < n; ++i)
+                for (unsigned int i = 0; i < numberOfDimensions; ++i)
                 {
                     errorString << indices[i];
-                    if (i != n - 1)
+                    if (i != numberOfDimensions - 1)
                     {
                         errorString << ",";
                     }
                 }
                 errorString << "]";
-                errorString << " (" << +a(indices) << " != " << +b(indices) << ")";
+                errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
             }
         }
 
-        ++indices[n - 1];
-        for (unsigned int i=n-1; i>0; i--)
+        ++indices[numberOfDimensions - 1];
+        for (unsigned int i=numberOfDimensions-1; i>0; i--)
         {
-            if (indices[i] == a.shape()[i])
+            if (indices[i] == actualShape[i])
             {
                 indices[i] = 0;
                 ++indices[i - 1];
             }
         }
-
-        if (indices[0] == a.shape()[0])
+        if (indices[0] == actualShape[0])
         {
             break;
         }
+
+        index++;
     }
 
     armnn::PredicateResult comparisonResult(true);
@@ -178,64 +213,14 @@ armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
     return comparisonResult;
 }
-
-// Creates a boost::multi_array with the shape defined by the given TensorInfo.
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo)
-{
-    std::array<unsigned int, n> shape;
-
-    for (unsigned int i = 0; i < n; i++)
-    {
-        shape[i] = tensorInfo.GetShape()[i];
-    }
-
-    return boost::multi_array<T, n>(shape);
-}
-
-// Creates a boost::multi_array with the shape defined by the given TensorInfo and contents defined by the given vector.
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeTensor(
-    const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat, bool isDynamic = false)
-{
-    if (!isDynamic)
-    {
-        ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
-    }
-
-    std::array<unsigned int, n> shape;
-
-    // NOTE: tensorInfo.GetNumDimensions() might be different from n
-    const unsigned int returnDimensions = static_cast<unsigned int>(n);
-    const unsigned int actualDimensions = tensorInfo.GetNumDimensions();
-
-    const unsigned int paddedDimensions =
-        returnDimensions > actualDimensions ?
-            returnDimensions - actualDimensions : 0u;
-
-    for (unsigned int i = 0u; i < returnDimensions; i++)
-    {
-        if (i < paddedDimensions)
-        {
-            shape[i] = 1u;
-        }
-        else
-        {
-            shape[i] = tensorInfo.GetShape()[i - paddedDimensions];
-        }
-    }
-
-    boost::const_multi_array_ref<T, n> arrayRef(&flat[0], shape);
-    return boost::multi_array<T, n>(arrayRef);
-}
-
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
-                                          unsigned int seed,
-                                          float min = -10.0f,
-                                          float max = 10.0f)
+template <typename T>
+std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
+                                unsigned int seed,
+                                float min = -10.0f,
+                                float max = 10.0f)
 {
-    boost::random::mt19937 gen(seed);
-    boost::random::uniform_real_distribution<float> dist(min, max);
+    std::mt19937 gen(seed);
+    std::uniform_real_distribution<float> dist(min, max);
 
     std::vector<float> init(tensorInfo.GetNumElements());
     for (unsigned int i = 0; i < init.size(); i++)
@@ -246,5 +231,5 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
     const float qScale = tensorInfo.GetQuantizationScale();
     const int32_t qOffset = tensorInfo.GetQuantizationOffset();
 
-    return MakeTensor<T, n>(tensorInfo, armnnUtils::QuantizedVector<T>(init, qScale, qOffset));
+    return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
 }
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index b55b13d4c8..bb91c4d055 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -11,7 +11,9 @@
 #include
 #include
+
 #include "TensorHelpers.hpp"
+
 #include
 
 inline void ConfigureLoggingTest()
@@ -38,11 +40,15 @@ template <typename T, std::size_t n>
 void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
 {
     bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
-    BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.supported,
-                        "The test name does not match the supportedness it is reporting");
-    if (testResult.supported)
+    BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
+                        "The test name does not match the supportedness it is reporting");
+    if (testResult.m_Supported)
     {
-        auto result = CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean);
+        auto result = CompareTensors(testResult.m_ActualData,
+                                     testResult.m_ExpectedData,
+                                     testResult.m_ActualShape,
+                                     testResult.m_ExpectedShape,
+                                     testResult.m_CompareBoolean);
         BOOST_TEST(result.m_Result, result.m_Message.str());
     }
 }
@@ -53,11 +59,14 @@ void CompareTestResultIfSupported(const std::string& testName, const std::vector<LayerTestResult<T, n>>&
     bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
     for (unsigned int i = 0; i < testResult.size(); ++i)
     {
-        BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].supported,
-                            "The test name does not match the supportedness it is reporting");
-        if (testResult[i].supported)
+        BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
+                            "The test name does not match the supportedness it is reporting");
+        if (testResult[i].m_Supported)
         {
-            auto result = CompareTensors(testResult[i].output, testResult[i].outputExpected);
+            auto result = CompareTensors(testResult[i].m_ActualData,
+                                         testResult[i].m_ExpectedData,
+                                         testResult[i].m_ActualShape,
+                                         testResult[i].m_ExpectedShape);
             BOOST_TEST(result.m_Result, result.m_Message.str());
         }
     }
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 5f5ec1c5f4..a62cb96eb6 100644
---
a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp +++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp @@ -20,6 +20,7 @@ #include +#include using armnnDeserializer::IDeserializer; using TensorRawPtr = armnnSerializer::TensorInfo*; @@ -218,14 +219,14 @@ void ParserFlatbuffersSerializeFixture::RunTest( } // Allocate storage for the output tensors to be written to and setup the armnn output tensors. - std::map> outputStorage; + std::map> outputStorage; armnn::OutputTensors outputTensors; for (auto&& it : expectedOutputData) { armnn::BindingPointInfo bindingInfo = ConvertBindingInfo( m_Parser->GetNetworkOutputBindingInfo(layersId, it.first)); armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnOutputType); - outputStorage.emplace(it.first, MakeTensor(bindingInfo.second)); + outputStorage.emplace(it.first, std::vector(bindingInfo.second.GetNumElements())); outputTensors.push_back( { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) }); } @@ -237,8 +238,9 @@ void ParserFlatbuffersSerializeFixture::RunTest( { armnn::BindingPointInfo bindingInfo = ConvertBindingInfo( m_Parser->GetNetworkOutputBindingInfo(layersId, it.first)); - auto outputExpected = MakeTensor(bindingInfo.second, it.second); - auto result = CompareTensors(outputExpected, outputStorage[it.first]); + auto outputExpected = it.second; + auto result = CompareTensors(outputExpected, outputStorage[it.first], + bindingInfo.second.GetShape(), bindingInfo.second.GetShape()); BOOST_TEST(result.m_Result, result.m_Message.str()); } } diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp index f333ac0d40..196af190fd 100644 --- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp +++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp @@ -293,7 +293,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, FillInputTensors(inputTensors, inputData, subgraphId); // Allocate storage for the output tensors to be written to and setup the armnn output tensors. - std::map> outputStorage; + std::map> outputStorage; armnn::OutputTensors outputTensors; for (auto&& it : expectedOutputData) { @@ -309,7 +309,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, it.first)); armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2); - outputStorage.emplace(it.first, MakeTensor(outputTensorInfo)); + outputStorage.emplace(it.first, std::vector(outputTensorInfo.GetNumElements())); outputTensors.push_back( { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) }); } @@ -320,8 +320,10 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, for (auto&& it : expectedOutputData) { armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first); - auto outputExpected = MakeTensor(bindingInfo.second, it.second, isDynamic); - auto result = CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic); + auto outputExpected = it.second; + auto result = CompareTensors(outputExpected, outputStorage[it.first], + bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), + false, isDynamic); BOOST_TEST(result.m_Result, result.m_Message.str()); } } @@ -393,7 +395,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, FillInputTensors(inputTensors, input2Data, subgraphId); // Allocate storage for the output tensors to be written to and setup the armnn output tensors. 
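As context for the fixture changes above and below: each named network output now gets a flat std::vector sized from its binding's TensorInfo, and the armnn::Tensor wraps that vector's data() pointer. A self-contained sketch of just the storage side (the name "output0" and the hard-coded element count are illustrative only):

    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        // One flat host buffer per named output; the fixtures size it with
        // bindingInfo.second.GetNumElements() instead of a constant.
        std::map<std::string, std::vector<float>> outputStorage;
        const unsigned int numElements = 2u * 4u; // e.g. a { 2, 4 } output tensor
        outputStorage.emplace("output0", std::vector<float>(numElements));
        return outputStorage.at("output0").size() == numElements ? 0 : 1;
    }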
- std::map> outputStorage; + std::map> outputStorage; armnn::OutputTensors outputTensors; for (auto&& it : expectedOutputData) { @@ -409,7 +411,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, it.first)); armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType); - outputStorage.emplace(it.first, MakeTensor(outputTensorInfo)); + outputStorage.emplace(it.first, std::vector(outputTensorInfo.GetNumElements())); outputTensors.push_back( { outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) }); } @@ -420,8 +422,9 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, for (auto&& it : expectedOutputData) { armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first); - auto outputExpected = MakeTensor(bindingInfo.second, it.second); - auto result = CompareTensors(outputExpected, outputStorage[it.first], false); + auto outputExpected = it.second; + auto result = CompareTensors(outputExpected, outputStorage[it.first], + bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), false); BOOST_TEST(result.m_Result, result.m_Message.str()); } } \ No newline at end of file diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp index ad991efa36..0ff7e59ac2 100644 --- a/src/armnnUtils/ParserPrototxtFixture.hpp +++ b/src/armnnUtils/ParserPrototxtFixture.hpp @@ -193,12 +193,12 @@ void ParserPrototxtFixture::RunTest(const std::map> outputStorage; + std::map> outputStorage; armnn::OutputTensors outputTensors; for (auto&& it : expectedOutputData) { armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first); - outputStorage.emplace(it.first, MakeTensor(bindingInfo.second)); + outputStorage.emplace(it.first, std::vector(bindingInfo.second.GetNumElements())); outputTensors.push_back( { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) }); } @@ -252,15 +252,16 @@ void ParserPrototxtFixture::RunTest(const std::map(bindingInfo.second, it.second); + auto outputExpected = it.second; + auto shape = bindingInfo.second.GetShape(); if (std::is_same::value) { - auto result = CompareTensors(outputExpected, outputStorage[it.first], true); + auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape, true); BOOST_TEST(result.m_Result, result.m_Message.str()); } else { - auto result = CompareTensors(outputExpected, outputStorage[it.first]); + auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape); BOOST_TEST(result.m_Result, result.m_Message.str()); } } diff --git a/src/backends/aclCommon/test/MemCopyTestImpl.hpp b/src/backends/aclCommon/test/MemCopyTestImpl.hpp index 1f542d24b4..91ba4eae17 100644 --- a/src/backends/aclCommon/test/MemCopyTestImpl.hpp +++ b/src/backends/aclCommon/test/MemCopyTestImpl.hpp @@ -15,8 +15,6 @@ #include -#include - namespace { @@ -28,21 +26,20 @@ LayerTestResult MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory, const std::array shapeData = { { 1u, 1u, 6u, 5u } }; const armnn::TensorShape tensorShape(4, shapeData.data()); const armnn::TensorInfo tensorInfo(tensorShape, dataType); - boost::multi_array inputData = MakeTensor(tensorInfo, std::vector( - { - 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, - }) - ); + std::vector inputData = + { + 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, + }; 
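The recurring translation in this patch is visible above: data that used to live in a boost::multi_array now lives in a flat std::vector, so an element access such as tensor[n][c][h][w] becomes a read at a computed offset. A minimal sketch of that NCHW mapping (the helper name is illustrative, not part of the patch):

    #include <cassert>

    // Maps NCHW coordinates onto the flat std::vector index used after this patch.
    inline unsigned int FlatIndexNchw(unsigned int n, unsigned int c,
                                      unsigned int h, unsigned int w,
                                      unsigned int channels, unsigned int height,
                                      unsigned int width)
    {
        return ((n * channels + c) * height + h) * width + w;
    }

    int main()
    {
        // For the { 1, 1, 6, 5 } tensor above, element (0, 0, 2, 3) sits at
        // flat index 13, i.e. the value 14 in inputData.
        assert(FlatIndexNchw(0, 0, 2, 3, 1, 6, 5) == 13u);
        return 0;
    }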
LayerTestResult ret(tensorInfo); - ret.outputExpected = inputData; + ret.m_ExpectedData = inputData; - boost::multi_array outputData(shapeData); + std::vector actualOutput(tensorInfo.GetNumElements()); ARMNN_NO_DEPRECATE_WARN_BEGIN auto inputTensorHandle = srcWorkloadFactory.CreateTensorHandle(tensorInfo); @@ -71,8 +68,8 @@ LayerTestResult MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory, dstWorkloadFactory.CreateMemCopy(memCopyQueueDesc, workloadInfo)->Execute(); - CopyDataFromITensorHandle(outputData.data(), workloadOutput.get()); - ret.output = outputData; + CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get()); + ret.m_ActualData = actualOutput; return ret; } diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp index ffba19323a..7612cbfe28 100644 --- a/src/backends/aclCommon/test/MemCopyTests.cpp +++ b/src/backends/aclCommon/test/MemCopyTests.cpp @@ -48,7 +48,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu) { LayerTestResult result = MemCopyTest(false); - auto predResult = CompareTensors(result.output, result.outputExpected); + auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData, + result.m_ActualShape, result.m_ExpectedShape); BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } @@ -56,7 +57,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon) { LayerTestResult result = MemCopyTest(false); - auto predResult = CompareTensors(result.output, result.outputExpected); + auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData, + result.m_ActualShape, result.m_ExpectedShape); BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } @@ -64,7 +66,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors) { LayerTestResult result = MemCopyTest(true); - auto predResult = CompareTensors(result.output, result.outputExpected); + auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData, + result.m_ActualShape, result.m_ExpectedShape); BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } @@ -72,7 +75,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors) { LayerTestResult result = MemCopyTest(true); - auto predResult = CompareTensors(result.output, result.outputExpected); + auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData, + result.m_ActualShape, result.m_ExpectedShape); BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp index d28174d6a6..c61f3f097e 100644 --- a/src/backends/backendsCommon/test/ActivationFixture.hpp +++ b/src/backends/backendsCommon/test/ActivationFixture.hpp @@ -11,20 +11,13 @@ #include -#include - struct ActivationFixture { ActivationFixture() { - auto boostArrayExtents = boost::extents - [armnn::numeric_cast(batchSize)] - [armnn::numeric_cast(channels)] - [armnn::numeric_cast(height)] - [armnn::numeric_cast(width)]; - output.resize(boostArrayExtents); - outputExpected.resize(boostArrayExtents); - input.resize(boostArrayExtents); + output.resize(batchSize * channels * height * width); + outputExpected.resize(batchSize * channels * height * width); + input.resize(batchSize * channels * height * width); unsigned int inputShape[] = { batchSize, channels, height, width }; unsigned int outputShape[] = { batchSize, channels, height, width }; @@ -32,7 +25,7 @@ struct ActivationFixture inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); 
outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); - input = MakeRandomTensor(inputTensorInfo, 21453); + input = MakeRandomTensor(inputTensorInfo, 21453); } unsigned int width = 17; @@ -40,9 +33,9 @@ struct ActivationFixture unsigned int channels = 2; unsigned int batchSize = 5; - boost::multi_array output; - boost::multi_array outputExpected; - boost::multi_array input; + std::vector output; + std::vector outputExpected; + std::vector input; armnn::TensorInfo inputTensorInfo; armnn::TensorInfo outputTensorInfo; @@ -57,6 +50,6 @@ struct PositiveActivationFixture : public ActivationFixture { PositiveActivationFixture() { - input = MakeRandomTensor(inputTensorInfo, 2342423, 0.0f, 1.0f); + input = MakeRandomTensor(inputTensorInfo, 2342423, 0.0f, 1.0f); } }; \ No newline at end of file diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp index 404a412ca0..c68051c8ca 100644 --- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp @@ -24,14 +24,12 @@ namespace { -using MultiArray = const boost::multi_array&; - -armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input, - MultiArray expectedOutput) +armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape, + armnn::TensorShape& outputExpectedShape) { - auto batchSize = armnn::numeric_cast(input.shape()[0]); - auto inputSize = armnn::numeric_cast(input.shape()[1]); - auto outputSize = armnn::numeric_cast(expectedOutput.shape()[1]); + auto batchSize = armnn::numeric_cast(inputShape[0]); + auto inputSize = armnn::numeric_cast(inputShape[1]); + auto outputSize = armnn::numeric_cast(outputExpectedShape[1]); float inputOutputScale = 0.0078125f; int32_t inputOutputOffset = 128; @@ -182,26 +180,21 @@ void QuantizedLstmEndToEnd(const std::vector& backends) { std::vector inputVector = {166, 179, 50, 150}; armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8); - boost::multi_array input = MakeTensor(inputDesc, inputVector); std::vector cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16); - boost::multi_array cellStateIn = MakeTensor(cellStateInDesc, cellStateInVector); std::vector outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8); - boost::multi_array outputStateIn = MakeTensor(outputStateInDesc, outputStateInVector); std::vector cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16); - boost::multi_array cellStateOut = MakeTensor(cellStateOutVectorDesc, cellStateOutVector); std::vector outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8); - boost::multi_array outputStateOut = MakeTensor(outputDesc, outputStateOutVector); // Builds up the structure of the network - armnn::INetworkPtr net = CreateQuantizedLstmNetwork(input, outputStateOut); + armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape()); BOOST_TEST_CHECKPOINT("create a network"); @@ -227,8 +220,8 @@ void QuantizedLstmEndToEnd(const std::vector& backends) outputTensors.reserve(2); //output - std::vector cellStateOutResult(cellStateOutVector.size()); - std::vector 
outputStateOutResult(outputStateOutVector.size()); + std::vector cellStateOutResult(cellStateOutVector.size()); + std::vector outputStateOutResult(outputStateOutVector.size()); outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())}); outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())}); diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp index 6d83b1ca99..54052073a9 100644 --- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp @@ -17,8 +17,6 @@ #include -#include - #include template> @@ -58,9 +56,7 @@ LayerTestResult BoundedReLuTestCommon( outputTensorInfo.SetQuantizationOffset(outputOffset); } - LayerTestResult result(inputTensorInfo); - - auto input = MakeTensor(inputTensorInfo, inputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -80,15 +76,16 @@ LayerTestResult BoundedReLuTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); - - result.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputExpectedData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult BoundedReLuUpperAndLowerBoundTest( @@ -245,7 +242,7 @@ struct BoundedReLuRandomInputTestTraits } }; -boost::multi_array BoundedReLuRandomInputTest( +std::vector BoundedReLuRandomInputTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, @@ -257,11 +254,10 @@ boost::multi_array BoundedReLuRandomInputTest( const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo(); const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo(); - boost::multi_array output(GetTensorShapeAsArray<4>(outputTensorInfo)); - // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu // range [lowerBound, upperBound]. 
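For reference, a standalone sketch of the core of the rewritten MakeRandomTensor from TensorHelpers.hpp above, with boost::random swapped for the standard <random> header (simplified: the armnnUtils::QuantizedVector step is omitted, and the helper name is illustrative):

    #include <cstddef>
    #include <random>
    #include <vector>

    std::vector<float> MakeRandomFloats(std::size_t count, unsigned int seed,
                                        float min = -10.0f, float max = 10.0f)
    {
        std::mt19937 gen(seed);                               // was boost::random::mt19937
        std::uniform_real_distribution<float> dist(min, max); // was the boost distribution
        std::vector<float> values(count);
        for (auto& value : values)
        {
            value = dist(gen);
        }
        return values;
    }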
- auto input = MakeRandomTensor(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f); + std::vector input = MakeRandomTensor(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -278,13 +274,13 @@ boost::multi_array BoundedReLuRandomInputTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return output; + return actualOutput; } } // namespace @@ -305,16 +301,16 @@ LayerTestResult CompareBoundedReLuTest( activationDescriptor.m_A = upperBound; activationDescriptor.m_B = lowerBound; - result.output = BoundedReLuRandomInputTest( + result.m_ActualData = BoundedReLuRandomInputTest( workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor); - result.outputExpected = BoundedReLuRandomInputTest( + result.m_ExpectedData = BoundedReLuRandomInputTest( refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor); return result; } template> -LayerTestResult ConstantLinearActivationTestCommon( +LayerTestResult ConstantLinearActivationTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, @@ -344,7 +340,6 @@ LayerTestResult ConstantLinearActivationTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - LayerTestResult ret(outputTensorInfo); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -362,17 +357,20 @@ LayerTestResult ConstantLinearActivationTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - boost::multi_array input = MakeRandomTensor(inputTensorInfo, 7123561); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + std::vector input = MakeRandomTensor(inputTensorInfo, 7123561); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - workload->Execute(); + CopyDataToITensorHandle(inputHandle.get(), input.data()); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + workload->Execute(); - // Ensure output equals input. - ret.outputExpected = input; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + // Use input as ExpectedData as tensor doesn't change. + return LayerTestResult(actualOutput, + input, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult ConstantLinearActivationTest( @@ -441,9 +439,11 @@ LayerTestResult SimpleActivationTest( outputTensorInfo.SetQuantizationOffset(outOffset); } - LayerTestResult result(inputTensorInfo); + std::vector input = armnnUtils::QuantizedVector(inputData, scale, offset); - auto input = MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, scale, offset)); + // Calculated outputExpected manually. 
+ std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector outputExpected = armnnUtils::QuantizedVector(outputExpectedData, outScale, outOffset); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -463,17 +463,16 @@ LayerTestResult SimpleActivationTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); - - // Calculated manually. - result.outputExpected = - MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, outScale, outOffset)); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputExpected, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -497,8 +496,8 @@ LayerTestResult SimpleSigmoidTestCommon( { return 1.0f / (1.0f + std::exp(-value)); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector m_OutputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), m_OutputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -511,7 +510,7 @@ LayerTestResult SimpleSigmoidTestCommon( inputData, 1.f / 256.f, 0, - outputExpectedData); + m_OutputExpected); } LayerTestResult SimpleSigmoidTest( @@ -561,8 +560,8 @@ LayerTestResult ReLuTestCommon( { return std::fmax(0.0f, value); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector outputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -575,7 +574,7 @@ LayerTestResult ReLuTestCommon( inputData, qScale, qOffset, - outputExpectedData); + outputExpected); } LayerTestResult ReLuInt16Test( @@ -625,8 +624,8 @@ LayerTestResult BoundedReLuTestCommon( { return std::min(a, std::max(b, value)); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector outputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -639,7 +638,7 @@ LayerTestResult BoundedReLuTestCommon( inputData, qScale, qOffset, - outputExpectedData); + outputExpected); } LayerTestResult BoundedReLuInt16Test( @@ -672,8 +671,8 @@ LayerTestResult SoftReLuTestCommon( { return std::log(1.0f + std::exp(value)); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector outputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -686,7 +685,7 @@ LayerTestResult SoftReLuTestCommon( inputData, qScale, qOffset, - outputExpectedData); + outputExpected); } LayerTestResult SoftReLuTest( @@ -735,8 +734,8 @@ LayerTestResult LeakyReLuTestCommon( { return value > 0.0f ? 
value : (value * a); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector outputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -749,7 +748,7 @@ LayerTestResult LeakyReLuTestCommon( inputData, qScale, qOffset, - outputExpectedData); + outputExpected); } LayerTestResult LeakyReLuTest( @@ -797,8 +796,8 @@ LayerTestResult AbsTestCommon( { return std::abs(value); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector outputExpected(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -811,7 +810,7 @@ LayerTestResult AbsTestCommon( inputData, qScale, qOffset, - outputExpectedData); + outputExpected); } LayerTestResult AbsTest( @@ -856,17 +855,15 @@ LayerTestResult SqrtNNTest( { return std::sqrt(value); }; - std::vector outputExpectedData(inputDataSize); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputDataSize); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); armnn::TensorInfo inputTensorInfo( { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32); armnn::TensorInfo outputTensorInfo( { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32); - LayerTestResult result(inputTensorInfo); - - auto input = MakeTensor(inputTensorInfo, inputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -883,16 +880,16 @@ LayerTestResult SqrtNNTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get()); - - // Calculated manually. 
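All of these unary-activation tests share one pattern: the reference output is produced by applying the activation as a lambda over the flat input with std::transform. A compact, self-contained instance of the pattern (using ReLU):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    int main()
    {
        std::vector<float> input = { -2.0f, -1.0f, 0.0f, 1.0f, 2.0f };
        std::vector<float> expected(input.size());
        // Activation applied element-wise over the flat data, as in the tests above.
        std::transform(input.begin(), input.end(), expected.begin(),
                       [](float value) { return std::fmax(0.0f, value); });
        return (expected[0] == 0.0f && expected[4] == 2.0f) ? 0 : 1;
    }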
- result.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); }; template> @@ -915,8 +912,8 @@ LayerTestResult SqrtTestCommon( { return std::sqrt(value); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputData.size()); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -929,7 +926,7 @@ LayerTestResult SqrtTestCommon( inputData, qScale, qOffset, - outputExpectedData); + expectedOutput); } LayerTestResult SqrtTest( @@ -976,8 +973,8 @@ LayerTestResult SquareTestCommon( { return std::pow(value,2); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputData.size()); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -990,7 +987,7 @@ LayerTestResult SquareTestCommon( inputData, qScale, qOffset, - outputExpectedData); + expectedOutput); } LayerTestResult SquareTest( @@ -1040,8 +1037,8 @@ LayerTestResult TanhTestCommon( { return a * tanhf(b * value); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputData.size()); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -1054,7 +1051,7 @@ LayerTestResult TanhTestCommon( inputData, qScale, qOffset, - outputExpectedData); + expectedOutput); } LayerTestResult TanhTest( @@ -1104,8 +1101,8 @@ LayerTestResult EluTestCommon( { return (value >= 0) ? 
value : a * (expf(value) - 1); }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputData.size()); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -1118,7 +1115,7 @@ LayerTestResult EluTestCommon( inputData, qScale, qOffset, - outputExpectedData); + expectedOutput); } LayerTestResult EluTest( @@ -1172,8 +1169,8 @@ LayerTestResult HardSwishTestCommon( float result = hardSwish_step1 / 6; return result; }; - std::vector outputExpectedData(inputData.size()); - std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + std::vector expectedOutput(inputData.size()); + std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f); return SimpleActivationTest(workloadFactory, memoryManager, @@ -1186,7 +1183,7 @@ LayerTestResult HardSwishTestCommon( inputData, qScale, qOffset, - outputExpectedData); + expectedOutput); } LayerTestResult HardSwishTest( @@ -1216,7 +1213,7 @@ LayerTestResult HardSwishInt16Test( template> -LayerTestResult CompareActivationTestImpl( +LayerTestResult CompareActivationTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, @@ -1258,17 +1255,9 @@ LayerTestResult CompareActivationTestImpl( minVal = 0.f; } - boost::multi_array input = MakeRandomTensor(inputTensorInfo, 21453, minVal, 10.f); - - - LayerTestResult ret(outputTensorInfo); - auto boostArrayExtents = boost::extents - [armnn::numeric_cast(batchSize)] - [armnn::numeric_cast(channels)] - [armnn::numeric_cast(height)] - [armnn::numeric_cast(width)]; - ret.output.resize(boostArrayExtents); - ret.outputExpected.resize(boostArrayExtents); + std::vector input = MakeRandomTensor(inputTensorInfo, 21453, minVal, 10.f); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -1299,19 +1288,23 @@ LayerTestResult CompareActivationTestImpl( inputHandleRef->Allocate(); outputHandleRef->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); workload->Execute(); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); - return ret; } -LayerTestResult CompareActivationTest( +LayerTestResult CompareActivationTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, @@ -1325,7 +1318,7 @@ LayerTestResult CompareActivationTest( refTensorHandleFactory, f, batchSize); } -LayerTestResult CompareActivationUint8Test( +LayerTestResult 
CompareActivationUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, @@ -1338,7 +1331,7 @@ LayerTestResult CompareActivationUint8Test( tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50); } -LayerTestResult CompareActivationInt16Test( +LayerTestResult CompareActivationInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp index 0e1b7336de..ce8f74d2e0 100644 --- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp @@ -186,7 +186,7 @@ LayerTestResult AdditionBroadcastTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input1 = MakeTensor(inputTensorInfo1, armnnUtils::QuantizedVector( + auto input1 = armnnUtils::QuantizedVector( { 0.0f, 1.0f, @@ -197,17 +197,18 @@ LayerTestResult AdditionBroadcastTestImpl( 4.0f, 5.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto input2 = MakeTensor(inputTensorInfo2, armnnUtils::QuantizedVector( + auto input2 = armnnUtils::QuantizedVector( { 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, }, - qScale, qOffset)); + qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector( + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + auto expectedOutput = armnnUtils::QuantizedVector( { 0.5f, 1.5f, 2.5f, 4.5f, 5.5f, 6.5f, @@ -218,7 +219,7 @@ LayerTestResult AdditionBroadcastTestImpl( 4.5f, 5.5f, 6.5f, 8.5f, 9.5f, 10.5f, }, - qScale, qOffset)); + qScale, qOffset); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); std::unique_ptr inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2); @@ -236,15 +237,18 @@ LayerTestResult AdditionBroadcastTestImpl( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -270,7 +274,7 @@ LayerTestResult AdditionBroadcast1ElementTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input1 = MakeTensor(inputTensorInfo1, armnnUtils::QuantizedVector( + auto input1 = armnnUtils::QuantizedVector( { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, @@ -279,16 +283,17 @@ LayerTestResult AdditionBroadcast1ElementTestImpl( 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto input2 = MakeTensor(inputTensorInfo2, armnnUtils::QuantizedVector( + auto input2 = armnnUtils::QuantizedVector( { 0.5f, }, - qScale, qOffset)); + qScale, qOffset); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, 
armnnUtils::QuantizedVector( + auto expectedOutput = armnnUtils::QuantizedVector( { 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, @@ -297,7 +302,7 @@ LayerTestResult AdditionBroadcast1ElementTestImpl( 12.5f, 13.5f, 14.5f, 15.5f, 16.5f, 17.5f, }, - qScale, qOffset)); + qScale, qOffset); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); std::unique_ptr inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2); @@ -315,15 +320,18 @@ LayerTestResult AdditionBroadcast1ElementTestImpl( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult AdditionBroadcastTest( @@ -545,11 +553,10 @@ LayerTestResult AdditionAfterMaxPoolTest( armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32); armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32); - boost::multi_array poolingInput = MakeTensor(poolingInputTensorInfo, - {1, 2, 3, - 4, 5, 6, - 7, 8, 9 - }); + std::vector poolingInput = {1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }; std::unique_ptr poolingInputHandle = tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo); std::unique_ptr poolingOutputHandle = @@ -575,37 +582,26 @@ LayerTestResult AdditionAfterMaxPoolTest( // Create the MaxPool std::unique_ptr workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo); - //LayerTestResult result(poolingOutputTensorInfo); - auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo)); - boost::multi_array resultMaxPool; - resultMaxPool.resize(shape); - + std::vector resultMaxPool(poolingOutputTensorInfo.GetNumElements()); // Create addition with another tensor the same size // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1 // with the initial tensor. // 12, 16 // 24, 28 + armnn::TensorInfo addInputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32); + armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32); - armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32); - armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32); - - boost::multi_array addInput = MakeTensor(addInputTensorInfo, - {12, 16, - 24, 28, - }); + std::vector addInput = { 12, 16, + 24, 28 }; // Expected output tensor after MaxPool and Addition. 
- LayerTestResult addRet(addOutputTensorInfo); - addRet.outputExpected = MakeTensor(addOutputTensorInfo, std::vector( - { - 13, 19, - 31, 37 - })); + std::vector actualOutput(addOutputTensorInfo.GetNumElements()); + std::vector expectedOutput = { 13, 19, + 31, 37 }; std::unique_ptr addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo); - std::unique_ptr addOutputHandle = - tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo); + std::unique_ptr addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo); armnn::AdditionQueueDescriptor data; armnn::WorkloadInfo info; @@ -622,20 +618,23 @@ LayerTestResult AdditionAfterMaxPoolTest( addInputHandle->Allocate(); addOutputHandle->Allocate(); - CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]); - CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get()); + CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data()); + CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get()); - CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]); - CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]); + CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data()); + CopyDataToITensorHandle(addInputHandle.get(), addInput.data()); workload->PostAllocationConfigure(); workload->Execute(); addWorkload->PostAllocationConfigure(); addWorkload->Execute(); - CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get()); - return addRet; + return LayerTestResult(actualOutput, + expectedOutput, + addOutputHandle->GetShape(), + addOutputTensorInfo.GetShape()); } LayerTestResult CompareAdditionTest( @@ -660,10 +659,11 @@ LayerTestResult CompareAdditionTest( inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); - auto input1 = MakeRandomTensor(inputTensorInfo1, 1232); - auto input2 = MakeRandomTensor(inputTensorInfo2, 456); + auto input1 = MakeRandomTensor(inputTensorInfo1, 1232); + auto input2 = MakeRandomTensor(inputTensorInfo2, 456); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); std::unique_ptr inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2); @@ -695,18 +695,21 @@ LayerTestResult CompareAdditionTest( inputHandle2Ref->Allocate(); outputHandleRef->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); - CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]); - CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); + CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); workloadRef->PostAllocationConfigure(); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + 
CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp index d63cc04e99..34b2539c32 100644 --- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp @@ -27,10 +27,8 @@ LayerTestResult ArgMinMaxTestCommon( const std::vector& outputData, int axis = 3) { - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputData, inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, outputData); + std::vector inputTensor = ConvertToDataType(inputData, inputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -48,14 +46,17 @@ LayerTestResult ArgMinMaxTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // namespace diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp index 969d5dbcd1..4311faff4e 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp @@ -58,18 +58,16 @@ LayerTestResult BatchNormTestImpl( tensorInfo.SetQuantizationOffset(qOffset); } - auto inputTensor = MakeTensor(inputTensorInfo, QuantizedVector(inputValues, qScale, qOffset)); + auto inputTensor = QuantizedVector(inputValues, qScale, qOffset); // These values are per-channel of the input. 
- auto mean = MakeTensor(tensorInfo, QuantizedVector({ 3, -2 }, qScale, qOffset)); - auto variance = MakeTensor(tensorInfo, QuantizedVector({ 4, 9 }, qScale, qOffset)); - auto beta = MakeTensor(tensorInfo, QuantizedVector({ 3, 2 }, qScale, qOffset)); - auto gamma = MakeTensor(tensorInfo, QuantizedVector({ 2, 1 }, qScale, qOffset)); + auto mean = QuantizedVector({ 3, -2 }, qScale, qOffset); + auto variance = QuantizedVector({ 4, 9 }, qScale, qOffset); + auto beta = QuantizedVector({ 3, 2 }, qScale, qOffset); + auto gamma = QuantizedVector({ 2, 1 }, qScale, qOffset); - LayerTestResult result(outputTensorInfo); - - result.outputExpected = MakeTensor(inputTensorInfo, - QuantizedVector(expectedOutputValues, qScale, qOffset)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = QuantizedVector(expectedOutputValues, qScale, qOffset); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -88,10 +86,10 @@ LayerTestResult BatchNormTestImpl( descriptor.m_Parameters.m_DataLayout = dataLayout; armnn::WorkloadInfo info; - AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]); - AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]); - AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]); - AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]); + AllocateAndCopyDataToITensorHandle(&meanTensor, mean.data()); + AllocateAndCopyDataToITensorHandle(&varianceTensor, variance.data()); + AllocateAndCopyDataToITensorHandle(&betaTensor, beta.data()); + AllocateAndCopyDataToITensorHandle(&gammaTensor, gamma.data()); AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); @@ -101,13 +99,16 @@ LayerTestResult BatchNormTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -140,20 +141,19 @@ LayerTestResult BatchNormTestNhwcImpl( tensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector( + auto input = QuantizedVector( { 1.f, 1.f, 4.f, 1.f, 4.f, 4.f, 2.f, 1.f, 1.f, -2.f, 6.f, 4.f }, - qScale, qOffset)); + qScale, qOffset); + // These values are per-channel of the input. 
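With the shape now carried separately by the TensorInfo, the MakeTensor wrapper adds nothing, so each MakeTensor(info, QuantizedVector(...)) collapses to the bare QuantizedVector call seen in these hunks. A minimal sketch of the integer quantisation the tests rely on, assuming the usual affine mapping q = round(v / scale) + offset; the real armnnUtils helper passes floating-point types through unchanged, and the name below is illustrative, not the library's:

#include <cmath>
#include <cstdint>
#include <vector>

// Sketch of the integer path only; float types would be copied through as-is.
template <typename T>
std::vector<T> QuantizedVectorSketch(const std::vector<float>& values,
                                     float scale, int32_t offset)
{
    std::vector<T> quantized;
    quantized.reserve(values.size());
    for (float v : values)
    {
        // Affine quantisation: scale down, round to nearest, shift by the offset.
        quantized.push_back(static_cast<T>(std::lround(v / scale) + offset));
    }
    return quantized;
}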
- auto mean = MakeTensor(tensorInfo, QuantizedVector({ 3, -2 }, qScale, qOffset)); - auto variance = MakeTensor(tensorInfo, QuantizedVector({ 4, 9 }, qScale, qOffset)); - auto beta = MakeTensor(tensorInfo, QuantizedVector({ 3, 2 }, qScale, qOffset)); - auto gamma = MakeTensor(tensorInfo, QuantizedVector({ 2, 1 }, qScale, qOffset)); + auto mean = QuantizedVector({ 3, -2 }, qScale, qOffset); + auto variance = QuantizedVector({ 4, 9 }, qScale, qOffset); + auto beta = QuantizedVector({ 3, 2 }, qScale, qOffset); + auto gamma = QuantizedVector({ 2, 1 }, qScale, qOffset); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -179,30 +179,34 @@ LayerTestResult BatchNormTestNhwcImpl( data.m_Parameters.m_Eps = 0.0f; data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC; + std::vector actualOutput(outputTensorInfo.GetNumElements()); + // For each channel: // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0), // multiply by gamma and add beta - ret.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector( + std::vector expectedOutput = QuantizedVector( { 1.f, 3.f, 4.f, 3.f, 4.f, 4.f, 2.f, 3.f, 1.f, 2.f, 6.f, 4.f }, - qScale, qOffset)); + qScale, qOffset); std::unique_ptr workload = workloadFactory.CreateBatchNormalization(data, info); inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace @@ -627,14 +631,15 @@ LayerTestResult CompareBatchNormTest( outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32); - auto input = MakeRandomTensor(inputTensorInfo, 21312); + auto input = MakeRandomTensor(inputTensorInfo, 21312); - auto mean = MakeRandomTensor(tensorInfo, 123); - auto variance = MakeRandomTensor(tensorInfo, 234, 0.0f); - auto beta = MakeRandomTensor(tensorInfo, 123); - auto gamma = MakeRandomTensor(tensorInfo, 345); + auto mean = MakeRandomTensor(tensorInfo, 123); + auto variance = MakeRandomTensor(tensorInfo, 234, 0.0f); + auto beta = MakeRandomTensor(tensorInfo, 123); + auto gamma = MakeRandomTensor(tensorInfo, 345); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -675,16 +680,19 @@ LayerTestResult CompareBatchNormTest( inputHandleRef->Allocate(); outputHandleRef->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); workload->PostAllocationConfigure(); workload->Execute(); workloadRef->PostAllocationConfigure(); workloadRef->Execute(); -
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp index 9d539975c7..3669281d48 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp @@ -51,11 +51,10 @@ LayerTestResult BatchToSpaceNdHelper( outputTensorInfo.SetQuantizationScale(scale); outputTensorInfo.SetQuantizationOffset(offset); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType(inputData, inputTensorInfo)); + std::vector input = ConvertToDataType(inputData, inputTensorInfo); - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(outputData, outputTensorInfo)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = ConvertToDataType(outputData, outputTensorInfo); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -73,14 +72,17 @@ LayerTestResult BatchToSpaceNdHelper( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp index ad23b8c767..aec57dbad1 100644 --- a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp @@ -31,10 +31,7 @@ LayerTestResult CastTest(armnn::IWorkloadFactory& workloadFactory, outputTensorInfo.SetQuantizationOffset(quantizationOffset); } - auto input = MakeTensor(inputTensorInfo, inputValues); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputValues); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -49,13 +46,16 @@ LayerTestResult CastTest(armnn::IWorkloadFactory& workloadFactory, inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputValues, + outputHandle->GetShape(), 
+ outputTensorInfo.GetShape()); } LayerTestResult CastInt32ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp index be44234b76..68bc588860 100644 --- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp @@ -52,10 +52,7 @@ LayerTestResult ComparisonTestImpl( ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims); armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset); - auto input0 = MakeTensor(inputTensorInfo0, values0); - auto input1 = MakeTensor(inputTensorInfo1, values1); - - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); @@ -75,18 +72,19 @@ LayerTestResult ComparisonTestImpl( inputHandle1->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle0.get(), input0.origin()); - CopyDataToITensorHandle(inputHandle1.get(), input1.origin()); + CopyDataToITensorHandle(inputHandle0.get(), values0.data()); + CopyDataToITensorHandle(inputHandle1.get(), values1.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); - - ret.outputExpected = MakeTensor(outputTensorInfo, outValues); - ret.compareBoolean = true; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape(), + true); } template Concat1dTestImpl( { TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector({ 1.0f, 2.0f, 3.0f }, qScale, qOffset)); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector({ 4.0f, 5.0f, 6.0f }, qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector({ 7.0f, 8.0f, 9.0f }, qScale, qOffset)); + auto input0 = QuantizedVector({ 1.0f, 2.0f, 3.0f }, qScale, qOffset); + auto input1 = QuantizedVector({ 4.0f, 5.0f, 6.0f }, qScale, qOffset); + auto input2 = QuantizedVector({ 7.0f, 8.0f, 9.0f }, qScale, qOffset); TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset); @@ -446,12 +446,12 @@ LayerTestResult Concat1dTestImpl( 0, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -468,7 +468,7 @@ LayerTestResult Concat2dTestImpl( { TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, @@ -476,9 +476,9 @@ LayerTestResult Concat2dTestImpl( // Batch 1 10.0f, 11.0f, 12.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0 4.0f, 5.0f, 6.0f, @@ -486,9 +486,9 @@ LayerTestResult Concat2dTestImpl( // Batch 1 13.0f, 14.0f, 15.0f, }, - qScale, qOffset)); + qScale, 
qOffset); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0 7.0f, 8.0f, 9.0f, @@ -496,7 +496,7 @@ LayerTestResult Concat2dTestImpl( // Batch 1 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -510,7 +510,7 @@ LayerTestResult Concat2dTestImpl( dimension, true); - result.output = MakeTensor(outputTensorInfo, output); + result.m_ActualData = output; return result; } @@ -527,7 +527,7 @@ LayerTestResult Concat2dDim0TestImpl( LayerTestResult result = Concat2dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, @@ -547,7 +547,7 @@ LayerTestResult Concat2dDim0TestImpl( // Batch 5 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -565,7 +565,7 @@ LayerTestResult Concat2dDim1TestImpl( LayerTestResult result = Concat2dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, @@ -573,7 +573,7 @@ LayerTestResult Concat2dDim1TestImpl( // Batch 1 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -587,7 +587,7 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( int32_t qOffset) { TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, @@ -595,10 +595,10 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( // Batch 1 10.0f, 11.0f, 12.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0 4.0f, 5.0f, 6.0f, @@ -609,15 +609,15 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( // Batch 0 7.0f, 8.0f, 9.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 1 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -632,8 +632,8 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( 0, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, @@ -653,7 +653,7 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( // Batch 5 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -667,7 +667,7 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( int32_t qOffset) { TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, @@ -675,10 +675,10 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( // Batch 1 10.0f, 11.0f, 12.0f, }, - qScale, qOffset)); + 
qScale, qOffset); TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, @@ -686,10 +686,10 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( // Batch 1 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0 9.0f, @@ -697,7 +697,7 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( // Batch 1 18.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -712,8 +712,8 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( 1, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { // Batch 0 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, @@ -721,7 +721,7 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( // Batch 1 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -739,7 +739,7 @@ LayerTestResult Concat3dTestImpl( { TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -759,9 +759,9 @@ LayerTestResult Concat3dTestImpl( // Batch 1, Channel 2 23.0f, 24.0f }, - qScale, qOffset)); + qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0, Channel 0 7.0f, 8.0f, @@ -781,9 +781,9 @@ LayerTestResult Concat3dTestImpl( // Batch 1, Channel 2 29.0f, 30.0f }, - qScale, qOffset)); + qScale, qOffset); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0, Channel 0 13.0f, 14.0f, @@ -803,7 +803,7 @@ LayerTestResult Concat3dTestImpl( // Batch 1, Channel 2 35.0f, 36.0f }, - qScale, qOffset)); + qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -817,7 +817,7 @@ LayerTestResult Concat3dTestImpl( dimension, useSubtensor); - result.output = MakeTensor(outputTensorInfo, output); + result.m_ActualData = output; return result; } @@ -834,7 +834,7 @@ LayerTestResult Concat3dDim0TestImpl( LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -890,7 +890,7 @@ LayerTestResult Concat3dDim0TestImpl( // Batch 5, Channel 2 35.0f, 36.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -908,7 +908,7 @@ LayerTestResult Concat3dDim1TestImpl( LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -964,7 +964,7 @@ LayerTestResult Concat3dDim1TestImpl( // Batch 1, Channel 8 35.0f, 36.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -983,7 +983,7 @@ LayerTestResult 
Concat3dDim2TestImpl( LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, useSubtensor, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f, @@ -1003,7 +1003,7 @@ LayerTestResult Concat3dDim2TestImpl( // Batch 1, Channel 2 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1017,7 +1017,7 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( int32_t qOffset) { TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -1037,10 +1037,10 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 1, Channel 2 23.0f, 24.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0, Channel 0 7.0f, 8.0f, @@ -1051,10 +1051,10 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 0, Channel 2 11.0f, 12.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0, Channel 0 25.0f, 26.0f, @@ -1083,7 +1083,7 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 2, Channel 2 35.0f, 36.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -1098,8 +1098,8 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( 0, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -1155,7 +1155,7 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 5, Channel 2 35.0f, 36.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1169,7 +1169,7 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( int32_t qOffset) { TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -1189,10 +1189,10 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( // Batch 1, Channel 2 23.0f, 24.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0, Channel 0 7.0f, 8.0f, @@ -1218,10 +1218,10 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( // Batch 1, Channel 3 15.0f, 16.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0, Channel 0 17.0f, 18.0f, @@ -1229,7 +1229,7 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( // Batch 1, Channel 0 31.0f, 32.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1244,8 +1244,8 @@ LayerTestResult 
Concat3dDim1DiffInputDimsTestImpl( 1, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -1295,7 +1295,7 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( // Batch 1, Channel 7 31.0f, 32.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1310,7 +1310,7 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( int32_t qOffset) { TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, @@ -1330,10 +1330,10 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( // Batch 1, Channel 2 23.0f, 24.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { // Batch 0, Channel 0 7.0f, @@ -1353,10 +1353,10 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( // Batch 1, Channel 2 29.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { // Batch 0, Channel 0 13.0f, 14.0f, 50.0f, @@ -1376,7 +1376,7 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( // Batch 1, Channel 2 35.0f, 36.0f, 55.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1391,8 +1391,8 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( 2, useSubtensor); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { // Batch 0, Channel 0 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f, @@ -1412,7 +1412,7 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( // Batch 1, Channel 2 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f, }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1430,7 +1430,7 @@ LayerTestResult Concat4dTestImpl( { TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input0 = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1439,9 +1439,9 @@ LayerTestResult Concat4dTestImpl( 9.0f, 10.0f, 11.0f, 12.0f }, - qScale, qOffset)); + qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input1 = QuantizedVector( { 11.0f, 12.0f, 13.0f, 14.0f, @@ -1450,9 +1450,9 @@ LayerTestResult Concat4dTestImpl( 19.0f, 20.0f, 21.0f, 22.0f }, - qScale, qOffset)); + qScale, qOffset); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + auto input2 = QuantizedVector( { 21.0f, 22.0f, 23.0f, 24.0f, @@ -1461,7 +1461,7 @@ LayerTestResult Concat4dTestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1478,7 +1478,7 @@ LayerTestResult Concat4dTestImpl( dimension, useSubtensor); - result.output = MakeTensor(outputTensorInfo, output); + result.m_ActualData = output; return result; } @@ -1495,7 +1495,7 @@ LayerTestResult Concat4dDim0TestImpl( LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, 
outputTensorInfo, 0, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1518,7 +1518,7 @@ LayerTestResult Concat4dDim0TestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1536,7 +1536,7 @@ LayerTestResult Concat4dDim1TestImpl( LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1559,7 +1559,7 @@ LayerTestResult Concat4dDim1TestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1577,7 +1577,7 @@ LayerTestResult Concat4dDim2TestImpl( LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1600,7 +1600,7 @@ LayerTestResult Concat4dDim2TestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1619,7 +1619,7 @@ LayerTestResult Concat4dDim3TestImpl( LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 3, useSubtensor, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 11.0f, 12.0f, @@ -1642,7 +1642,7 @@ LayerTestResult Concat4dDim3TestImpl( 21.0f, 22.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1658,7 +1658,7 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( constexpr unsigned int dimension = 0u; TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + auto input0 = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1667,11 +1667,11 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( 9.0f, 10.0f, 11.0f, 12.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + auto input1 = QuantizedVector( { 11.0f, 12.0f, 13.0f, 14.0f, @@ -1687,7 +1687,7 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset); @@ -1705,8 +1705,8 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( dimension, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1729,7 +1729,7 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( 29.0f, 30.0f, 31.0f, 32.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1745,7 +1745,7 @@ LayerTestResult Concat4dDiffShapeDim1TestImpl( constexpr unsigned int dimension = 1u; TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + auto input0 = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1754,18 +1754,18 @@ LayerTestResult Concat4dDiffShapeDim1TestImpl( 9.0f, 10.0f, 11.0f, 12.0f }, - qScale, 
qOffset)); + qScale, qOffset); TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + auto input1 = QuantizedVector( { 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset); @@ -1783,8 +1783,8 @@ LayerTestResult Concat4dDiffShapeDim1TestImpl( dimension, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1797,7 +1797,7 @@ LayerTestResult Concat4dDiffShapeDim1TestImpl( 15.0f, 16.0f, 17.0f, 18.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1813,7 +1813,7 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( constexpr unsigned int dimension = 2u; TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + auto input0 = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1822,10 +1822,10 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( 9.0f, 10.0f, 11.0f, 12.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + auto input1 = QuantizedVector( { 11.0f, 12.0f, 13.0f, 14.0f, @@ -1837,7 +1837,7 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( 25.0f, 26.0f, 27.0f, 28.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1854,8 +1854,8 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( dimension, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1875,7 +1875,7 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( 25.0f, 26.0f, 27.0f, 28.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1892,7 +1892,7 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( constexpr unsigned int dimension = 3u; TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + auto input0 = QuantizedVector( { 1.0f, 2.0f, 3.0f, 4.0f, @@ -1901,10 +1901,10 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( 9.0f, 10.0f, 11.0f, 12.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + auto input1 = QuantizedVector( { 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, @@ -1915,7 +1915,7 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f }, - qScale, qOffset)); + qScale, qOffset); TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset); @@ -1933,8 +1933,8 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( dimension, useSubtensor); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + result.m_ActualData = output; + result.m_ExpectedData = QuantizedVector( { 1.0f, 2.0f, 11.0f, 12.0f, 13.0f, 3.0f, 4.0f, 14.0f, 15.0f, 16.0f, @@ -1943,7 +1943,7 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( 9.0f, 10.0f, 23.0f, 
24.0f, 25.0f, 11.0f, 12.0f, 26.0f, 27.0f, 28.0f }, - qScale, qOffset)); + qScale, qOffset); return result; } @@ -1968,7 +1968,7 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( const float inputScale1 = 0.5f; const int32_t inputOffset1 = 5; - auto input1 = MakeTensor(inputTensorInfo1, std::vector( + std::vector input1 = { 1, 2, 3, 4, 5, 6, @@ -1983,13 +1983,13 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( 28, 29, 30, 31, 32, 33, 34, 35, 36 - })); + }; // Quantized input2 tensor. const float inputScale2 = 0.2f; const int32_t inputOffset2 = 10; - auto input2 = MakeTensor(inputTensorInfo2, std::vector( + std::vector input2 = { 37, 38, 39, 40, 41, 42, @@ -1997,15 +1997,15 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( 46, 47, 48, 49, 50, 51, 52, 53, 54 - })); + }; // Quantized output tensor. const float outputScale = 0.1f; const int32_t outputOffset = 20; - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - ret.outputExpected = MakeTensor(outputTensorInfo, std::vector( + std::vector expectedOutput = { 0, 5, 74, 10, 15, 76, @@ -2027,7 +2027,7 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( 150, 155, 104, 160, 165, 106, 170, 175, 108 - })); + }; outputTensorInfo.SetQuantizationScale(outputScale); outputTensorInfo.SetQuantizationOffset(outputOffset); @@ -2075,15 +2075,18 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } // @@ -2132,61 +2135,58 @@ LayerTestResult ConcatTest( TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32); TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - ret.outputExpected = MakeTensor(outputTensorInfo, std::vector( + std::vector expectedOutput = { - 1.0f, 2.0f, 3.0f, - 4.0f, 5.0f, 6.0f, - 7.0f, 8.0f, 9.0f, - 10.0f, 11.0f, 12.0f, - 13.0f, 14.0f, 15.0f, - 16.0f, 17.0f, 18.0f, - - 19.0f, 20.0f, 21.0f, - 22.0f, 23.0f, 24.0f, - 25.0f, 26.0f, 27.0f, - 28.0f, 29.0f, 30.0f, - 31.0f, 32.0f, 33.0f, - 34.0f, 35.0f, 36.0f, - - 37.0f, 38.0f, 39.0f, - 40.0f, 41.0f, 42.0f, - 43.0f, 44.0f, 45.0f, - 46.0f, 47.0f, 48.0f, - 49.0f, 50.0f, 51.0f, - 52.0f, 53.0f, 54.0f, - }) - ); - - auto input1 = MakeTensor(inputTensorInfo1, std::vector( - { - 1.0f, 2.0f, 3.0f, - 4.0f, 5.0f, 6.0f, - 7.0f, 8.0f, 9.0f, - 10.0f, 11.0f, 12.0f, - 13.0f, 14.0f, 15.0f, - 16.0f, 17.0f, 18.0f, - - 19.0f, 20.0f, 21.0f, - 22.0f, 23.0f, 24.0f, - 25.0f, 26.0f, 27.0f, - 28.0f, 29.0f, 30.0f, - 31.0f, 32.0f, 33.0f, - 34.0f, 35.0f, 36.0f, - }) - ); - - auto input2 = MakeTensor(inputTensorInfo2, std::vector( - { - 37.0f, 38.0f, 39.0f, - 40.0f, 41.0f, 42.0f, - 43.0f, 44.0f, 45.0f, - 46.0f, 47.0f, 48.0f, - 49.0f, 50.0f, 51.0f, - 52.0f, 53.0f, 54.0f, - }) - ); + 1.0f, 2.0f, 3.0f, + 4.0f, 5.0f, 6.0f, + 7.0f, 8.0f, 9.0f, +
10.0f, 11.0f, 12.0f, + 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, + + 19.0f, 20.0f, 21.0f, + 22.0f, 23.0f, 24.0f, + 25.0f, 26.0f, 27.0f, + 28.0f, 29.0f, 30.0f, + 31.0f, 32.0f, 33.0f, + 34.0f, 35.0f, 36.0f, + + 37.0f, 38.0f, 39.0f, + 40.0f, 41.0f, 42.0f, + 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, + 49.0f, 50.0f, 51.0f, + 52.0f, 53.0f, 54.0f + }; + + std::vector input1 = + { + 1.0f, 2.0f, 3.0f, + 4.0f, 5.0f, 6.0f, + 7.0f, 8.0f, 9.0f, + 10.0f, 11.0f, 12.0f, + 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, + + 19.0f, 20.0f, 21.0f, + 22.0f, 23.0f, 24.0f, + 25.0f, 26.0f, 27.0f, + 28.0f, 29.0f, 30.0f, + 31.0f, 32.0f, 33.0f, + 34.0f, 35.0f, 36.0f + }; + + std::vector input2 = + { + 37.0f, 38.0f, 39.0f, + 40.0f, 41.0f, 42.0f, + 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, + 49.0f, 50.0f, 51.0f, + 52.0f, 53.0f, 54.0f, + }; std::vector wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0]. ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); @@ -2223,15 +2223,18 @@ LayerTestResult ConcatTest( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult Concat1dTest( @@ -2448,7 +2451,7 @@ LayerTestResult ConcatUint8DifferentQParamsTest( const float inputScale1 = 0.015686f; const int32_t inputOffset1 = 192; - auto input1 = MakeTensor(inputTensorInfo1, std::vector( + std::vector input1 = { 1, 2, 3, 4, 5, 6, @@ -2462,33 +2465,31 @@ LayerTestResult ConcatUint8DifferentQParamsTest( 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, - }) - ); + 34, 35, 36 }; // Quantized input2 tensor.
Range [-1, 4] const float inputScale2 = 0.019608f; const int32_t inputOffset2 = 50; - auto input2 = MakeTensor(inputTensorInfo2, std::vector( + std::vector input2 = { 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, - }) - ); + 52, 53, 54 }; // Output has the same quantization parameters as input1, // so that only the requantization of input2 is required const float outputScale = 0.015686f; const int32_t outputOffset = 192; - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - ret.outputExpected = MakeTensor(outputTensorInfo, std::vector( + std::vector expectedOutput = { 1, 2, 3, 4, 5, 6, @@ -2509,9 +2510,8 @@ LayerTestResult ConcatUint8DifferentQParamsTest( 183, 184, 186, 187, 188, 189, 191, 192, 193, - 195, 196, 197, - }) - ); + 195, 196, 197 }; outputTensorInfo.SetQuantizationScale(outputScale); outputTensorInfo.SetQuantizationOffset(outputOffset); @@ -2555,15 +2555,18 @@ LayerTestResult ConcatUint8DifferentQParamsTest( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult ConcatUint8Test( @@ -2601,34 +2604,9 @@ LayerTestResult ConcatUint8Test( inputTensorInfo2.SetQuantizationScale(scale); inputTensorInfo2.SetQuantizationOffset(offset); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - ret.outputExpected = MakeTensor(outputTensorInfo, std::vector( - { - 1, 2, 3, - 4, 5, 6, - 7, 8, 9, - 10, 11, 12, - 13, 14, 15, - 16, 17, 18, - - 19, 20, 21, - 22, 23, 24, - 25, 26, 27, - 28, 29, 30, - 31, 32, 33, - 34, 35, 36, - - 37, 38, 39, - 40, 41, 42, - 43, 44, 45, - 46, 47, 48, - 49, 50, 51, - 52, 53, 54, - }) - ); - - auto input1 = MakeTensor(inputTensorInfo1, std::vector( + std::vector expectedOutput = { 1, 2, 3, 4, 5, 6, @@ -2643,19 +2621,41 @@ LayerTestResult ConcatUint8Test( 28, 29, 30, 31, 32, 33, 34, 35, 36, - }) - ); - auto input2 = MakeTensor(inputTensorInfo2, std::vector( + 37, 38, 39, + 40, 41, 42, + 43, 44, 45, + 46, 47, 48, + 49, 50, 51, + 52, 53, 54 + }; + + std::vector input1 = + { + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + + 19, 20, 21, + 22, 23, 24, + 25, 26, 27, + 28, 29, 30, + 31, 32, 33, + 34, 35, 36 + }; + + std::vector input2 = { 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, - }) - ); + 52, 53, 54 }; std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); @@ -2693,15 +2693,18 @@ LayerTestResult ConcatUint8Test( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult ConcatUint16Test( @@ -2739,9 +2742,9 @@ LayerTestResult ConcatUint16Test( inputTensorInfo2.SetQuantizationScale(scale); inputTensorInfo2.SetQuantizationOffset(offset); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - ret.outputExpected = MakeTensor(outputTensorInfo, std::vector( + std::vector expectedOutput = { 1, 2, 3, 4, 5, 6, @@ -2762,10 +2765,10 @@ LayerTestResult ConcatUint16Test( 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, - })); + 52, 53, 54 + }; - auto input1 = MakeTensor(inputTensorInfo1, std::vector( + std::vector input1 = { 1, 2, 3, 4, 5, 6, @@ -2780,9 +2783,9 @@ LayerTestResult ConcatUint16Test( 28, 29, 30, 31, 32, 33, 34, 35, 36, - })); + }; - auto input2 = MakeTensor(inputTensorInfo2, std::vector( + std::vector input2 = { 37, 38, 39, 40, 41, 42, @@ -2790,7 +2793,7 @@ LayerTestResult ConcatUint16Test( 46, 47, 48, 49, 50, 51, 52, 53, 54, - })); + }; std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. 
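Every copy call in these hunks swaps &tensor[0][0][0] for vector::data(): a flat std::vector is already contiguous in the layout the handles expect, so a raw pointer suffices. A hypothetical illustration of that convention follows; Map and Unmap are real ITensorHandle members, but the byte-count plumbing and the helper name are assumptions, and the include path for ITensorHandle varies by ArmNN version, so it is omitted here:

#include <cstring>

// Hypothetical helper showing why the call sites only need a pointer: with
// flat storage the whole copy is one memcpy, sized by the destination tensor.
void CopyDataToITensorHandleSketch(armnn::ITensorHandle* handle,
                                   const void* memory,
                                   unsigned int numBytes)
{
    void* dst = handle->Map();          // map the backing memory for writing
    std::memcpy(dst, memory, numBytes); // flat source, flat destination
    handle->Unmap();
}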
ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); @@ -2829,15 +2832,18 @@ LayerTestResult ConcatUint16Test( inputHandle2->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); - CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle2.get(), input2.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult Concat1dUint8Test( diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp index c28ef40b45..bb827ef359 100644 --- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp @@ -55,54 +55,52 @@ LayerTestResult ConstantTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, std::vector( - armnnUtils::QuantizedVector( - { - // Batch 0, Channel 0 - 235.0f, 46.0f, 178.0f, - 100.0f, 123.0f, 19.0f, - 172.0f, 74.0f, 250.0f, - 6.0f, 195.0f, 80.0f, - - // Batch 0, Channel 1 - 113.0f, 95.0f, 202.0f, - 77.0f, 114.0f, 71.0f, - 122.0f, 246.0f, 166.0f, - 82.0f, 28.0f, 37.0f, - - // Batch 0, Channel 2 - 56.0f, 170.0f, 162.0f, - 194.0f, 89.0f, 254.0f, - 12.0f, 209.0f, 200.0f, - 1.0f, 64.0f, 54.0f, - - // Batch 1, Channel 0 - 67.0f, 90.0f, 49.0f, - 7.0f, 163.0f, 18.0f, - 25.0f, 117.0f, 103.0f, - 247.0f, 59.0f, 189.0f, - - // Batch 1, Channel 1 - 239.0f, 104.0f, 199.0f, - 17.0f, 124.0f, 153.0f, - 222.0f, 217.0f, 75.0f, - 32.0f, 126.0f, 21.0f, - - // Batch 1, Channel 2 - 97.0f, 145.0f, 215.0f, - 115.0f, 116.0f, 238.0f, - 226.0f, 16.0f, 132.0f, - 92.0f, 125.0f, 88.0f, - }, - qScale, qOffset))); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = input; + auto input = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0 + 235.0f, 46.0f, 178.0f, + 100.0f, 123.0f, 19.0f, + 172.0f, 74.0f, 250.0f, + 6.0f, 195.0f, 80.0f, + + // Batch 0, Channel 1 + 113.0f, 95.0f, 202.0f, + 77.0f, 114.0f, 71.0f, + 122.0f, 246.0f, 166.0f, + 82.0f, 28.0f, 37.0f, + + // Batch 0, Channel 2 + 56.0f, 170.0f, 162.0f, + 194.0f, 89.0f, 254.0f, + 12.0f, 209.0f, 200.0f, + 1.0f, 64.0f, 54.0f, + + // Batch 1, Channel 0 + 67.0f, 90.0f, 49.0f, + 7.0f, 163.0f, 18.0f, + 25.0f, 117.0f, 103.0f, + 247.0f, 59.0f, 189.0f, + + // Batch 1, Channel 1 + 239.0f, 104.0f, 199.0f, + 17.0f, 124.0f, 153.0f, + 222.0f, 217.0f, 75.0f, + 32.0f, 126.0f, 21.0f, + + // Batch 1, Channel 2 + 97.0f, 145.0f, 215.0f, + 115.0f, 116.0f, 238.0f, + 226.0f, 16.0f, 132.0f, + 92.0f, 125.0f, 88.0f, + }, + qScale, qOffset); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); armnn::ScopedTensorHandle constantTensor(inputTensorInfo); - AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&constantTensor, input.data()); armnn::ConstantQueueDescriptor descriptor; descriptor.m_LayerOutput = &constantTensor; @@ -117,8 +115,12 @@ LayerTestResult ConstantTestImpl( workload->PostAllocationConfigure(); workload->Execute(); - 
CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); - return result; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + input, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp index 8f60415a66..98264ee928 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp @@ -70,55 +70,49 @@ using namespace armnnUtils; // Helper template that returns either Bias2 or an empty vector depending on whether bias is enabled. template> -boost::multi_array GetBias2(bool biasEnabled, float qScale) +std::vector GetBias2(bool biasEnabled, float qScale) { if(biasEnabled) { - armnn::TensorInfo biasDesc({static_cast(Bias2.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias2, qScale, 0)); - return bias; + return QuantizedVector(Bias2, qScale, 0); } else { - return boost::multi_array(); + return std::vector(); } } // Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled. template> -boost::multi_array GetBias4(bool biasEnabled, float qScale) +std::vector GetBias4(bool biasEnabled, float qScale) { if(biasEnabled) { - armnn::TensorInfo biasDesc({static_cast(Bias4.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias4, qScale, 0)); - return bias; + return QuantizedVector(Bias4, qScale, 0); } else { - return boost::multi_array(); + return std::vector(); } } // Helper template that returns either Bias8 or an empty vector depending on whether bias is enabled. template> -boost::multi_array GetBias8(bool biasEnabled, float qScale) +std::vector GetBias8(bool biasEnabled, float qScale) { if(biasEnabled) { - armnn::TensorInfo biasDesc({static_cast(Bias4.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias8, qScale, 0)); - return bias; + return QuantizedVector(Bias8, qScale, 0); } else { - return boost::multi_array(); + return std::vector(); } } // Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled. 
template> -boost::multi_array GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout) +std::vector GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout) { const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout); const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex(); @@ -201,10 +195,13 @@ LayerTestResult SimpleConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& originalInput, - const boost::multi_array& originalKernel, - const boost::multi_array& bias, - const boost::multi_array& originalOutputExpected, + const std::vector& originalInput, + const std::vector& originalKernel, + const std::vector& bias, + const std::vector& originalOutputExpected, + const armnn::TensorShape& originalInputShape, + const armnn::TensorShape& originalKernelShape, + const armnn::TensorShape& originalOutputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout = armnn::DataLayout::NCHW, @@ -218,20 +215,20 @@ LayerTestResult SimpleConvolution2dTestImpl( uint32_t dilationY = 1) { armnn::IgnoreUnused(memoryManager); - unsigned int inputHeight = armnn::numeric_cast(originalInput.shape()[2]); - unsigned int inputWidth = armnn::numeric_cast(originalInput.shape()[3]); - unsigned int inputChannels = armnn::numeric_cast(originalInput.shape()[1]); - unsigned int inputNum = armnn::numeric_cast(originalInput.shape()[0]); + unsigned int inputHeight = armnn::numeric_cast(originalInputShape[2]); + unsigned int inputWidth = armnn::numeric_cast(originalInputShape[3]); + unsigned int inputChannels = armnn::numeric_cast(originalInputShape[1]); + unsigned int inputNum = armnn::numeric_cast(originalInputShape[0]); - unsigned int outputHeight = armnn::numeric_cast(originalOutputExpected.shape()[2]); - unsigned int outputWidth = armnn::numeric_cast(originalOutputExpected.shape()[3]); - unsigned int outputChannels = armnn::numeric_cast(originalOutputExpected.shape()[1]); - unsigned int outputNum = armnn::numeric_cast(originalOutputExpected.shape()[0]); + unsigned int outputHeight = armnn::numeric_cast(originalOutputExpectedShape[2]); + unsigned int outputWidth = armnn::numeric_cast(originalOutputExpectedShape[3]); + unsigned int outputChannels = armnn::numeric_cast(originalOutputExpectedShape[1]); + unsigned int outputNum = armnn::numeric_cast(originalOutputExpectedShape[0]); - unsigned int kernelHeight = armnn::numeric_cast(originalKernel.shape()[2]); - unsigned int kernelWidth = armnn::numeric_cast(originalKernel.shape()[3]); - unsigned int kernelChannels = armnn::numeric_cast(originalKernel.shape()[1]); - unsigned int kernelDepthMul = armnn::numeric_cast(originalKernel.shape()[0]); + unsigned int kernelHeight = armnn::numeric_cast(originalKernelShape[2]); + unsigned int kernelWidth = armnn::numeric_cast(originalKernelShape[3]); + unsigned int kernelChannels = armnn::numeric_cast(originalKernelShape[1]); + unsigned int kernelDepthMul = armnn::numeric_cast(originalKernelShape[0]); bool biasEnabled = bias.size() > 0; @@ -242,7 +239,6 @@ LayerTestResult SimpleConvolution2dTestImpl( // If a bias is used, its size must equal the number of output channels. ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels); - // Note these tensors will use two (identical) batches. 
armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType); @@ -265,8 +261,6 @@ LayerTestResult SimpleConvolution2dTestImpl( biasDesc.SetQuantizationOffset(0); } - LayerTestResult ret(outputTensorInfo); - // Construct input data - two batches of the same input image. std::vector inputImage; inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth); @@ -283,8 +277,6 @@ LayerTestResult SimpleConvolution2dTestImpl( inputData = tmp; } - auto batchedInput = MakeTensor(inputTensorInfo, inputData); - std::vector outputImage; outputImage.assign(originalOutputExpected.data(), originalOutputExpected.data() + outputChannels*outputHeight*outputWidth); @@ -299,19 +291,21 @@ LayerTestResult SimpleConvolution2dTestImpl( outputWidth, outputHeight); } + // Data will be copied from outputHandle + std::vector actualOutput(outputTensorInfo.GetNumElements()); + // Construct expected output data - two identical images. - std::vector outputData; - outputData.insert(outputData.end(), outputImage.begin(), outputImage.end()); - outputData.insert(outputData.end(), outputImage.begin(), outputImage.end()); + std::vector expectedOutput; + expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end()); + expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end()); // at this point if we require it permute the expected output if (layout == armnn::DataLayout::NHWC) { - std::vector tmp(outputData.size()); - armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T)); - outputData = tmp; + std::vector tmp(expectedOutput.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T)); + expectedOutput = tmp; } - ret.outputExpected = MakeTensor(outputTensorInfo, outputData); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -320,17 +314,18 @@ LayerTestResult SimpleConvolution2dTestImpl( armnn::WorkloadInfo info; armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); + // Permute the kernel if necessary - boost::multi_array kernel = boost::multi_array(originalKernel); + std::vector kernel = originalKernel; if (layout == armnn::DataLayout::NHWC) { armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T)); } - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data()); if(biasEnabled) { - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); } AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); @@ -353,13 +348,16 @@ LayerTestResult SimpleConvolution2dTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template 
SimpleConvolution2dNhwcTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& kernel, - const boost::multi_array& bias, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& kernel, + const std::vector& bias, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& kernelShape, + const armnn::TensorShape& outputExpectedShape, const armnn::DataLayout dataLayout, float qScale, int32_t qOffset, @@ -384,20 +385,20 @@ LayerTestResult SimpleConvolution2dNhwcTestImpl( uint32_t strideY = 1) { armnn::IgnoreUnused(qScale, qOffset); - unsigned int inputNum = armnn::numeric_cast(input.shape()[0]); - unsigned int inputChannels = armnn::numeric_cast(input.shape()[3]); - unsigned int inputHeight = armnn::numeric_cast(input.shape()[1]); - unsigned int inputWidth = armnn::numeric_cast(input.shape()[2]); + unsigned int inputNum = armnn::numeric_cast(inputShape[0]); + unsigned int inputChannels = armnn::numeric_cast(inputShape[3]); + unsigned int inputHeight = armnn::numeric_cast(inputShape[1]); + unsigned int inputWidth = armnn::numeric_cast(inputShape[2]); - unsigned int kernelChanMul = armnn::numeric_cast(kernel.shape()[0]); - unsigned int kernelChannels = armnn::numeric_cast(kernel.shape()[3]); - unsigned int kernelHeight = armnn::numeric_cast(kernel.shape()[1]); - unsigned int kernelWidth = armnn::numeric_cast(kernel.shape()[2]); + unsigned int kernelChanMul = armnn::numeric_cast(kernelShape[0]); + unsigned int kernelChannels = armnn::numeric_cast(kernelShape[3]); + unsigned int kernelHeight = armnn::numeric_cast(kernelShape[1]); + unsigned int kernelWidth = armnn::numeric_cast(kernelShape[2]); - unsigned int outputNum = armnn::numeric_cast(outputExpected.shape()[0]); - unsigned int outputChannels = armnn::numeric_cast(outputExpected.shape()[3]); - unsigned int outputHeight = armnn::numeric_cast(outputExpected.shape()[1]); - unsigned int outputWidth = armnn::numeric_cast(outputExpected.shape()[2]); + unsigned int outputNum = armnn::numeric_cast(outputExpectedShape[0]); + unsigned int outputChannels = armnn::numeric_cast(outputExpectedShape[3]); + unsigned int outputHeight = armnn::numeric_cast(outputExpectedShape[1]); + unsigned int outputWidth = armnn::numeric_cast(outputExpectedShape[2]); bool biasEnabled = bias.size() > 0; @@ -411,20 +412,18 @@ LayerTestResult SimpleConvolution2dNhwcTestImpl( // Construct the input data. std::vector inputData; inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels); - auto batchedInput = MakeTensor(inputTensorInfo, inputData); // Construct the output data, with bias applied, as appropriate. 
std::vector outputData; outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); armnn::ScopedTensorHandle weightsTensor(kernelDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data()); armnn::ScopedTensorHandle biasTensor(biasDesc); @@ -449,13 +448,16 @@ LayerTestResult SimpleConvolution2dNhwcTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -534,6 +536,8 @@ LayerTestResult Convolution1dTestImpl( outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset()); + std::vector actualOutput(outputInfo.GetNumElements()); + // Optionally apply bias to output image. if(biasEnabled) { @@ -574,11 +578,12 @@ LayerTestResult Convolution1dTestImpl( ExecuteWorkload(*workload, memoryManager); - // Output - LayerTestResult ret(outputInfo); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - ret.outputExpected = MakeTensor(outputInfo, outputData); - return ret; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } template> @@ -594,34 +599,31 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( armnn::IgnoreUnused(biasEnabled); // Use common single-batch 5x5 image. - armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, - { - 1, 5, 2, 3, - 8, 7, 3, 6, - 3, 3, 9, 1 - }); - + armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType); + std::vector input = + { + 1, 5, 2, 3, + 8, 7, 3, 6, + 3, 3, 9, 1 + }; // Use a 2-element batch of 3-channel 3x3 kernels. - armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, { - 4, 5, 6, - 0, 0, 0, - 3, 2, 1 - }); + armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType); + std::vector kernel = + { + 4, 5, 6, + 0, 0, 0, + 3, 2, 1 + }; // Expected output is 1 batch of a 5x5 image. 
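When the NCHW reference data above is run through an NHWC test, the implementations reorder it with armnnUtils::Permute and the NCHWToNHWC mapping before uploading. For one batch the reordering amounts to the index transform below (a sketch of the copy semantics only, not the real armnnUtils implementation):

    template <typename T>
    std::vector<T> NchwToNhwcSketch(const std::vector<T>& src,
                                    unsigned int n, unsigned int c,
                                    unsigned int h, unsigned int w)
    {
        std::vector<T> dst(src.size());
        for (unsigned int in = 0; in < n; ++in)
            for (unsigned int ic = 0; ic < c; ++ic)
                for (unsigned int ih = 0; ih < h; ++ih)
                    for (unsigned int iw = 0; iw < w; ++iw)
                    {
                        // NCHW source index -> NHWC destination index.
                        const std::size_t srcIdx = ((in * c + ic) * h + ih) * w + iw;
                        const std::size_t dstIdx = ((in * h + ih) * w + iw) * c + ic;
                        dst[dstIdx] = src[srcIdx];
                    }
        return dst;
    }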
- armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType); - + armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType); const std::vector outputData = - { - 23, 41, 33, 21, - 44, 65, 76, 52, - 82, 85, 79, 42 - }; - - boost::multi_array expectedOutput = MakeTensor(outputDesc, outputData); + { + 23, 41, 33, 21, + 44, 65, 76, 52, + 82, 85, 79, 42 + }; return SimpleConvolution2dNhwcTestImpl( workloadFactory, @@ -629,8 +631,11 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( tensorHandleFactory, input, kernel, - boost::multi_array(), - expectedOutput, + std::vector(), + outputData, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), dataLayout, qScale, qOffset); @@ -649,36 +654,33 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( armnn::IgnoreUnused(biasEnabled); // Input is a single-batch, 1 channel, 5x5 image. - armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, - { - 1, 5, 2, 3, 5, - 8, 7, 3, 6, 3, - 3, 3, 9, 1, 9, - 4, 1, 8, 1, 3, - 6, 8, 1, 9, 2 - }); + armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType); + std::vector input = + { + 1, 5, 2, 3, 5, + 8, 7, 3, 6, 3, + 3, 3, 9, 1, 9, + 4, 1, 8, 1, 3, + 6, 8, 1, 9, 2 + }; // Use a 3x3 kernel. - armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, - { - 4, 5, 6, - 0, 0, 0, - 3, 2, 1 - }); + armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType); + std::vector kernel = + { + 4, 5, 6, + 0, 0, 0, + 3, 2, 1 + }; // Expected output is a single-batch, 1 channel, 3x3 image. - armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType); - - const std::vector outputData = - { - 23, 33, 24, - 91, 99, 48, - 26, 50, 19 - }; - - boost::multi_array expectedOutput = MakeTensor(outputDesc, outputData); + armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType); + std::vector outputData = + { + 23, 33, 24, + 91, 99, 48, + 26, 50, 19 + }; uint32_t padLeft = 1; uint32_t padTop = 1; @@ -693,8 +695,11 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( tensorHandleFactory, input, kernel, - boost::multi_array(), - expectedOutput, + std::vector(), + outputData, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), dataLayout, qScale, qOffset, @@ -717,13 +722,12 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( const armnn::DataLayout layout) { // Use common single-batch 3-channel 16x8 image. - armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(ConvInput3x8x16, qScale, qOffset)); + armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType); + std::vector input = QuantizedVector(ConvInput3x8x16, qScale, qOffset); // Use a 2-element batch with 3-channel 3x5 kernels. - armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector({ + armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType); + std::vector kernel = QuantizedVector({ 1, 1, 1, 1, -1, 1, 1, 1, 1, @@ -761,12 +765,11 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( 0, 0, 0, 0, 0, 0 }, - qScale, qOffset))); + qScale, qOffset); // Expected output is 2 batch elements of a 1-channel 14x4 image. 
- armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector({ + armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType); + std::vector expectedOutput = QuantizedVector({ -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, @@ -779,7 +782,7 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - qScale, qOffset))); + qScale, qOffset); return SimpleConvolution2dTestImpl( workloadFactory, @@ -789,6 +792,9 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), qScale, qOffset, layout); @@ -808,13 +814,13 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path. // Use common single-batch 3-channel 16x8 image. - armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(ConvInput3x8x16, qScale, qOffset)); + armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType); + std::vector inputShape = { 1, 3, 8, 16 }; + std::vector input = QuantizedVector(ConvInput3x8x16, qScale, qOffset); // Use a 2-element batch of 3-channel 3x3 kernels. - armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector({ + armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType); + std::vector kernel = QuantizedVector({ 1, 1, 1, 1, -1, 1, 1, 1, 1, @@ -840,12 +846,11 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( 0, 0, 0, 0, 0, 0 }, - qScale, qOffset))); + qScale, qOffset); // Expected output is 1 batch of a 2-channel 14x6 image. - armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector({ + armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType); + std::vector expectedOutput = QuantizedVector({ -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, @@ -860,7 +865,7 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - qScale, qOffset))); + qScale, qOffset); return SimpleConvolution2dTestImpl( workloadFactory, @@ -870,6 +875,9 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), qScale, qOffset, layout); @@ -886,23 +894,23 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest int32_t qOffset) { // Use a single-batch 1-channel 3x3 image as input. - armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType); + std::vector input = QuantizedVector({ 11,21,31, 12,22,32, 13,23,33 }, - qScale, qOffset))); + qScale, qOffset); // Use 1 batch of a 1-channel 2x2 kernel. 
- armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( + armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType); + std::vector kernel = QuantizedVector({ -11,-21, -12,-22, }, - qScale, qOffset))); + qScale, qOffset); // Expected output is 1 batch of a 1-channel 6x8 image. // Manually calculated like this: @@ -913,8 +921,8 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest //[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..] //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..] //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..] - armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType); + std::vector expectedOutput = QuantizedVector({ 0, 0, 0, 0, 0, 0, -242, -594, -934, -372, 0, 0, @@ -925,7 +933,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - qScale, qOffset))); + qScale, qOffset); return SimpleConvolution2dTestImpl( workloadFactory, @@ -935,6 +943,9 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest kernel, GetBias2(false, qScale * qScale), expectedOutput, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), qScale, qOffset, layout, @@ -956,30 +967,29 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( { // Use a single-batch 1-channel 5x5 image as input. armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = QuantizedVector({ 11,21,31,41,51, 12,22,32,42,52, 13,23,33,43,53, 14,24,34,44,54, 15,25,35,45,55, - }, qScale, qOffset))); + }, qScale, qOffset); // Use 1 batch of a 1-channel 4x4 kernel. armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType); - boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( + std::vector kernel = QuantizedVector({ -11,-21,-31,-41, -12,-22,-32,-42, -13,-23,-33,-43, -14,-24,-34,-44, }, - qScale, qOffset))); + qScale, qOffset); // Expected output is 1 batch of a 1-channel 5x5 image. 
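The "manually calculated" expected values in these asymmetric-padding tests follow the zero-padded cross-correlation spelled out in the bracketed comments above. A naive stride-1 reference for a single batch and channel (a sketch for orientation only, not code from the test suite):

    #include <vector>

    std::vector<float> RefConv2d(const std::vector<float>& in, unsigned int inH, unsigned int inW,
                                 const std::vector<float>& k,  unsigned int kH,  unsigned int kW,
                                 unsigned int padTop, unsigned int padLeft,
                                 unsigned int outH, unsigned int outW)
    {
        std::vector<float> out(outH * outW, 0.0f);
        for (unsigned int y = 0; y < outH; ++y)
        {
            for (unsigned int x = 0; x < outW; ++x)
            {
                for (unsigned int ky = 0; ky < kH; ++ky)
                {
                    for (unsigned int kx = 0; kx < kW; ++kx)
                    {
                        // Map the output coordinate back into the (virtually padded) input;
                        // out-of-range taps contribute zero.
                        const int iy = static_cast<int>(y + ky) - static_cast<int>(padTop);
                        const int ix = static_cast<int>(x + kx) - static_cast<int>(padLeft);
                        if (iy >= 0 && iy < static_cast<int>(inH) &&
                            ix >= 0 && ix < static_cast<int>(inW))
                        {
                            out[y * outW + x] += in[iy * inW + ix] * k[ky * kW + kx];
                        }
                    }
                }
            }
        }
        return out;
    }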
armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType); - std::vector myVec(outputDesc.GetNumElements(), 0); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + std::vector expectedOutput = QuantizedVector({ -7140, -10580, -13940, -9300, -5230, -9590, -14120, -18520, -12290, -6860, @@ -987,7 +997,7 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( -7518, -10904, -14144, -9318, -5152, -5032, -7256, -9376, -6142, -3368, }, - qScale, qOffset))); + qScale, qOffset); return SimpleConvolution2dTestImpl( workloadFactory, @@ -997,6 +1007,9 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( kernel, GetBias2(false, qScale * qScale), expectedOutput, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), qScale, qOffset, layout, @@ -1062,19 +1075,15 @@ LayerTestResult Convolution2d3x3DilationTestCommon( outputTensorInfo.SetQuantizationScale(qScale); outputTensorInfo.SetQuantizationOffset(qOffset); - auto input = MakeTensor(inputTensorInfo, - std::vector(QuantizedVector(inputNoQuantizedValues, - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset()))); - auto kernel = MakeTensor(kernelTensorInfo, - std::vector(QuantizedVector(kernelNoQuantizedValues, - kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset()))); - auto expectedOutput = - MakeTensor(outputTensorInfo, - std::vector(QuantizedVector(outputExpectedNoQuantizedValues, - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset()))); + auto input = QuantizedVector(inputNoQuantizedValues, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()); + auto kernel = QuantizedVector(kernelNoQuantizedValues, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()); + auto expectedOutput = QuantizedVector(outputExpectedNoQuantizedValues, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()); return SimpleConvolution2dTestImpl( workloadFactory, @@ -1084,6 +1093,9 @@ LayerTestResult Convolution2d3x3DilationTestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), qScale, qOffset, layout, @@ -1105,7 +1117,7 @@ LayerTestResult Convolution2d3x3Dilation3x3Test( bool biasEnabled, const armnn::DataLayout layout) { - armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType); + armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType); std::vector inputNoQuantizedValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1163,7 +1175,7 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( bool biasEnabled, const armnn::DataLayout layout) { - armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType); + armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType); std::vector inputNoQuantizedValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1189,7 +1201,7 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType); + armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType); std::vector kernelNoQuantizedValues = { 1, 2, 3, @@ -1203,7 +1215,7 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( // Since the dilation rate is 3 this will dilate the kernel to be like 7x7, // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1 - armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType); + armnn::TensorInfo 
outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); std::vector outputExpectedNoQuantizedValues = { 12., 10., 10., 10., @@ -1230,13 +1242,13 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( template LayerTestResult Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test( - armnn::IWorkloadFactory &workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout) { - armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType); + armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType); std::vector inputNoQuantizedValues = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -1251,7 +1263,7 @@ LayerTestResult Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType); + armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType); std::vector kernelNoQuantizedValues = { 1, 2, @@ -1338,11 +1350,12 @@ LayerTestResult CompareConvolution2dTestImpl( kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType); biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType); - LayerTestResult ret(outputTensorInfo); + auto input = MakeRandomTensor(inputTensorInfo, 124908); + auto kernel = MakeRandomTensor(kernelDesc, 891234); + auto bias = MakeRandomTensor(biasDesc, 1028); - auto input = MakeRandomTensor(inputTensorInfo, 124908); - auto kernel = MakeRandomTensor(kernelDesc, 891234); - auto bias = MakeRandomTensor(biasDesc, 1028); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -1352,8 +1365,8 @@ LayerTestResult CompareConvolution2dTestImpl( armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -1371,11 +1384,11 @@ LayerTestResult CompareConvolution2dTestImpl( std::unique_ptr inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo); armnn::Convolution2dQueueDescriptor refData = data; - armnn::WorkloadInfo refInfo = info; + armnn::WorkloadInfo refInfo = info; SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); - std::unique_ptr workload = workloadFactory.CreateConvolution2d(data, info); + std::unique_ptr workload = workloadFactory.CreateConvolution2d(data, info); std::unique_ptr workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo); outputHandleRef->Allocate(); @@ -1384,18 +1397,21 @@ LayerTestResult CompareConvolution2dTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), 
input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); ExecuteWorkload(*workload, memoryManager); workloadRef->PostAllocationConfigure(); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( @@ -1409,7 +1425,7 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( armnn::IgnoreUnused(biasEnabled); // Input is a single-batch, 1 channel, 5x5 image. - armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16); + armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16); std::vector inputValues = armnnUtils::QuantizedVector( { @@ -1441,8 +1457,6 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( }, 1.0f, 0); - auto input = MakeTensor(inputDesc, inputValues); - // Use a 3x3 kernel. armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16); @@ -1460,10 +1474,8 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( }, 1.0f, 0); - auto kernel = MakeTensor(kernelDesc, kernelValues); - // Expected output is a single-batch, 1 channel, 3x3 image. - armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32); + armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32); // Expected output (with results if calculated as FP32 in the comments) const std::vector outputData = @@ -1479,8 +1491,6 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( -20.625f // -20.63477281 }; - boost::multi_array expectedOutput = MakeTensor(outputDesc, outputData); - uint32_t padLeft = 1; uint32_t padTop = 1; uint32_t padRight = 1; @@ -1493,10 +1503,13 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16Test( workloadFactory, memoryManager, tensorHandleFactory, - input, - kernel, - boost::multi_array(), - expectedOutput, + inputValues, + kernelValues, + std::vector(), + outputData, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), dataLayout, 1.0f, 0, @@ -1551,8 +1564,6 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16SmallValueTest( }, 1.0f, 0); - auto input = MakeTensor(inputDesc, inputValues); - // Use a 3x3 kernel. armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16); @@ -1570,8 +1581,6 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16SmallValueTest( }, 1.0f, 0); - auto kernel = MakeTensor(kernelDesc, kernelValues); - // Expected output is a single-batch, 1 channel, 3x3 image. 
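The two BFloat16 tests above annotate each expected value with the FP32 result it approximates, because a BFloat16 keeps only the top 16 bits of an IEEE-754 float. A rough model of that conversion, with round-to-nearest-even and NaN handling omitted (an assumption about the numeric behaviour, not the armnn::BFloat16 implementation):

    #include <cstdint>
    #include <cstring>

    uint16_t FloatToBFloat16Bits(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // bit-pattern copy, no UB
        const uint32_t lsb      = (bits >> 16) & 1u; // last bit that survives truncation
        const uint32_t rounding = 0x7FFFu + lsb;     // ties round to even
        return static_cast<uint16_t>((bits + rounding) >> 16);
    }

The same rounding story explains the "Round up" / "Round down" notes beside the bit patterns in the Fp32ToBf16 test data later in this patch.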
armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32); @@ -1589,8 +1598,6 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16SmallValueTest( -0.0346679688f // -0.034808 }; - boost::multi_array expectedOutput = MakeTensor(outputDesc, outputData); - uint32_t padLeft = 1; uint32_t padTop = 1; uint32_t padRight = 1; @@ -1603,10 +1610,13 @@ LayerTestResult Convolution2d3x3Stride2x2BFloat16SmallValueTest( workloadFactory, memoryManager, tensorHandleFactory, - input, - kernel, - boost::multi_array(), - expectedOutput, + inputValues, + kernelValues, + std::vector(), + outputData, + inputDesc.GetShape(), + kernelDesc.GetShape(), + outputDesc.GetShape(), dataLayout, 1.0f, 0, @@ -1628,10 +1638,13 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& kernel, - const boost::multi_array& bias, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& kernel, + const std::vector& bias, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& kernelShape, + const armnn::TensorShape& outputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout, @@ -1642,18 +1655,18 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( uint32_t strideX = 1, uint32_t strideY = 1) { - unsigned int inputNum = armnn::numeric_cast(input.shape()[0]); - unsigned int inputChannels = armnn::numeric_cast(input.shape()[1]); - unsigned int inputHeight = armnn::numeric_cast(input.shape()[2]); - unsigned int inputWidth = armnn::numeric_cast(input.shape()[3]); - unsigned int kernelChanMul = armnn::numeric_cast(kernel.shape()[0]); - unsigned int kernelChannels = armnn::numeric_cast(kernel.shape()[1]); - unsigned int kernelHeight = armnn::numeric_cast(kernel.shape()[2]); - unsigned int kernelWidth = armnn::numeric_cast(kernel.shape()[3]); - unsigned int outputNum = armnn::numeric_cast(outputExpected.shape()[0]); - unsigned int outputChannels = armnn::numeric_cast(outputExpected.shape()[1]); - unsigned int outputHeight = armnn::numeric_cast(outputExpected.shape()[2]); - unsigned int outputWidth = armnn::numeric_cast(outputExpected.shape()[3]); + unsigned int inputNum = armnn::numeric_cast(inputShape[0]); + unsigned int inputChannels = armnn::numeric_cast(inputShape[1]); + unsigned int inputHeight = armnn::numeric_cast(inputShape[2]); + unsigned int inputWidth = armnn::numeric_cast(inputShape[3]); + unsigned int kernelChanMul = armnn::numeric_cast(kernelShape[0]); + unsigned int kernelChannels = armnn::numeric_cast(kernelShape[1]); + unsigned int kernelHeight = armnn::numeric_cast(kernelShape[2]); + unsigned int kernelWidth = armnn::numeric_cast(kernelShape[3]); + unsigned int outputNum = armnn::numeric_cast(outputExpectedShape[0]); + unsigned int outputChannels = armnn::numeric_cast(outputExpectedShape[1]); + unsigned int outputHeight = armnn::numeric_cast(outputExpectedShape[2]); + unsigned int outputWidth = armnn::numeric_cast(outputExpectedShape[3]); // If a bias is used, its size must equal the number of output channels. 
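Across all of these implementations an empty vector now doubles as the "no bias" sentinel: bias.size() > 0 enables the bias path, and the comment just above ties that size to the output channel count. The layout-aware GetBias introduced at the top of this file plausibly reduces to the following shape (a sketch; the name and fill values are illustrative, and the real helper returns fixed per-channel test constants):

    template <typename B>
    std::vector<B> GetBiasSketch(bool biasEnabled, float qScale,
                                 const armnn::TensorInfo& outputInfo, armnn::DataLayout layout)
    {
        if (!biasEnabled)
        {
            return {}; // empty vector == bias disabled at every call site
        }
        const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
        const unsigned int channels = outputInfo.GetShape()[dataLayoutIndexed.GetChannelsIndex()];
        std::vector<B> bias(channels); // one value per output channel
        // ... fill with the fixed per-channel constants the tests expect,
        //     quantized with qScale (elided here).
        armnn::IgnoreUnused(qScale);
        return bias;
    }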
bool biasEnabled = bias.size() > 0; @@ -1693,8 +1706,6 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( inputData = tmp; } - auto batchedInput = MakeTensor(inputTensorInfo, inputData); - // Construct the output data, with bias applied, as appropriate. std::vector outputData; outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth); @@ -1707,7 +1718,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( outputWidth, outputHeight); } - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); // At this point if we require it permute the expected output if (layout == armnn::DataLayout::NHWC) @@ -1717,19 +1728,17 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( outputData = tmp; } - ret.outputExpected = MakeTensor(outputTensorInfo, outputData); - std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); armnn::ScopedTensorHandle weightsTensor(kernelDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data()); armnn::ScopedTensorHandle biasTensor(biasDesc); if (biasEnabled) { - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); } armnn::DepthwiseConvolution2dQueueDescriptor data; @@ -1752,13 +1761,16 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -1829,14 +1841,11 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T)); inputData = tmp; } - auto input = MakeTensor(inputTensorInfo, inputData); std::vector biasV(QuantizedVector({ 0, 2 }, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset())); - auto bias = MakeTensor(biasDesc, biasV); - std::vector kernelData = std::vector( QuantizedVector({ 1.f, 0.f, 1.f, @@ -1850,8 +1859,6 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset())); - auto kernel = MakeTensor(kernelDesc, kernelData); - // Manually calculated. 
std::vector outputImage( QuantizedVector({ 0.f, 0.f }, @@ -1867,7 +1874,6 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( outputWidth, outputHeight); } - LayerTestResult ret(outputTensorInfo); if (layout == armnn::DataLayout::NHWC) { std::vector tmp(outputImage.size()); @@ -1875,7 +1881,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( outputImage = tmp; } - ret.outputExpected = MakeTensor(outputTensorInfo, outputImage); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -1885,8 +1891,8 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data()); AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -1906,13 +1912,16 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputImage, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -1994,14 +2003,11 @@ LayerTestResult DepthwiseConvolution2dTestImpl( armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, originalInputData.data(), inputData.data(), sizeof(T)); } - auto input = MakeTensor(inputTensorInfo, inputData); std::vector biasV = QuantizedVector({ 0, 2, 1, -1 }, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset()); - auto bias = MakeTensor(biasDesc, biasV); - std::vector kernelData = std::vector( QuantizedVector({ 1, 1, 1, @@ -2031,8 +2037,6 @@ LayerTestResult DepthwiseConvolution2dTestImpl( kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset())); - auto kernel = MakeTensor(kernelDesc, kernelData); - // Manually calculated. 
std::vector originalOutputImage = std::vector( QuantizedVector({ @@ -2080,7 +2084,6 @@ LayerTestResult DepthwiseConvolution2dTestImpl( outputHeight); } - LayerTestResult ret(outputTensorInfo); std::vector outputImage = originalOutputImage; if (layout == armnn::DataLayout::NHWC) { @@ -2088,7 +2091,7 @@ LayerTestResult DepthwiseConvolution2dTestImpl( originalOutputImage.data(), outputImage.data(), sizeof(T)); } - ret.outputExpected = MakeTensor(outputTensorInfo, outputImage); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -2098,8 +2101,8 @@ LayerTestResult DepthwiseConvolution2dTestImpl( armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data()); AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -2119,13 +2122,17 @@ LayerTestResult DepthwiseConvolution2dTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + outputImage, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); - return ret; } template DepthwiseConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& originalInput, - const boost::multi_array& originalKernel, - const boost::multi_array& bias, - const boost::multi_array& originalOutputExpected, + const std::vector& originalInput, + const std::vector& originalKernel, + const std::vector& bias, + const std::vector& originalOutputExpected, + const armnn::TensorShape& originalInputShape, + const armnn::TensorShape& originalKernelShape, + const armnn::TensorShape& originalOutputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout = armnn::DataLayout::NCHW, @@ -2150,20 +2160,20 @@ LayerTestResult DepthwiseConvolution2dTestImpl( uint32_t dilationX = 1, uint32_t dilationY = 1) { - unsigned int inputHeight = armnn::numeric_cast(originalInput.shape()[2]); - unsigned int inputWidth = armnn::numeric_cast(originalInput.shape()[3]); - unsigned int inputChannels = armnn::numeric_cast(originalInput.shape()[1]); - unsigned int inputNum = armnn::numeric_cast(originalInput.shape()[0]); + unsigned int inputHeight = armnn::numeric_cast(originalInputShape[2]); + unsigned int inputWidth = armnn::numeric_cast(originalInputShape[3]); + unsigned int inputChannels = armnn::numeric_cast(originalInputShape[1]); + unsigned int inputNum = armnn::numeric_cast(originalInputShape[0]); - unsigned int outputHeight = armnn::numeric_cast(originalOutputExpected.shape()[2]); - unsigned int outputWidth = armnn::numeric_cast(originalOutputExpected.shape()[3]); - unsigned int 
outputChannels = armnn::numeric_cast(originalOutputExpected.shape()[1]); - unsigned int outputNum = armnn::numeric_cast(originalOutputExpected.shape()[0]); + unsigned int outputHeight = armnn::numeric_cast(originalOutputExpectedShape[2]); + unsigned int outputWidth = armnn::numeric_cast(originalOutputExpectedShape[3]); + unsigned int outputChannels = armnn::numeric_cast(originalOutputExpectedShape[1]); + unsigned int outputNum = armnn::numeric_cast(originalOutputExpectedShape[0]); - unsigned int kernelHeight = armnn::numeric_cast(originalKernel.shape()[2]); - unsigned int kernelWidth = armnn::numeric_cast(originalKernel.shape()[3]); - unsigned int kernelChannels = armnn::numeric_cast(originalKernel.shape()[1]); - unsigned int kernelDepthMul = armnn::numeric_cast(originalKernel.shape()[0]); + unsigned int kernelHeight = armnn::numeric_cast(originalKernelShape[2]); + unsigned int kernelWidth = armnn::numeric_cast(originalKernelShape[3]); + unsigned int kernelChannels = armnn::numeric_cast(originalKernelShape[1]); + unsigned int kernelDepthMul = armnn::numeric_cast(originalKernelShape[0]); bool biasEnabled = bias.size() > 0; @@ -2199,8 +2209,6 @@ LayerTestResult DepthwiseConvolution2dTestImpl( biasDesc.SetQuantizationOffset(0); } - LayerTestResult ret(outputTensorInfo); - // Construct input data std::vector input; input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth); @@ -2217,8 +2225,6 @@ LayerTestResult DepthwiseConvolution2dTestImpl( inputData = tmp; } - auto batchedInput = MakeTensor(inputTensorInfo, inputData); - std::vector output; output.assign(originalOutputExpected.data(), originalOutputExpected.data() + outputChannels*outputHeight*outputWidth); @@ -2233,6 +2239,8 @@ LayerTestResult DepthwiseConvolution2dTestImpl( outputWidth, outputHeight); } + std::vector actualOutput(outputTensorInfo.GetNumElements()); + // Construct expected output data std::vector outputData; outputData.insert(outputData.end(), output.begin(), output.end()); @@ -2245,7 +2253,6 @@ LayerTestResult DepthwiseConvolution2dTestImpl( armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T)); outputData = tmp; } - ret.outputExpected = MakeTensor(outputTensorInfo, outputData); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -2255,12 +2262,11 @@ LayerTestResult DepthwiseConvolution2dTestImpl( armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); - boost::multi_array kernel = boost::multi_array(originalKernel); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, originalKernel.data()); if(biasEnabled) { - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); } AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); @@ -2283,13 +2289,16 @@ LayerTestResult DepthwiseConvolution2dTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + 
outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template DepthwiseConvolution2dAsymmetricTestCommon( { // Use a single-batch 2-channel 5x5 image as input. armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector({ + auto input = QuantizedVector( + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, @@ -2320,12 +2329,12 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 45, 46, 47, 48, 49 }, inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset()))); + inputTensorInfo.GetQuantizationOffset()); // Use a depth multiplier of 1 on a 2-channel 4x4 kernel. armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); - auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector({ + auto kernel = QuantizedVector( + { 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -2337,13 +2346,13 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 4, 3, 2, 1 }, kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset()))); + kernelTensorInfo.GetQuantizationOffset()); // Expected output is 1 batch of a 2-channel 5x5 image. // Calculated using the python tensorflow library with strideX=1, strideY=1. armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector({ + auto expectedOutput = QuantizedVector( + { 1062, 1580, 1850, 1530, 1117, 2140, 3108, 3500, 2842, 2042, 3580, 5068, 5460, 4342, 3062, @@ -2357,7 +2366,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 3100, 4352, 4452, 3517, 2465 }, outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset()))); + outputTensorInfo.GetQuantizationOffset()); return DepthwiseConvolution2dAsymmetricTestImpl( workloadFactory, @@ -2367,6 +2376,9 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), qScale, qOffset, layout, @@ -2391,8 +2403,8 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( auto layout = armnn::DataLayout::NHWC; armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType); - auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector({ + auto input = QuantizedVector( + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, @@ -2406,11 +2418,11 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 45, 46, 47, 48, 49 }, inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset()))); + inputTensorInfo.GetQuantizationOffset()); armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); - auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector({ + auto kernel = QuantizedVector( + { 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -2422,11 +2434,11 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 4, 3, 2, 1 }, kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset()))); + kernelTensorInfo.GetQuantizationOffset()); armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector({ + auto expectedOutput = QuantizedVector( + { 1062, 1580, 1850, 1530, 1117, 2140, 3108, 3500, 2842, 2042, 3580, 5068, 5460, 4342, 3062, @@ -2440,7 +2452,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 
3100, 4352, 4452, 3517, 2465 }, outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset()))); + outputTensorInfo.GetQuantizationOffset()); return DepthwiseConvolution2dTestImpl( workloadFactory, @@ -2450,6 +2462,9 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), qScale, qOffset, layout, @@ -2473,9 +2488,9 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( { auto layout = armnn::DataLayout::NHWC; - armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType); - auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector({ + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); + auto input = QuantizedVector( + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2487,17 +2502,17 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( 0, 0, 0, 0, 0, 0, 0, 0, 0 }, inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset()))); + inputTensorInfo.GetQuantizationOffset()); - armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType); - auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector({ + armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3 }, ArmnnType); + auto kernel = QuantizedVector( + { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset()))); + kernelTensorInfo.GetQuantizationOffset()); uint32_t padLeft = 0; uint32_t padTop = 0; @@ -2509,15 +2524,15 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( uint32_t dilationY = 3; // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s. 
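Both dilation comments in this file rest on the same arithmetic: with dilation D, a K x K kernel has an effective extent of (K - 1) * D + 1, so the 3x3 kernel at dilation 3 covers a 7x7 window; a 9x9 input with no padding and stride 1 then yields (9 - 7)/1 + 1 = 3, just as the earlier 10x10 case yields (10 - 7)/1 + 1 = 4. As a worked helper (sketch only, not part of the test suite):

    unsigned int DilatedConvOutputSize(unsigned int inputSize, unsigned int kernelSize,
                                       unsigned int pad, unsigned int stride, unsigned int dilation)
    {
        const unsigned int effectiveKernel = (kernelSize - 1) * dilation + 1;
        return (inputSize + 2 * pad - effectiveKernel) / stride + 1;
    }

    // DilatedConvOutputSize(9, 3, 0, 1, 3)  == 3, matching the comment above.
    // DilatedConvOutputSize(10, 3, 0, 1, 3) == 4, matching the earlier 10x10 test.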
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType); - boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector({ + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); + auto expectedOutput = QuantizedVector( + { 5, 5, 5, 5, 5, 5, 5, 5, 5 }, outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset()))); + outputTensorInfo.GetQuantizationOffset()); return DepthwiseConvolution2dTestImpl( workloadFactory, @@ -2527,6 +2542,9 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( kernel, GetBias2(biasEnabled, qScale * qScale), expectedOutput, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), qScale, qOffset, layout, @@ -2589,19 +2607,15 @@ LayerTestResult DepthwiseConvolution2d3x3DilationTestCommon( outputTensorInfo.SetQuantizationScale(qScale); outputTensorInfo.SetQuantizationOffset(qOffset); - auto input = MakeTensor(inputTensorInfo, - std::vector(QuantizedVector(inputNoQuantizedValues, - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset()))); - auto kernel = MakeTensor(kernelTensorInfo, - std::vector(QuantizedVector(kernelNoQuantizedValues, - kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset()))); - auto expectedOutput = - MakeTensor(outputTensorInfo, - std::vector(QuantizedVector(outputExpectedNoQuantizedValues, - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset()))); + auto input = QuantizedVector(inputNoQuantizedValues, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()); + auto kernel = QuantizedVector(kernelNoQuantizedValues, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()); + auto expectedOutput = QuantizedVector(outputExpectedNoQuantizedValues, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()); uint32_t padLeft = 0; uint32_t padTop = 0; @@ -2618,6 +2632,9 @@ LayerTestResult DepthwiseConvolution2d3x3DilationTestCommon( kernel, GetBias(biasEnabled, qScale * qScale, outputTensorInfo, layout), expectedOutput, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), qScale, qOffset, layout, @@ -2965,7 +2982,6 @@ LayerTestResult CompareDepthwiseConvolution2dTestImpl( armnn::TensorInfo kernelDesc; armnn::TensorInfo biasDesc; - std::vector inputShape; std::vector outputShape; std::vector kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth }; @@ -2992,15 +3008,14 @@ LayerTestResult CompareDepthwiseConvolution2dTestImpl( inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset); outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset); kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset); - biasDesc = armnn::TensorInfo( - 1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset); + biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset); - LayerTestResult ret(outputTensorInfo); + auto input = MakeRandomTensor(inputTensorInfo, 124908, 0.0f, 255.0f); + auto kernel = MakeRandomTensor(kernelDesc, 891234, 0.0f, 255.0f); + auto bias = MakeRandomTensor::Type>(biasDesc, 1028, 0.0f, 255.0f); - auto input = MakeRandomTensor(inputTensorInfo, 124908, 0.0f, 255.0f); - auto kernel = MakeRandomTensor(kernelDesc, 891234, 0.0f, 
255.0f); - auto bias = MakeRandomTensor::Type, 1>( - biasDesc, 1028, 0.0f, 255.0f); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -3010,8 +3025,8 @@ LayerTestResult CompareDepthwiseConvolution2dTestImpl( armnn::ScopedTensorHandle weightsTensor(kernelDesc); armnn::ScopedTensorHandle biasTensor(biasDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -3043,18 +3058,21 @@ LayerTestResult CompareDepthwiseConvolution2dTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); ExecuteWorkload(*workload, memoryManager); workloadRef->PostAllocationConfigure(); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } // @@ -3486,6 +3504,8 @@ LayerTestResult Convolution2dPerAxisQuantTest( PermuteTensorNhwcToNchw(outputInfo, expectedOutputData); } + std::vector actualOutput(outputInfo.GetNumElements()); + Convolution2dDescriptor descriptor; descriptor.m_StrideX = 1; descriptor.m_StrideY = 1; @@ -3496,11 +3516,9 @@ LayerTestResult Convolution2dPerAxisQuantTest( descriptor.m_BiasEnabled = true; descriptor.m_DataLayout = layout; - std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); - WorkloadInfo workloadInfo; ScopedTensorHandle weightTensor(kernelInfo); ScopedTensorHandle biasTensor(biasInfo); @@ -3524,11 +3542,12 @@ LayerTestResult Convolution2dPerAxisQuantTest( ExecuteWorkload(*workload, memoryManager); - LayerTestResult ret(outputInfo); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputInfo, expectedOutputData); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } LayerTestResult CompareConvolution2dTest( @@ -3580,7 +3599,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul64Test( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32); - auto input = MakeTensor(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f }); + std::vector input = { 1.f, 2.f, 3.f, 4.f }; std::vector kernelData; std::vector singleDepthKernel{ 1.f, -1.f, -1.f, 1.f }; @@ -3589,20 
+3608,21 @@ LayerTestResult DepthwiseConvolution2dDepthMul64Test( kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end()); } armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32); - auto kernel = MakeTensor(kernelTensorInfo, kernelData); std::vector expectedOutputData(64, 0.f); armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32); - auto expectedOutput = MakeTensor(outputTensorInfo, expectedOutputData); return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, - kernel, - boost::multi_array(), - expectedOutput, + kernelData, + std::vector(), + expectedOutputData, + inputTensorInfo.GetShape(), + kernelTensorInfo.GetShape(), + outputTensorInfo.GetShape(), 0.f, 0, armnn::DataLayout::NCHW); @@ -3740,6 +3760,8 @@ LayerTestResult DepthwiseConvolution2dPerAxisQuantTest( PermuteTensorNhwcToNchw(outputInfo, expectedOutputData); } + std::vector actualOutput(outputInfo.GetNumElements()); + DepthwiseConvolution2dDescriptor descriptor; descriptor.m_StrideX = 1; descriptor.m_StrideY = 1; @@ -3780,10 +3802,12 @@ LayerTestResult DepthwiseConvolution2dPerAxisQuantTest( LayerTestResult ret(outputInfo); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputInfo, expectedOutputData); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } LayerTestResult CompareDepthwiseConvolution2dFloatTest( diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp index fdc6220d51..b16ce47c8f 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp @@ -23,16 +23,16 @@ LayerTestResult ConvertBf16ToFp32Test( std::vector inputValues = armnnUtils::QuantizedVector( { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }, 1.0f, 0); - auto input = MakeTensor(inputTensorInfo, std::vector(inputValues)); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, - { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = + { + -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f + }; std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -47,11 +47,14 @@ LayerTestResult ConvertBf16ToFp32Test( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git 
a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp index 8745a5293b..177acef772 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp @@ -24,14 +24,19 @@ LayerTestResult SimpleConvertFp16ToFp32Test( const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); - auto input = MakeTensor(inputTensorInfo, - { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h, - 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h }); + std::vector input = + { + -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h, + 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h + }; - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, - { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }); + std::vector expectedOutput = + { + -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f + }; + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -46,11 +51,14 @@ LayerTestResult SimpleConvertFp16ToFp32Test( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp index db832594cd..9ab3746b61 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp @@ -20,8 +20,9 @@ LayerTestResult ConvertFp32ToBf16Test( const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32); const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16); - auto input = MakeTensor(inputTensorInfo, - { -37.5f, -15.2f, -8.76f, + std::vector input = + { + -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, 1.0f, 0.4f, 0.5f, @@ -33,13 +34,13 @@ LayerTestResult ConvertFp32ToBf16Test( -3.8f, // 0xC0733333 Round down -3.1055E+29f, // 0xF07ADC3C Round up -9.149516E-10f // 0xB07B7FFF Round down - }); + }; - std::vector outputValues = armnnUtils::QuantizedVector( + std::vector expectedOutput = armnnUtils::QuantizedVector( { - -37.5f, -15.2f, -8.76f, - -2.0f, -1.5f, -1.3f, - -0.5f, -0.4f, 0.0f, + -37.5f, -15.2f, -8.76f, + -2.0f, -1.5f, -1.3f, + -0.5f, -0.4f, 0.0f, 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f, @@ -52,8 +53,7 @@ LayerTestResult ConvertFp32ToBf16Test( }, 1.0f, 0); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputValues); + 
std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -68,11 +68,15 @@ LayerTestResult ConvertFp32ToBf16Test( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); - return ret; } diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp index 5fbec56435..9946801aab 100644 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp @@ -22,14 +22,19 @@ LayerTestResult SimpleConvertFp32ToFp16Test( const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16); - auto input = MakeTensor(inputTensorInfo, - { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }); + std::vector input = + { + -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f + }; - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, - { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h, - 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h }); + std::vector expectedOutput = + { + -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h, + 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h + }; + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -44,11 +49,14 @@ LayerTestResult SimpleConvertFp32ToFp16Test( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp index f2127c0f0c..97204750d0 100644 --- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp @@ -40,12 +40,10 @@ LayerTestResult DebugTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = - MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - 
ret.outputExpected = - MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset); ARMNN_NO_DEPRECATE_WARN_BEGIN std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); @@ -73,9 +71,12 @@ LayerTestResult DebugTestImpl( BOOST_TEST(oss.str() == expectedStringOutput); - CopyDataFromITensorHandle(ret.output.data(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template > diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp index a2a5483844..7495c6b5b3 100644 --- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp @@ -44,12 +44,10 @@ LayerTestResult DepthToSpaceTestImpl( outputInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = - MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); - LayerTestResult result(outputInfo); - result.outputExpected = - MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset)); + std::vector actualOutput(outputInfo.GetNumElements()); + std::vector expectedOutput = armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset); ARMNN_NO_DEPRECATE_WARN_BEGIN std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputInfo); @@ -65,12 +63,16 @@ LayerTestResult DepthToSpaceTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); - return result; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp index f60b42cae5..924844d92f 100644 --- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp @@ -27,10 +27,8 @@ LayerTestResult DequantizeTestImpl( armnn::DequantizeQueueDescriptor descriptor) { IgnoreUnused(memoryManager); - boost::multi_array input = MakeTensor(inputTensorInfo, inputData); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); ARMNN_NO_DEPRECATE_WARN_BEGIN std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); @@ -46,13 +44,16 @@ LayerTestResult DequantizeTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.data(), outputHandle.get()); + 
CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template ::GetFactory(memoryManager); auto tensorHandleFactory = WorkloadFactoryHelper::GetTensorHandleFactory(memoryManager); - auto boxEncodings = MakeTensor(boxEncodingsInfo, boxEncodingsData); - auto scores = MakeTensor(scoresInfo, scoresData); - auto anchors = MakeTensor(anchorsInfo, anchorsData); - armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32); - armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32); armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32); + armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32); armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32); - LayerTestResult detectionBoxesResult(detectionBoxesInfo); - detectionBoxesResult.outputExpected = MakeTensor(detectionBoxesInfo, expectedDetectionBoxes); - LayerTestResult detectionClassesResult(detectionClassesInfo); - detectionClassesResult.outputExpected = MakeTensor(detectionClassesInfo, expectedDetectionClasses); - LayerTestResult detectionScoresResult(detectionScoresInfo); - detectionScoresResult.outputExpected = MakeTensor(detectionScoresInfo, expectedDetectionScores); - LayerTestResult numDetectionsResult(numDetectionInfo); - numDetectionsResult.outputExpected = MakeTensor(numDetectionInfo, expectedNumDetections); + std::vector actualDetectionBoxesOutput(detectionBoxesInfo.GetNumElements()); + std::vector actualDetectionClassesOutput(detectionClassesInfo.GetNumElements()); + std::vector actualDetectionScoresOutput(detectionScoresInfo.GetNumElements()); + std::vector actualNumDetectionOutput(numDetectionInfo.GetNumElements()); auto boxedHandle = tensorHandleFactory.CreateTensorHandle(boxEncodingsInfo); auto scoreshandle = tensorHandleFactory.CreateTensorHandle(scoresInfo); @@ -182,7 +174,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo, auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo); armnn::ScopedTensorHandle anchorsTensor(anchorsInfo); - AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]); + AllocateAndCopyDataToITensorHandle(&anchorsTensor, anchorsData.data()); armnn::DetectionPostProcessQueueDescriptor data; data.m_Parameters.m_UseRegularNms = useRegularNms; @@ -200,7 +192,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo, armnn::WorkloadInfo info; AddInputToWorkload(data, info, boxEncodingsInfo, boxedHandle.get()); - AddInputToWorkload(data, info, scoresInfo, scoreshandle.get()); + AddInputToWorkload(data, info, scoresInfo, scoreshandle.get()); AddOutputToWorkload(data, info, detectionBoxesInfo, outputBoxesHandle.get()); AddOutputToWorkload(data, info, detectionClassesInfo, classesHandle.get()); AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get()); @@ -215,23 +207,38 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo, outputScoresHandle->Allocate(); numDetectionHandle->Allocate(); - CopyDataToITensorHandle(boxedHandle.get(), boxEncodings.origin()); - CopyDataToITensorHandle(scoreshandle.get(), scores.origin()); + CopyDataToITensorHandle(boxedHandle.get(), boxEncodingsData.data()); + CopyDataToITensorHandle(scoreshandle.get(), scoresData.data()); workload->Execute(); - CopyDataFromITensorHandle(detectionBoxesResult.output.origin(), 
outputBoxesHandle.get()); - CopyDataFromITensorHandle(detectionClassesResult.output.origin(), classesHandle.get()); - CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get()); - CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get()); + CopyDataFromITensorHandle(actualDetectionBoxesOutput.data(), outputBoxesHandle.get()); + CopyDataFromITensorHandle(actualDetectionClassesOutput.data(), classesHandle.get()); + CopyDataFromITensorHandle(actualDetectionScoresOutput.data(), outputScoresHandle.get()); + CopyDataFromITensorHandle(actualNumDetectionOutput.data(), numDetectionHandle.get()); - auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected); + auto result = CompareTensors(actualDetectionBoxesOutput, + expectedDetectionBoxes, + outputBoxesHandle->GetShape(), + detectionBoxesInfo.GetShape()); BOOST_TEST(result.m_Result, result.m_Message.str()); - result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected); + + result = CompareTensors(actualDetectionClassesOutput, + expectedDetectionClasses, + classesHandle->GetShape(), + detectionClassesInfo.GetShape()); BOOST_TEST(result.m_Result, result.m_Message.str()); - result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected); + + result = CompareTensors(actualDetectionScoresOutput, + expectedDetectionScores, + outputScoresHandle->GetShape(), + detectionScoresInfo.GetShape()); BOOST_TEST(result.m_Result, result.m_Message.str()); - result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected); + + result = CompareTensors(actualNumDetectionOutput, + expectedNumDetections, + numDetectionHandle->GetShape(), + numDetectionInfo.GetShape()); BOOST_TEST(result.m_Result, result.m_Message.str()); } diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp index ec5bfb0396..88f34f6add 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp @@ -25,43 +25,38 @@ template std::unique_ptr CreateWorkload( - const armnn::IWorkloadFactory& workloadFactory, - const armnn::WorkloadInfo& info, - const DescriptorType& descriptor) -{ + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const DescriptorType& descriptor) { return CreateWorkload(workloadFactory, info, descriptor); } -template , - typename TOutput = armnn::ResolveType> +template, + typename TOutput = armnn::ResolveType> LayerTestResult ElementwiseTestHelper( - armnn::IWorkloadFactory & workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[NumDims], - std::vector values0, - float quantScale0, - int quantOffset0, - const unsigned int shape1[NumDims], - std::vector values1, - float quantScale1, - int quantOffset1, - const unsigned int outShape[NumDims], - std::vector outValues, - const armnn::ITensorHandleFactory& tensorHandleFactory, - float outQuantScale, - int outQuantOffset) -{ + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const unsigned int shape0[NumDims], + std::vector values0, + float quantScale0, + int quantOffset0, + const unsigned int shape1[NumDims], + std::vector values1, + float quantScale1, + int quantOffset1, + const unsigned int outShape[NumDims], 
+ std::vector outValues, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float outQuantScale, + int outQuantOffset) { armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput}; armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput}; armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput}; - auto input0 = MakeTensor(inputTensorInfo0, values0); - auto input1 = MakeTensor(inputTensorInfo1, values1); - inputTensorInfo0.SetQuantizationScale(quantScale0); inputTensorInfo0.SetQuantizationOffset(quantOffset0); @@ -71,11 +66,12 @@ LayerTestResult ElementwiseTestHelper( outputTensorInfo.SetQuantizationScale(outQuantScale); outputTensorInfo.SetQuantizationOffset(outQuantOffset); - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); - if(ArmnnTypeOutput == armnn::DataType::Boolean) + bool isBoolean = false; + if (ArmnnTypeOutput == armnn::DataType::Boolean) { - ret.compareBoolean = true; + isBoolean = true; } std::unique_ptr inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0); @@ -93,121 +89,121 @@ LayerTestResult ElementwiseTestHelper( inputHandle1->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle0.get(), input0.origin()); - CopyDataToITensorHandle(inputHandle1.get(), input1.origin()); + CopyDataToITensorHandle(inputHandle0.get(), values0.data()); + CopyDataToITensorHandle(inputHandle1.get(), values1.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputTensorInfo, outValues); - return ret; + return LayerTestResult(actualOutput, + outValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape(), + isBoolean); } -template > +template> LayerTestResult ElementwiseTestHelper( - armnn::IWorkloadFactory & workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[NumDims], - std::vector values0, - float quantScale0, - int quantOffset0, - const unsigned int shape1[NumDims], - std::vector values1, - float quantScale1, - int quantOffset1, - const unsigned int outShape[NumDims], - std::vector outValues, - const armnn::ITensorHandleFactory& tensorHandleFactory, - float outQuantScale, - int outQuantOffset) -{ + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const unsigned int shape0[NumDims], + std::vector values0, + float quantScale0, + int quantOffset0, + const unsigned int shape1[NumDims], + std::vector values1, + float quantScale1, + int quantOffset1, + const unsigned int outShape[NumDims], + std::vector outValues, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float outQuantScale, + int outQuantOffset) { return ElementwiseTestHelper( - workloadFactory, - memoryManager, - shape0, - values0, - quantScale0, - quantOffset0, - shape1, - values1, - quantScale1, - quantOffset1, - outShape, - outValues, - tensorHandleFactory, - outQuantScale, - outQuantOffset); + workloadFactory, + memoryManager, + shape0, + values0, + quantScale0, + quantOffset0, + shape1, + values1, + quantScale1, + quantOffset1, + outShape, + outValues, + tensorHandleFactory, + outQuantScale, + outQuantOffset); } -template , - typename TOutput = armnn::ResolveType> +template, + typename TOutput = armnn::ResolveType> LayerTestResult 
ElementwiseTestHelper( - armnn::IWorkloadFactory & workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[NumDims], - std::vector values0, - const unsigned int shape1[NumDims], - std::vector values1, - const unsigned int outShape[NumDims], - std::vector outValues, - const armnn::ITensorHandleFactory& tensorHandleFactory, - float quantScale = 1.0f, - int quantOffset = 0) -{ + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const unsigned int shape0[NumDims], + std::vector values0, + const unsigned int shape1[NumDims], + std::vector values1, + const unsigned int outShape[NumDims], + std::vector outValues, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float quantScale = 1.0f, + int quantOffset = 0) { return ElementwiseTestHelper( - workloadFactory, - memoryManager, - shape0, - values0, - quantScale, - quantOffset, - shape1, - values1, - quantScale, - quantOffset, - outShape, - outValues, - tensorHandleFactory, - quantScale, - quantOffset); + workloadFactory, + memoryManager, + shape0, + values0, + quantScale, + quantOffset, + shape1, + values1, + quantScale, + quantOffset, + outShape, + outValues, + tensorHandleFactory, + quantScale, + quantOffset); } -template > +template> LayerTestResult ElementwiseTestHelper( - armnn::IWorkloadFactory & workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[NumDims], - std::vector values0, - const unsigned int shape1[NumDims], - std::vector values1, - const unsigned int outShape[NumDims], - std::vector outValues, - const armnn::ITensorHandleFactory& tensorHandleFactory, - float quantScale = 1.0f, - int quantOffset = 0) -{ + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const unsigned int shape0[NumDims], + std::vector values0, + const unsigned int shape1[NumDims], + std::vector values1, + const unsigned int outShape[NumDims], + std::vector outValues, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float quantScale = 1.0f, + int quantOffset = 0) { return ElementwiseTestHelper( - workloadFactory, - memoryManager, - shape0, - values0, - shape1, - values1, - outShape, - outValues, - tensorHandleFactory, - quantScale, - quantOffset); + workloadFactory, + memoryManager, + shape0, + values0, + shape1, + values1, + outShape, + outValues, + tensorHandleFactory, + quantScale, + quantOffset); } \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp index 5d37e934ea..20e341b4e2 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp @@ -55,9 +55,9 @@ LayerTestResult ElementwiseUnaryTestHelper( outputTensorInfo.SetQuantizationScale(outQuantScale); outputTensorInfo.SetQuantizationOffset(outQuantOffset); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType(values, inputTensorInfo)); - - LayerTestResult ret(outputTensorInfo); + std::vector input = ConvertToDataType(values, inputTensorInfo); + std::vector expectedOutput = ConvertToDataType(outValues, inputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr 
outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -73,16 +73,18 @@ LayerTestResult ElementwiseUnaryTestHelper( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); - ret.outputExpected = MakeTensor(outputTensorInfo, ConvertToDataType(outValues, - inputTensorInfo)); - return ret; } template FakeQuantizationTest( constexpr unsigned int width = 2; constexpr unsigned int height = 3; - const armnn::TensorInfo tensorInfo({height, width }, - armnn::DataType::Float32); + const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32); - auto input = MakeTensor(tensorInfo, std::vector({ + std::vector input = + { -10.0f, -5.0f, 0.0f, 5.0f, 10.0f, 10.0f - })); + }; - LayerTestResult ret(tensorInfo); + std::vector actualOutput(tensorInfo.GetNumElements()); + std::vector expectedOutput(tensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo); @@ -48,7 +49,7 @@ LayerTestResult FakeQuantizationTest( data.m_Parameters.m_Min = min; data.m_Parameters.m_Max = max; - armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]); + armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data()); armnn::FakeQuantizationQueueDescriptor refData = data; armnn::WorkloadInfo refInfo = info; SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle); @@ -58,18 +59,22 @@ LayerTestResult FakeQuantizationTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - ret.outputExpected = MakeTensor(tensorInfo, std::vector({ + expectedOutput = + { 0.0f, 63.0f, 128.0f, 191.0f, 255.0f, 255.0f - })); + }; - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + tensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp index f96d33735f..9208a311a7 100644 --- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp @@ -21,15 +21,15 @@ LayerTestResult SimpleFillTest( armnn::TensorInfo inputTensorInfo({4}, armnn::DataType::Signed32); armnn::TensorInfo outputTensorInfo({2, 2, 3, 2}, ArmnnType); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( - {2, 2, 3, 2}, - inputTensorInfo)); + std::vector input = ConvertToDataType( { 2, 2, 3, 2 }, inputTensorInfo); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, ConvertToDataType( - { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f }, - outputTensorInfo)); + 
std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = ConvertToDataType( + { + 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, + 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f + }, + outputTensorInfo); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -45,13 +45,16 @@ LayerTestResult SimpleFillTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } // diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp index 46f384266b..bf871ae2f4 100644 --- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp @@ -24,16 +24,20 @@ LayerTestResult SimpleFloorTest( armnn::TensorInfo outputTensorInfo(inputTensorInfo); outputTensorInfo.SetQuantizationScale(0.1f); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( - { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }, - inputTensorInfo)); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, ConvertToDataType( - { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f, - 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f }, - outputTensorInfo)); + std::vector input = ConvertToDataType( + { + -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f + }, + inputTensorInfo); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = ConvertToDataType( + { + -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f, + 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f + }, + outputTensorInfo); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -48,13 +52,16 @@ LayerTestResult SimpleFloorTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } // diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp index cd7f4efe31..c47048e566 100644 --- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp @@ -29,9 +29,9 @@ LayerTestResult SimpleFullyConnectedTestImpl( armnn::TensorInfo outputTensorInfo, 
armnn::TensorInfo weightsDesc, armnn::TensorInfo biasesDesc, - boost::multi_array& weights, - boost::multi_array& bias, - boost::multi_array& input, + std::vector& weights, + std::vector& bias, + std::vector& input, bool biasEnabled, bool transposeWeights) { @@ -43,8 +43,10 @@ LayerTestResult SimpleFullyConnectedTestImpl( armnn::ScopedTensorHandle weightsTensor(weightsDesc); armnn::ScopedTensorHandle biasTensor(biasesDesc); - AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]); - AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data()); AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -58,11 +60,12 @@ LayerTestResult SimpleFullyConnectedTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + result.m_ActualData = actualOutput; return result; } @@ -76,9 +79,9 @@ LayerTestResult SimpleFullyConnectedTestWeightsAsInputsImpl( armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo, - boost::multi_array& weights, - boost::multi_array& bias, - boost::multi_array& input, + std::vector& weights, + std::vector& bias, + std::vector& input, bool biasEnabled, bool transposeWeights) { @@ -86,6 +89,8 @@ LayerTestResult SimpleFullyConnectedTestWeightsAsInputsImpl( std::unique_ptr input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + armnn::FullyConnectedQueueDescriptor data; armnn::WorkloadInfo info; @@ -109,17 +114,18 @@ LayerTestResult SimpleFullyConnectedTestWeightsAsInputsImpl( input0Handle->Allocate(); input1Handle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(input0Handle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(input1Handle.get(), &weights[0][0]); + CopyDataToITensorHandle(input0Handle.get(), input.data()); + CopyDataToITensorHandle(input1Handle.get(), weights.data()); if (biasEnabled) { input2Handle->Allocate(); - CopyDataToITensorHandle(input2Handle.get(), &bias[0]); + CopyDataToITensorHandle(input2Handle.get(), bias.data()); } ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + result.m_ActualData = actualOutput; return result; } @@ -158,21 +164,21 @@ LayerTestResult FullyConnectedTest( LayerTestResult result(outputTensorInfo); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( + std::vector input = ConvertToDataType( { -1.2f, 6.1f, -3.5f, 18.8f, -5.5f, 2.9f }, - inputTensorInfo)); + inputTensorInfo); - auto weights = MakeTensor(weightsDesc, ConvertToDataType( + std::vector weights = ConvertToDataType( { -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f, 23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f }, - weightsDesc)); + weightsDesc); - auto bias = MakeTensor(biasesDesc, std::vector{9250, 67500}); + 
std::vector bias = {9250, 67500}; if (constantWeights) { @@ -207,13 +213,11 @@ LayerTestResult FullyConnectedTest( if (biasEnabled) { - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType({80.f, 1460.f}, outputTensorInfo)); + result.m_ExpectedData = ConvertToDataType({80.f, 1460.f}, outputTensorInfo); } else { - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType({-107.04f, 110.f}, outputTensorInfo)); + result.m_ExpectedData = ConvertToDataType({-107.04f, 110.f}, outputTensorInfo); } return result; @@ -274,22 +278,19 @@ LayerTestResult FullyConnectedLargeTestCommon( LayerTestResult result(outputTensorInfo); - boost::multi_array input = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector({ + std::vector input = armnnUtils::QuantizedVector( + { 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f, }, - qScale, qOffset) - ); + qScale, qOffset); - boost::multi_array weights = MakeTensor(weightsDesc, - armnnUtils::QuantizedVector({ + std::vector weights = armnnUtils::QuantizedVector( + { 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }, - qScale, qOffset) - ); + qScale, qOffset); std::vector biasValues({900000.f}); - boost::multi_array bias = MakeTensor(biasesDesc, biasValues); result = SimpleFullyConnectedTestImpl( workloadFactory, @@ -297,12 +298,11 @@ LayerTestResult FullyConnectedLargeTestCommon( tensorHandleFactory, inputTensorInfo, outputTensorInfo, weightsDesc, biasesDesc, - weights, bias, input, + weights, biasValues, input, true, transposeWeights ); - result.outputExpected = MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector({ 965432.0f }, qScale, qOffset)); + result.m_ExpectedData = armnnUtils::QuantizedVector({ 965432.0f }, qScale, qOffset); return result; } @@ -370,40 +370,36 @@ LayerTestResult FullyConnectedFloat32Test( LayerTestResult result(outputTensorInfo); - boost::multi_array input = MakeTensor(inputTensorInfo, std::vector( - { - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, - - 5.0f, 4.0f, 3.0f, 2.0f, 1.0f - }) - ); + std::vector input = + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 5.0f, 4.0f, 3.0f, 2.0f, 1.0f + }; - boost::multi_array weights = MakeTensor(weightsDesc, std::vector( - { - .5f, 2.f, .5f, - .5f, 2.f, 1.f, - .5f, 2.f, 2.f, - .5f, 2.f, 3.f, - .5f, 2.f, 4.f - })); + std::vector weights = + { + .5f, 2.f, .5f, + .5f, 2.f, 1.f, + .5f, 2.f, 2.f, + .5f, 2.f, 3.f, + .5f, 2.f, 4.f + }; if (transposeWeights) { - weights = MakeTensor(weightsDesc, std::vector( + weights = { .5f, .5f, .5f, .5f, .5f, 2.f, 2.f, 2.f, 2.f, 2.f, .5f, 1.f, 2.f, 3.f, 4.f - })); + }; } - std::vector biasValues({0.f, 0.f, 0.f}); if (biasEnabled) { - biasValues = std::vector({10.f, 20.f, 30.f}); + biasValues = std::vector({10.f, 20.f, 30.f}); } - boost::multi_array bias = MakeTensor(biasesDesc, biasValues); result = SimpleFullyConnectedTestImpl( workloadFactory, @@ -411,21 +407,21 @@ LayerTestResult FullyConnectedFloat32Test( tensorHandleFactory, inputTensorInfo, outputTensorInfo, weightsDesc, biasesDesc, - weights, bias, input, + weights, biasValues, input, biasEnabled, transposeWeights ); - result.outputExpected = MakeTensor(outputTensorInfo, std::vector( - { - 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0], - 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1], - 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2], - - 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0], - 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1], - 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2] - }) - ); + std::vector expectedOutput = + { + 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0], + 2.0f + 4.0f + 6.0f + 8.0f + 10.f + 
biasValues[1], + 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2], + + 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0], + 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1], + 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2] + }; + result.m_ExpectedData = expectedOutput; return result; } diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp index 7fabff6c1c..51df1eb847 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp @@ -33,11 +33,8 @@ LayerTestResult GatherTestImpl( const std::vector& outputData) { IgnoreUnused(memoryManager); - auto params = MakeTensor(paramsInfo, paramsData); - auto indices = MakeTensor(indicesInfo, indicesData); - LayerTestResult result(outputInfo); - result.outputExpected = MakeTensor(outputInfo, outputData); + std::vector actualOutput(outputInfo.GetNumElements()); std::unique_ptr paramsHandle = tensorHandleFactory.CreateTensorHandle(paramsInfo); std::unique_ptr indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo); @@ -55,14 +52,17 @@ LayerTestResult GatherTestImpl( indicesHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(paramsHandle.get(), params.origin()); - CopyDataToITensorHandle(indicesHandle.get(), indices.origin()); + CopyDataToITensorHandle(paramsHandle.get(), paramsData.data()); + CopyDataToITensorHandle(indicesHandle.get(), indicesData.data()); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp index 24a4dc4789..ed656daa02 100644 --- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp @@ -36,19 +36,15 @@ LayerTestResult InstanceNormTestImpl( int32_t qOffset = 0) { IgnoreUnused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector(inputValues, qScale, qOffset)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset)); + std::vector inputTensor = armnnUtils::QuantizedVector(inputValues, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); armnn::WorkloadInfo info; - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); @@ -57,13 +53,16 @@ LayerTestResult InstanceNormTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + 
CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp index 227ac63941..e242fd31d3 100644 --- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp @@ -30,7 +30,7 @@ LayerTestResult L2NormalizationTestImpl( const std::vector& inputValues, float outScale, int32_t outOffset, - const std::vector& expectedOutputValues, + std::vector& expectedOutputValues, const armnn::DataLayout layout, float epsilon = 1e-12f) { @@ -48,26 +48,23 @@ LayerTestResult L2NormalizationTestImpl( inputData = tmp; } - auto inputTensor = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector(inputData, - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset())); + auto inputTensor = armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); - std::vector expectedOutputData = expectedOutputValues; if (layout == armnn::DataLayout::NHWC) { - std::vector tmp(expectedOutputData.size()); - armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(), + std::vector tmp(expectedOutputValues.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(), sizeof(float)); - expectedOutputData = tmp; + expectedOutputValues = tmp; } - LayerTestResult result(outputTensorInfo); - result.outputExpected = - MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector(expectedOutputData, - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset())); + std::vector expectedOutputData = armnnUtils::QuantizedVector(expectedOutputValues, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -85,14 +82,17 @@ LayerTestResult L2NormalizationTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } float CalcInvL2Norm(std::initializer_list elements) @@ -725,10 +725,7 @@ LayerTestResult L2Normalization2dShapeTest( const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0); const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0); - auto inputTensor = MakeTensor(inputTensorInfo, inputData); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, expectedOutputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr 
inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -746,14 +743,17 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
    inputHandle->Allocate();
    outputHandle->Allocate();
-   CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+   CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);
-   CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+   CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-   return result;
+   return LayerTestResult<float, 2>(actualOutput,
+                                    expectedOutputData,
+                                    outputHandle->GetShape(),
+                                    outputTensorInfo.GetShape());
}
LayerTestResult L2Normalization3dTest(
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
index c64fc88024..ac60764964 100644
--- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
@@ -8,38 +8,56 @@
#include
#include
-#include <boost/multi_array.hpp>
-
#include
-
-template <std::size_t n>
-boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
-{
-    ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
-                     "Attempting to construct a shape array of mismatching size");
-
-    boost::array<unsigned int, n> shape;
-    for (unsigned int i = 0; i < n; i++)
-    {
-        shape[i] = tensorInfo.GetShape()[i];
-    }
-    return shape;
-}
+#include <vector>

template <typename T, std::size_t n>
struct LayerTestResult
{
    LayerTestResult(const armnn::TensorInfo& outputInfo)
+        : m_Supported(true)
+        , m_CompareBoolean(false)
    {
-        auto shape( GetTensorShapeAsArray<n>(outputInfo) );
-        output.resize(shape);
-        outputExpected.resize(shape);
-        supported = true;
-        compareBoolean = false;
+        m_ActualData.reserve(outputInfo.GetNumElements());
+        m_ExpectedData.reserve(outputInfo.GetNumElements());
+        m_ActualShape = outputInfo.GetShape();
+        m_ExpectedShape = outputInfo.GetShape();
    }
-    boost::multi_array<T, n> output;
-    boost::multi_array<T, n> outputExpected;
-    bool supported;
-    bool compareBoolean;
+    LayerTestResult(const std::vector<T>& actualData,
+                    const std::vector<T>& expectedData,
+                    const armnn::TensorShape& actualShape,
+                    const armnn::TensorShape& expectedShape)
+        : m_ActualData(actualData)
+        , m_ExpectedData(expectedData)
+        , m_ActualShape(actualShape)
+        , m_ExpectedShape(expectedShape)
+        , m_Supported(true)
+        , m_CompareBoolean(false)
+    {}
+
+    LayerTestResult(const std::vector<T>& actualData,
+                    const std::vector<T>& expectedData,
+                    const armnn::TensorShape& actualShape,
+                    const armnn::TensorShape& expectedShape,
+                    const bool compareBoolean)
+        : m_ActualData(actualData)
+        , m_ExpectedData(expectedData)
+        , m_ActualShape(actualShape)
+        , m_ExpectedShape(expectedShape)
+        , m_Supported(true)
+        , m_CompareBoolean(compareBoolean)
+    {}
+
+    std::vector<T> m_ActualData;
+    std::vector<T> m_ExpectedData;
+    armnn::TensorShape m_ActualShape;
+    armnn::TensorShape m_ExpectedShape;
+
+    bool m_Supported;
+    bool m_CompareBoolean;
};
+
+
+
+
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index f32d367d37..ad23f8f380 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -38,9 +38,11 @@ LayerTestResult LogSoftmaxTestImpl(
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
-   LayerTestResult result(outputInfo);
-
result.outputExpected = - MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset)); + + auto inputTensor = armnnUtils::QuantizedVector(inputValues, qScale, qOffset); + + std::vector actualOutput(outputInfo.GetNumElements()); + std::vector expectedOutput = armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); @@ -55,14 +57,17 @@ LayerTestResult LogSoftmaxTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - auto inputTensor = MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputValues, qScale, qOffset)); - CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin()); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputInfo.GetShape()); - return result; } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp index 4f04673171..119e76bda9 100644 --- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp @@ -35,9 +35,7 @@ LayerTestResult LogicalUnaryTestHelper( ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims); armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean); - auto inputTensor = MakeTensor(inputTensorInfo, input); - - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -55,16 +53,18 @@ LayerTestResult LogicalUnaryTestHelper( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutput); - ret.compareBoolean = true; - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape(), + true); } template @@ -89,10 +89,7 @@ LayerTestResult LogicalBinaryTestHelper( ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims); armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean); - auto inputTensor0 = MakeTensor(inputTensorInfo0, input0); - auto inputTensor1 = MakeTensor(inputTensorInfo1, input1); - - LayerTestResult ret(outputTensorInfo); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); @@ -113,17 +110,19 @@ LayerTestResult LogicalBinaryTestHelper( inputHandle1->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle0.get(), inputTensor0.origin()); - 
CopyDataToITensorHandle(inputHandle1.get(), inputTensor1.origin()); + CopyDataToITensorHandle(inputHandle0.get(), input0.data()); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); workload->PostAllocationConfigure(); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutput); - ret.compareBoolean = true; - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape(), + true); } class UnaryTestData diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp index 1c63542dcb..11003a2e97 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp @@ -20,18 +20,17 @@ #include -#include - namespace { template> void LstmUtilsVectorBatchVectorAddTestImpl( - boost::multi_array& vec, - boost::multi_array& batchVec, + std::vector& vec, + std::vector& batchVec, uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput ) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -45,19 +44,20 @@ void LstmUtilsVectorBatchVectorAddTestImpl( VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - auto result = CompareTensors(batchVec, expectedOutput); + auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); - BOOST_TEST(batchVec[0][0] == 1.0f); + BOOST_TEST(batchVec[0] == 1.0f); } template> void LstmUtilsZeroVectorTestImpl( - boost::multi_array& input, + std::vector& input, uint32_t vSize, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -71,7 +71,7 @@ void LstmUtilsZeroVectorTestImpl( ZeroVector(*outputEncoder, vSize); // check shape and compare values - auto result = CompareTensors(input, expectedOutput); + auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position @@ -82,10 +82,11 @@ void LstmUtilsZeroVectorTestImpl( template> void LstmUtilsMeanStddevNormalizationTestImpl( - boost::multi_array& input, + std::vector& input, uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -98,21 +99,22 @@ void LstmUtilsMeanStddevNormalizationTestImpl( MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f); // check shape and compare values - auto result = CompareTensors(input, expectedOutput); + auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position outputEncoder->Set(1.0f); - BOOST_TEST(input[0][0] == 1.0f); + BOOST_TEST(input[0] == 1.0f); } template> void LstmUtilsVectorBatchVectorCwiseProductTestImpl( - boost::multi_array& vec, - boost::multi_array& batchVec, + std::vector& vec, + std::vector& batchVec, 
uint32_t vSize, uint32_t nBatch, - boost::multi_array& expectedOutput) + std::vector& expectedOutput, + armnn::TensorShape& expectedShape) { float qScale = 0.0f; int32_t qOffset = 0; @@ -126,12 +128,12 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl( VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - auto result = CompareTensors(batchVec, expectedOutput); + auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape); BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); - BOOST_TEST(batchVec[0][0] == 1.0f); + BOOST_TEST(batchVec[0] == 1.0f); } // Lstm Layer tests: @@ -142,16 +144,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) { IgnoreUnused(memoryManager); - unsigned int batchSize = armnn::numeric_cast(input.shape()[0]); - unsigned int inputSize = armnn::numeric_cast(input.shape()[1]); - unsigned int outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + unsigned int batchSize = armnn::numeric_cast(inputShape[0]); + unsigned int inputSize = armnn::numeric_cast(inputShape[1]); + unsigned int outputSize = armnn::numeric_cast(outputExpectedShape[1]); // cellSize and outputSize have the same size when there is no projection. 
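// Aside: a minimal illustrative sketch, not taken from the patch itself, of the
// flat-vector pattern the LstmUtils tests above migrate to. The payload lives in
// a plain std::vector and the shape travels separately, so the four-argument
// CompareTensors overload can check values and shape together. The values, the
// { 2, 2 } shape and the float element type below are assumptions for the example.
//
//     std::vector<float> actual   = { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<float> expected = { 1.0f, 2.0f, 3.0f, 4.0f };
//     armnn::TensorShape shape({ 2, 2 });
//     auto result = CompareTensors(actual, expected, shape, shape);
//     BOOST_TEST(result.m_Result, result.m_Message.str());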
unsigned numUnits = outputSize; @@ -164,30 +168,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - std::vector inputVector; inputVector.assign(input.data(), input.data() + (batchSize * inputSize)); - auto inputTensor = MakeTensor(inputTensorInfo, inputVector); std::vector cellStateInVector(batchSize * numUnits, T()); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, T()); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -219,59 +212,59 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset); - auto inputToInputWeights = MakeTensor(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f, - -0.34550029f, 0.04266912f, -0.15680569f, - -0.34856534f, 0.43890524f}); + std::vector inputToInputWeights = {-0.45018822f, -0.02338299f, -0.0870589f, + -0.34550029f, 0.04266912f, -0.15680569f, + -0.34856534f, 0.43890524f}; - auto inputToForgetWeights = MakeTensor(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f, - -0.31343272f, -0.40032279f, 0.44781327f, - 0.01387155f, -0.35593212f}); + std::vector inputToForgetWeights = { 0.09701663f, 0.20334584f, -0.50592935f, + -0.31343272f, -0.40032279f, 0.44781327f, + 0.01387155f, -0.35593212f}; - auto inputToCellWeights = MakeTensor(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, - -0.20583314f, 0.44344562f, 0.22077113f, - -0.29909778f}); + std::vector inputToCellWeights = { -0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, + -0.20583314f, 0.44344562f, 0.22077113f, + -0.29909778f}; - auto inputToOutputWeights = MakeTensor(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f, - 0.40525138f, 0.44272184f, 0.03897077f, - -0.1556896f, 0.19487578f}); + std::vector inputToOutputWeights = { -0.25065863f, -0.28290087f, 0.04613829f, + 0.40525138f, 0.44272184f, 0.03897077f, + -0.1556896f, 0.19487578f}; - auto recurrentToInputWeights = MakeTensor(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f, - -0.35746509f, 0.28902304f, 0.08183324f, - -0.16555229f, 0.02286911f, -0.13566875f, - 0.03034258f, 0.48091322f, -0.12528998f, - 0.24077177f, -0.51332325f, -0.33502164f, - 0.10629296f}); + std::vector recurrentToInputWeights = {-0.0063535f, -0.2042388f, 0.31454784f, + -0.35746509f, 0.28902304f, 0.08183324f, + -0.16555229f, 0.02286911f, -0.13566875f, + 
0.03034258f, 0.48091322f, -0.12528998f, + 0.24077177f, -0.51332325f, -0.33502164f, + 0.10629296f}; - auto recurrentToForgetWeights = MakeTensor(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f, - 0.2112639f, 0.27654213f, 0.20864892f, - -0.07646349f, 0.45877004f, 0.00141793f, - -0.14609534f, 0.36447752f, 0.09196436f, - 0.28053468f, 0.01560611f, -0.20127171f, - -0.01140004f}); + std::vector recurrentToForgetWeights = { -0.48684245f, -0.06655136f, 0.42224967f, + 0.2112639f, 0.27654213f, 0.20864892f, + -0.07646349f, 0.45877004f, 0.00141793f, + -0.14609534f, 0.36447752f, 0.09196436f, + 0.28053468f, 0.01560611f, -0.20127171f, + -0.01140004f}; - auto recurrentToCellWeights = MakeTensor(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f, - 0.26320225f, 0.05695659f, -0.00123841f, - -0.4744786f, -0.35869038f, -0.06418842f, - -0.13502428f, -0.501764f, 0.22830659f, - -0.46367589f, 0.26016325f, -0.03894562f, - -0.16368064f}); + std::vector recurrentToCellWeights = { -0.3407414f, 0.24443203f, -0.2078532f, + 0.26320225f, 0.05695659f, -0.00123841f, + -0.4744786f, -0.35869038f, -0.06418842f, + -0.13502428f, -0.501764f, 0.22830659f, + -0.46367589f, 0.26016325f, -0.03894562f, + -0.16368064f}; - auto recurrentToOutputWeights = MakeTensor(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f, - 0.09215671f, 0.24107647f, -0.39835793f, - 0.18212086f, 0.01301402f, 0.48572797f, - -0.50656658f, 0.20047462f, -0.20607421f, - -0.51818722f, -0.15390486f, 0.0468148f, - 0.39922136f}); + std::vector recurrentToOutputWeights = { 0.43385774f, -0.17194885f, 0.2718237f, + 0.09215671f, 0.24107647f, -0.39835793f, + 0.18212086f, 0.01301402f, 0.48572797f, + -0.50656658f, 0.20047462f, -0.20607421f, + -0.51818722f, -0.15390486f, 0.0468148f, + 0.39922136f}; - auto cellToInputWeights = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector cellToInputWeights = {0., 0., 0., 0.}; - auto inputGateBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector inputGateBias = {0., 0., 0., 0.}; - auto forgetGateBias = MakeTensor(tensorInfo4, {1., 1., 1., 1.}); + std::vector forgetGateBias = {1., 1., 1., 1.}; - auto cellBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector cellBias = {0., 0., 0., 0.}; - auto outputGateBias = MakeTensor(tensorInfo4, {0., 0., 0., 0.}); + std::vector outputGateBias = {0., 0., 0., 0.}; armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8); @@ -287,19 +280,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl( armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4); armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - 
AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
-    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
-    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
-    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());

     data.m_InputToInputWeights = &inputToInputWeightsTensor;
     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -330,15 +323,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
     cellStateOutHandle->Allocate();
     outputHandle->Allocate();

-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
-    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

     workload->Execute();

-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

-    return ret;
+    return LayerTestResult<T, 2>(actualOutput,
+                                 outputVector,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
 }

 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -346,8 +342,8 @@ LayerTestResult<T, 2>
 LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory,
-                                                  const boost::multi_array<T, 2>& input,
-                                                  const boost::multi_array<T, 2>& outputExpected,
+                                                  const std::vector<T>& input,
+                                                  const std::vector<T>& outputExpected,
                                                   float qScale = 0.0f,
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
@@ -368,30 +364,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
     armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
     armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

-    LayerTestResult<T, 2> ret(outputTensorInfo);
-
     std::vector<T> inputVector;
     inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
-    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, inputVector);

     std::vector<T> cellStateInVector(batchSize * numUnits, T());
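The shape of this change is the same for every weight and bias in the file: the MakeTensor copy disappears and the flat vector is handed straight to the backend tensor handle. A minimal sketch of the new convention, using only the calls visible in this patch (the names weights and weightsInfo are illustrative, not from the patch):

    // Flat row-major data; the shape now lives only in the TensorInfo.
    std::vector<float> weights = { 0.1f, 0.2f, 0.3f, 0.4f };
    armnn::TensorInfo weightsInfo({ 2, 2 }, armnn::DataType::Float32);
    armnn::ScopedTensorHandle weightsTensor(weightsInfo);
    // .data() replaces the old &weights[0][0] element address.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());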
- auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, T()); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -425,135 +410,118 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset); - auto inputToInputWeights = - MakeTensor(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f, - 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f, - -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f, - -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f, - -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f, - -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f, - -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f, - 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f, - 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f, - 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f, - -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f, - 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f, - -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f, - -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f, - -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f, - 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f, - -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f, - -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f, - -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f, - -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f - }); - - auto inputToForgetWeights = - MakeTensor(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f, - -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f, - -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f, - 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f, - 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f, - -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f, - -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f, - 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f, - 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f, - 0.12784666f, 0.07077897f, 
0.025725935f, 0.04165009f,0.07241905f, - 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f, - -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f, - 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f, - -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f, - -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f, - 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f, - 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f, - 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f, - -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f, - 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f - }); - - auto inputToCellWeights = - MakeTensor(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f, - -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f, - -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f, - -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f, - -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f, - 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f, - -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f, - 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f, - -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f, - -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f, - -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f, - 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f, - 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f, - 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f, - -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f, - -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f, - -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f, - -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f, - -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f, - -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f, - 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f, - 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f, - 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f, - 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f, - 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f - }); - - auto inputToOutputWeights = - MakeTensor(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f, - -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f, - 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f, - -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f, - -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f, - 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f, - -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f, - -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f, - -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f, - -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f, - 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f, - 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f, - 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f, - -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f, - 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f, - 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f, - -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f, - 0.08949725f, 
0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f, - -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f, - -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f - }); - - auto inputGateBias = - MakeTensor(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f, - -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f, - -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f, - 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f - }); - - auto forgetGateBias = - MakeTensor(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, - 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f, - 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f, - -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f, - 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f - }); - - auto cellBias = - MakeTensor(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, - -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f, - -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f, - -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f, - 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f - }); - - auto outputGateBias = - MakeTensor(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f, - 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f, - 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f, - -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f - }); - - auto recurrentToInputWeights = - MakeTensor(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, + std::vector inputToInputWeights = {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f, + 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f, + -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f, + -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f, + -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f, + -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f, + -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f, + 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f, + 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f, + 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f, + -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f, + 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f, + -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f, + -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f, + -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f, + 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f, + -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f, + -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f, + -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f, + -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f }; + + std::vector inputToForgetWeights = {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f, + -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f, + -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f, + 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f, + 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f, + -0.12257899f, 
-0.033945758f,-0.031303465f, 0.045630626f,0.06843887f, + -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f, + 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f, + 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f, + 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f, + 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f, + -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f, + 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f, + -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f, + -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f, + 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f, + 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f, + 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f, + -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f, + 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f }; + + std::vector inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f, + -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f, + -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f, + -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f, + -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f, + 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f, + -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f, + 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f, + -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f, + -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f, + -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f, + 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f, + 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f, + 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f, + -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f, + -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f, + -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f, + -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f, + -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f, + -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f, + 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f, + 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f, + 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f, + 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f, + 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f }; + + std::vector inputToOutputWeights ={-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f, + -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f, + 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f, + -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f, + -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f, + 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f, + -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f, + -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f, + -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f, + -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f, + 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f, + 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f, + 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f, + -0.078907564f,-0.06707616f, -0.11844508f, 
-0.09986688f,-0.07509403f, + 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f, + 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f, + -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f, + 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f, + -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f, + -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f }; + + std::vector inputGateBias = {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f, + -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f, + -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f, + 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f }; + + std::vector forgetGateBias ={0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, + 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f, + 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f, + -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f, + 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f }; + + std::vector cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, + -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f, + -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f, + -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f, + 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f }; + + std::vector outputGateBias ={0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f, + 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f, + 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f, + -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f}; + + std::vector recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f, -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f, -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f, @@ -632,11 +600,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f, -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f, - -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f - }); + -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f }; - auto recurrentToForgetWeights = - MakeTensor(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, + std::vector recurrentToForgetWeights = {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f, -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f, 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f, @@ -715,11 +681,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f, 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f, - -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f - }); + -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f }; - auto recurrentToCellWeights = - MakeTensor(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f, + std::vector recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f, 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f, 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f, -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f, @@ 
-798,12 +762,10 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f, -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f, -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f, - -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f - }); + -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f }; - auto recurrentToOutputWeights = - MakeTensor(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f, - -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f, + std::vector recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f, + -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f, @@ -879,101 +841,90 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl -0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f, - 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f - }); - - auto cellToInputWeights = - MakeTensor(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f, - -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f, - -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f, - 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f - }); - - - auto cellToForgetWeights = - MakeTensor(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f, - -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f, - -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f, - 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f - }); - - auto cellToOutputWeights = - MakeTensor(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f, - -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f, - -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f, - 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f - }); - - auto projectionWeights = - MakeTensor(tensorInfo16x20, - {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f, - 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f, - -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f, - -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f, - 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f, - 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f, - 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f, - 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f, - -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f, - -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f, - -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f, - 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f, - 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f, - 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f, - 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f, - 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f, - -0.029149994f, 
0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f, - 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f, - -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f, - 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f, - -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f, - -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f, - 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f, - -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f, - 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f, - -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f, - -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f, - 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f, - -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f, - -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f, - -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f, - 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f, - 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f, - -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f, - 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f, - 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f, - 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f, - 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f, - 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f, - -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f, - -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f, - 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f, - -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f, - -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f, - 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f, - 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f, - 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f, - -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f, - -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f, - -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f, - 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f, - -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f, - 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f, - 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f, - -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f, - -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f, - -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f, - 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f, - -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f, - -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f, - -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f, - 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f, - 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f, - 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f - }); + 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f}; + + std::vector cellToInputWeights = {0.040369894f, 0.030746894f, 0.24704495f, 
0.018586371f, -0.037586458f, + -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f, + -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f, + 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f}; + + + std::vector cellToForgetWeights = {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f, + -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f, + -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f, + 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f}; + + std::vector cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f, + -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f, + -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f, + 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f}; + + std::vector projectionWeights={-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f, + 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f, + -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f, + -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f, + 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f, + 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f, + 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f, + 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f, + -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f, + -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f, + -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f, + 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f, + 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f, + 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f, + 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f, + 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f, + -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f, + 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f, + -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f, + 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f, + -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f, + -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f, + 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f, + -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f, + 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f, + -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f, + -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f, + 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f, + -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f, + -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f, + -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f, + 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f, + 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f, + -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f, + 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f, + 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f, + 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f, + 
0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f, + 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f, + -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f, + -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f, + 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f, + -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f, + -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f, + 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f, + 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f, + 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f, + -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f, + -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f, + -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f, + 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f, + -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f, + 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f, + 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f, + -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f, + -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f, + -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f, + 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f, + -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f, + -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f, + -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f, + 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f, + 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f, + 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f}; std::vector projectionBiasVector(outputSize, 0.f); - auto projectionBias = MakeTensor(tensorInfo16, projectionBiasVector); armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5); @@ -993,23 +944,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20); armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
-    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
-    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
+    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
+    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
+    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());

     data.m_InputToInputWeights = &inputToInputWeightsTensor;
     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1035,7 +986,6 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
     data.m_Parameters.m_PeepholeEnabled = true;
     data.m_Parameters.m_ProjectionEnabled = true;

-
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);

     inputHandle->Allocate();
     outputStateInHandle->Allocate();
@@ -1046,16 +996,18 @@
     cellStateOutHandle->Allocate();
     outputHandle->Allocate();

-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
-    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

     workload->Execute();

-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
-
-    return ret;
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

+    return LayerTestResult<T, 2>(actualOutput,
+                                 outputVector,
+                                 outputHandle->GetShape(),
+                                 outputTensorInfo.GetShape());
 }

 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1063,8 +1015,10 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&
memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) @@ -1074,10 +1028,10 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( bool peepholeEnabled = true; bool projectionEnabled = false; // These are not the input and the output of Lstm yet - unsigned int batchSize = armnn::numeric_cast(input.shape()[0]); - unsigned int inputSize = armnn::numeric_cast(input.shape()[1]); + unsigned int batchSize = armnn::numeric_cast(inputShape[0]); + unsigned int inputSize = armnn::numeric_cast(inputShape[1]); - unsigned int outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + unsigned int outputSize = armnn::numeric_cast(outputExpectedShape[1]); const unsigned int cellSize = outputSize; @@ -1095,14 +1049,10 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( // List of inputs std::vector inputData; inputData.assign(input.data(), input.data() + batchSize*inputSize); - auto inputTensor = MakeTensor(inputTensorInfo, inputData); std::vector outputStateInVector(batchSize * outputSize, 0.f); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); std::vector cellStateInVector(batchSize * cellSize, 0.f); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - // Prepare all the weights in the descriptor for LSTM armnn::LstmQueueDescriptor data; @@ -1110,41 +1060,51 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset); - auto inputToCellWeights = MakeTensor(tensorInfoInput, - {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f, - 0.04717243f, 0.48944736f, -0.38535351f, - -0.17212132f}); - auto inputToForgetWeights = MakeTensor(tensorInfoInput, - {-0.55291498f, -0.42866567f, 0.13056988f, - -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f, - 0.33826375f}); - auto inputToOutputWeights = MakeTensor(tensorInfoInput, - {0.10725588f, -0.02335852f, -0.55932593f, - -0.09426838f, -0.44257352f, 0.54939759f, - 0.01533556f, 0.42751634f}); - auto cellBias = MakeTensor(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f}); - auto forgetGateBias = MakeTensor(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f}); - auto outputGateBias = MakeTensor(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f}); - - auto recurrentToCellWeights = MakeTensor(tensorInfoOutput, - {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f, - 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f, - 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f, - 0.21193194f}); - auto recurrentToForgetWeights = MakeTensor(tensorInfoOutput, - {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f, - 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f, - -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f}); - - auto recurrentToOutputWeights = MakeTensor(tensorInfoOutput, - {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f, - -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f, - 0.50248802f, 0.26114327f, 
-0.43736315f, 0.33149987f}); - - auto cellToForgetWeights = MakeTensor(tensorInfoNumUnits, - {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f}); - auto cellToOutputWeights = MakeTensor(tensorInfoNumUnits, - {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f}); + std::vector inputToCellWeights = + { + -0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f, + 0.04717243f, 0.48944736f, -0.38535351f, + -0.17212132f + }; + std::vector inputToForgetWeights = + { + -0.55291498f, -0.42866567f, 0.13056988f, + -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f, + 0.33826375f + }; + std::vector inputToOutputWeights = + { + 0.10725588f, -0.02335852f, -0.55932593f, + -0.09426838f, -0.44257352f, 0.54939759f, + 0.01533556f, 0.42751634f + }; + std::vector cellBias = {0.f, 0.f, 0.f, 0.f}; + std::vector forgetGateBias = {1.f, 1.f, 1.f, 1.f}; + std::vector outputGateBias = {0.f, 0.f, 0.f, 0.f}; + + std::vector recurrentToCellWeights = + { + 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f, + 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f, + 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f, + 0.21193194f + }; + std::vector recurrentToForgetWeights = + { + -0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f, + 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f, + -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f + }; + + std::vector recurrentToOutputWeights = + { + 0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f, + -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f, + 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f + }; + + std::vector cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f}; + std::vector cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f}; armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput); armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput); @@ -1158,25 +1118,23 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput); armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput); - armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits); armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + 
AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data()); data.m_InputToCellWeights = &inputToCellWeightsTensor; data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1202,29 +1160,28 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( data.m_Parameters.m_ClippingThresProj = 0.0; data.m_Parameters.m_ClippingThresCell = 0.0; - // List of outputs std::vector scratchBufferVector(batchSize * scratchBufferSize, T()); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); LayerTestResult ret0(scratchBufferTensorInfo); // Output state for a certain time step std::vector outputStateOutVector(batchSize * outputSize, T()); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); LayerTestResult ret1(outputStateOutTensorInfo); // Cell state for a certain time step std::vector cellStateOutVector(batchSize * cellSize, T()); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); LayerTestResult ret2(cellStateOutTensorInfo); // Output for a certain time step - std::vector outputVector(batchSize * outputSize, T()); - auto outputTensor = MakeTensor(outputTensorInfo, outputVector); std::vector outputData; outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize); LayerTestResult ret3(outputTensorInfo); - ret3.outputExpected = MakeTensor(outputTensorInfo, outputData); + ret3.m_ExpectedData = outputData; + + std::vector actualScratchBufferOutput(scratchBufferTensorInfo.GetNumElements()); + std::vector actualOutputStateOutput(outputStateOutTensorInfo.GetNumElements()); + std::vector actualCellStateOutput(cellStateOutTensorInfo.GetNumElements()); + std::vector actualOutput(outputTensorInfo.GetNumElements()); // Prepare the inputs and outputs for the workload std::unique_ptr inputHandle = @@ -1255,7 +1212,6 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( std::unique_ptr workload = workloadFactory.CreateLstm(data, info); - inputHandle->Allocate(); outputStateInHandle->Allocate(); cellStateInHandle->Allocate(); @@ -1265,21 +1221,25 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); - - 
CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]); - CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]); - CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]); + CopyDataToITensorHandle(scratchBufferHandle.get(), scratchBufferVector.data()); + CopyDataToITensorHandle(outputStateOutHandle.get(), outputStateOutVector.data()); + CopyDataToITensorHandle(cellStateOutHandle.get(), cellStateOutVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get()); - CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get()); - CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get()); - CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualScratchBufferOutput.data(), scratchBufferHandle.get()); + CopyDataFromITensorHandle(actualOutputStateOutput.data(), outputStateOutHandle.get()); + CopyDataFromITensorHandle(actualCellStateOutput.data(), cellStateOutHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + ret0.m_ActualData = actualScratchBufferOutput; + ret1.m_ActualData = actualOutputStateOutput; + ret2.m_ActualData = actualCellStateOutput; + ret3.m_ActualData = actualOutput; return ret3; } @@ -1289,8 +1249,8 @@ LayerTestResult LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected, + const std::vector& input, + const std::vector& outputExpected, float qScale = 0.0f, int32_t qOffset = 0, armnn::DataType constantDataType = armnn::DataType::Float32) @@ -1311,30 +1271,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset); - LayerTestResult ret(outputTensorInfo); - std::vector inputVector; inputVector.assign(input.data(), input.data() + (batchSize * inputSize)); - auto inputTensor = MakeTensor(inputTensorInfo, inputVector); std::vector cellStateInVector(batchSize * numUnits, 0.f); - auto cellStateInTensor = MakeTensor(cellStateInTensorInfo, cellStateInVector); - std::vector outputStateInVector(batchSize * outputSize, 0.f); - auto outputStateInTensor = MakeTensor(outputStateInTensorInfo, outputStateInVector); - std::vector scratchBufferVector(batchSize * numUnits * 4, 0.f); - auto scratchBufferTensor = MakeTensor(scratchBufferTensorInfo, scratchBufferVector); - std::vector outputStateOutVector(batchSize * outputSize, 0.f); - auto outputStateOutTensor = MakeTensor(outputStateOutTensorInfo, outputStateOutVector); - std::vector cellStateOutVector(batchSize * numUnits, 0.f); - auto cellStateOutTensor = MakeTensor(cellStateOutTensorInfo, cellStateOutVector); + + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize)); - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr cellStateInHandle = @@ -1368,95 +1317,73 @@ 
LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset); armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset); - auto inputToInputWeights = - MakeTensor(tensorInfo4x5, { 0.5f, 0.6f, 0.7f, -0.8f, -0.9f, - 0.1f, 0.2f, 0.3f, -0.4f, 0.5f, - -0.8f, 0.7f, -0.6f, 0.5f, -0.4f, - -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize} + std::vector inputToInputWeights = {0.5f, 0.6f, 0.7f, -0.8f, -0.9f, + 0.1f, 0.2f, 0.3f, -0.4f, 0.5f, + -0.8f, 0.7f, -0.6f, 0.5f, -0.4f, + -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}; //{numUnits, inputSize} - auto inputToForgetWeights = - MakeTensor(tensorInfo4x5, {-0.6f, -0.1f, 0.3f, 0.2f, 0.9f, - -0.5f, -0.2f, -0.4f, 0.3f, -0.8f, - -0.4f, 0.3f, -0.5f, -0.4f, -0.6f, - 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize} + std::vector inputToForgetWeights = { -0.6f, -0.1f, 0.3f, 0.2f, 0.9f, + -0.5f, -0.2f, -0.4f, 0.3f, -0.8f, + -0.4f, 0.3f, -0.5f, -0.4f, -0.6f, + 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}; //{numUnits, inputSize} - auto inputToCellWeights = - MakeTensor(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f, - 0.5f, -0.2f, -0.3f, -0.2f, -0.6f, - 0.6f, -0.1f, -0.4f, -0.3f, -0.7f, - 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}); //{numUnits, inputSize} + std::vector inputToCellWeights = {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f, + 0.5f, -0.2f, -0.3f, -0.2f, -0.6f, + 0.6f, -0.1f, -0.4f, -0.3f, -0.7f, + 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}; //{numUnits, inputSize} - auto inputToOutputWeights = - MakeTensor(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f, - -0.7f, 0.3f, -0.3f, -0.8f, -0.2f, - 0.6f, -0.2f, 0.4f, -0.7f, -0.3f, - -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}); //{numUnits, inputSize} + std::vector inputToOutputWeights = {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f, + -0.7f, 0.3f, -0.3f, -0.8f, -0.2f, + 0.6f, -0.2f, 0.4f, -0.7f, -0.3f, + -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}; //{numUnits, inputSize} - auto inputGateBias = - MakeTensor(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits} + std::vector inputGateBias = {0.03f, 0.15f, 0.22f, 0.38f}; //{numUnits} - auto forgetGateBias = - MakeTensor(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits} + std::vector forgetGateBias = {0.1f, -0.3f, -0.2f, 0.1f}; //{numUnits} - auto cellBias = - MakeTensor(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits} + std::vector cellBias = {-0.05f, 0.72f, 0.25f, 0.08f}; //{numUnits} - auto outputGateBias = - MakeTensor(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits} + std::vector outputGateBias = {0.05f, -0.01f, 0.2f, 0.1f}; //{numUnits} - auto recurrentToInputWeights = - MakeTensor(tensorInfo4x3, {-0.2f, -0.3f, 0.4f, + std::vector recurrentToInputWeights ={-0.2f, -0.3f, 0.4f, 0.1f, -0.5f, 0.9f, -0.2f, -0.3f, -0.7f, - 0.05f, -0.2f, -0.6f}); //{numUnits, outputSize} + 0.05f, -0.2f, -0.6f}; //{numUnits, outputSize} - auto recurrentToCellWeights = - MakeTensor(tensorInfo4x3, {-0.3f, 0.2f, 0.1f, + std::vector recurrentToCellWeights = {-0.3f, 0.2f, 0.1f, -0.3f, 0.8f, -0.08f, -0.2f, 0.3f, 0.8f, - -0.6f, -0.1f, 0.2f}); //{numUnits, outputSize} + -0.6f, -0.1f, 0.2f}; //{numUnits, outputSize} - auto recurrentToForgetWeights = - MakeTensor(tensorInfo4x3, {-0.5f, -0.3f, -0.5f, - -0.2f, 0.6f, 0.4f, - 0.9f, 0.3f, -0.1f, - 0.2f, 0.5f, 0.2f}); //{numUnits, outputSize} + std::vector recurrentToForgetWeights = { -0.5f, -0.3f, -0.5f, + -0.2f, 0.6f, 0.4f, + 0.9f, 0.3f, -0.1f, + 0.2f, 0.5f, 0.2f}; //{numUnits, outputSize} - auto recurrentToOutputWeights = - 
MakeTensor(tensorInfo4x3, { 0.3f, -0.1f, 0.1f, - -0.2f, -0.5f, -0.7f, - -0.2f, -0.6f, -0.1f, - -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize} + std::vector recurrentToOutputWeights = { 0.3f, -0.1f, 0.1f, + -0.2f, -0.5f, -0.7f, + -0.2f, -0.6f, -0.1f, + -0.4f, -0.7f, -0.2f}; //{numUnits, outputSize} - auto cellToInputWeights = - MakeTensor(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits} + std::vector cellToInputWeights = {0.05f, 0.1f, 0.25f, 0.15f}; //{numUnits} - auto cellToForgetWeights = - MakeTensor(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits} + std::vector cellToForgetWeights = {-0.02f, -0.15f, -0.25f, -0.03f}; //{numUnits} - auto cellToOutputWeights = - MakeTensor(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits} + std::vector cellToOutputWeights = {0.1f, -0.1f, -0.5f, 0.05f}; //{numUnits} - auto projectionWeights = - MakeTensor(tensorInfo3x4, - {-0.1f, 0.2f, 0.01f, -0.2f, - 0.1f, 0.5f, 0.3f, 0.08f, - 0.07f, 0.2f, -0.4f, 0.2f}); //{outputSize, numUnits} + std::vector projectionWeights = {-0.1f, 0.2f, 0.01f, -0.2f, + 0.1f, 0.5f, 0.3f, 0.08f, + 0.07f, 0.2f, -0.4f, 0.2f}; //{outputSize, numUnits} - std::vector projectionBiasVector(outputSize, 0.f); - auto projectionBias = MakeTensor(tensorInfo3, projectionBiasVector); //{outputSize} + std::vector projectionBiasVector(outputSize, 0.f); //{outputSize} - auto inputLayerNormWeights = - MakeTensor(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits} + std::vector inputLayerNormWeights = {0.1f, 0.2f, 0.3f, 0.5f}; //{numUnits} - auto forgetLayerNormWeights = - MakeTensor(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits} + std::vector forgetLayerNormWeights = {0.2f, 0.2f, 0.4f, 0.3f}; //{numUnits} - auto cellLayerNormWeights = - MakeTensor(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits} + std::vector cellLayerNormWeights = {0.7f, 0.2f, 0.3f, 0.8f}; //{numUnits} - auto outputLayerNormWeights = - MakeTensor(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits} + std::vector outputLayerNormWeights = {0.6f, 0.2f, 0.2f, 0.5f}; //{numUnits} armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5); @@ -1482,28 +1409,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4); armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4); - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); - 
AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]); - AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]); - - AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data()); + AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data()); + AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data()); + + AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data()); data.m_InputToInputWeights = &inputToInputWeightsTensor; data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1546,28 +1473,33 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + 
CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult QuantizedLstmTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputExpectedShape) { IgnoreUnused(memoryManager); - auto numBatches = armnn::numeric_cast(input.shape()[0]); - auto inputSize = armnn::numeric_cast(input.shape()[1]); - auto outputSize = armnn::numeric_cast(outputExpected.shape()[1]); + auto numBatches = armnn::numeric_cast(inputShape[0]); + auto inputSize = armnn::numeric_cast(inputShape[1]); + auto outputSize = armnn::numeric_cast(outputExpectedShape[1]); // Scale/Offset for input/output, cellState In/Out, weights, bias float inputOutputScale = 0.0078125f; @@ -1598,29 +1530,23 @@ LayerTestResult QuantizedLstmTestImpl( inputOutputScale, inputOutputOffset); - LayerTestResult ret(outputStateInfo); - // Input0 std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); // Input1 std::vector cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13 - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); - // Input2 std::vector outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14 - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output0 std::vector cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0 - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); // Output1 std::vector outputVector; // 1 outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -1658,24 +1584,24 @@ LayerTestResult QuantizedLstmTestImpl( armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset); // Weights and bias tensor data - auto inputToInputWeights = MakeTensor(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108}); - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48}); - - auto recurrentToInputWeights = MakeTensor(recurrentWeightsInfo, - {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26}); - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98}); - - auto inputGateBias = MakeTensor(biasInfo, {-7876, 13488, -726, 
32839}); - auto forgetGateBias = MakeTensor(biasInfo, {9206, -46884, -11693, -38724}); - auto cellBias = MakeTensor(biasInfo, {39481, 48624, 48976, -21419}); - auto outputGateBias = MakeTensor(biasInfo, {-58999, -17050, -41852, -40538}); + std::vector inputToInputWeights = {146, 250, 235, 171, 10, 218, 171, 108}; + std::vector inputToForgetWeights = {24, 50, 132, 179, 158, 110, 3, 169}; + std::vector inputToCellWeights = {133, 34, 29, 49, 206, 109, 54, 183}; + std::vector inputToOutputWeights = {195, 187, 11, 99, 109, 10, 218, 48}; + + std::vector recurrentToInputWeights = + {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26}; + std::vector recurrentToForgetWeights = + {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253}; + std::vector recurrentToCellWeights = + {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216}; + std::vector recurrentToOutputWeights = + {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98}; + + std::vector inputGateBias = {-7876, 13488, -726, 32839}; + std::vector forgetGateBias = {9206, -46884, -11693, -38724}; + std::vector cellBias = {39481, 48624, 48976, -21419}; + std::vector outputGateBias = {-58999, -17050, -41852, -40538}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo); @@ -1694,20 +1620,20 @@ LayerTestResult QuantizedLstmTestImpl( armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + 
AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); // Setup queue descriptor data.m_InputToInputWeights = &inputToInputWeightsTensor; @@ -1734,15 +1660,18 @@ LayerTestResult QuantizedLstmTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: CIFG, LayerNorm @@ -1750,8 +1679,8 @@ LayerTestResult QLstmTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -1816,21 +1745,18 @@ LayerTestResult QLstmTestImpl( // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors - std::vector cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); + std::vector cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149}; std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -1873,27 +1799,27 @@ LayerTestResult QLstmTestImpl( armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset); // Weights and bias tensor data - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51}); - auto recurrentToCellWeights = 
MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38}); - - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToForgetWeights = + {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51}; + std::vector recurrentToCellWeights = + {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64}; + std::vector recurrentToOutputWeights = + {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38}; + + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo); @@ -1913,21 +1839,21 @@ LayerTestResult QLstmTestImpl( armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]); + AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data()); // Setup queue descriptor data.m_InputToForgetWeights = &inputToForgetWeightsTensor; @@ -1972,15 +1898,18 @@ LayerTestResult QLstmTestImpl( cellStateOutHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: Projection, LayerNorm @@ -1988,8 +1917,8 @@ LayerTestResult QLstmTestImpl1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -2051,26 +1980,21 @@ LayerTestResult QLstmTestImpl1( outputScale, outputOffset); - LayerTestResult ret(outputStateInfo); - // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -2118,36 +2042,31 @@ LayerTestResult QLstmTestImpl1( 0); // Weights and bias tensor data - auto inputToInputWeights = MakeTensor(inputWeightsInfo, - 
{64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13}); - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToInputWeights = MakeTensor(recurrentWeightsInfo, - {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77}); - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}); - - auto inputGateBias = MakeTensor(biasInfo, {644245, 3221226, 4724464, 8160438}); - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto inputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {3277, 6553, 9830, 16384}); - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); - - auto projectionWeights = MakeTensor(projectionWeightsInfo, - {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}); + std::vector inputToInputWeights = + {64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13}; + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToInputWeights = {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77}; + std::vector recurrentToForgetWeights = {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}; + std::vector recurrentToCellWeights = {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}; + std::vector recurrentToOutputWeights = {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}; + + std::vector inputGateBias = {644245, 3221226, 4724464, 8160438}; + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector inputLayerNormWeights = {3277, 6553, 9830, 16384}; + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; + + std::vector projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}; // ScopedTensorHandles armnn::ScopedTensorHandle 
inputToInputWeightsTensor(inputWeightsInfo); @@ -2173,27 +2092,27 @@ LayerTestResult QLstmTestImpl1( armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data()); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data()); - AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]); - AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]); + AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data()); + AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data()); - AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data()); // Setup queue descriptor data.m_InputToInputWeights = &inputToInputWeightsTensor; @@ -2244,15 +2163,18 @@ LayerTestResult QLstmTestImpl1( cellStateOutHandle->Allocate(); 
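// The hunks above and below repeat one mechanical rewrite: a boost::multi_array
// element pointer such as &weights[0][0] becomes weights.data() on a flat
// std::vector. Both containers are contiguous and row-major, so the copy helpers
// receive an identical buffer either way; element (i, j) of the old 2-D view
// lives at flat[i * cols + j]. A minimal self-contained sketch of that
// equivalence follows; all names in it are illustrative, not from this patch.
#include <cassert>
#include <vector>

inline void FlatIndexingSketch()
{
    const unsigned int rows = 2;
    const unsigned int cols = 3;
    std::vector<float> flat(rows * cols, 0.0f);

    // Fill via the row-major index that replaces multi_array's [i][j] access.
    for (unsigned int i = 0; i < rows; ++i)
    {
        for (unsigned int j = 0; j < cols; ++j)
        {
            flat[i * cols + j] = static_cast<float>(i * cols + j);
        }
    }

    // The pointer handed to CopyDataToITensorHandle is the same either way:
    // &multiArray[0][0] and flat.data() both address the first element.
    assert(flat.data() == &flat[0]);
    assert(flat[1 * cols + 2] == 5.0f); // element (1, 2) of the 2x3 view
}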
outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]); - CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputVector.data()); + CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data()); + CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputVector, + outputHandle->GetShape(), + outputStateInfo.GetShape()); } // QLSTM: Projection, CIFG, LayerNorm @@ -2260,8 +2182,8 @@ LayerTestResult QLstmTestImpl2( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected) { IgnoreUnused(memoryManager); unsigned int numBatches = 2; @@ -2323,26 +2245,21 @@ LayerTestResult QLstmTestImpl2( outputScale, outputOffset); - LayerTestResult ret(outputStateInfo); - // Input tensors std::vector inputVector; inputVector.assign(input.data(), input.data() + (numBatches * inputSize)); - auto inputTensor = MakeTensor(inputInfo, inputVector); std::vector cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0}; - auto cellStateInTensor = MakeTensor(cellStateInfo, cellStateInVector); std::vector outputStateInVector = {0, 0, 0, 0, 0, 0}; - auto outputStateInTensor = MakeTensor(outputStateInfo, outputStateInVector); // Output tensors - std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; - auto cellStateOutTensor = MakeTensor(cellStateInfo, cellStateOutVector); + std::vector cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939}; std::vector outputVector; outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize)); - ret.outputExpected = MakeTensor(outputStateInfo, outputVector); + + std::vector actualOutput(outputStateInfo.GetNumElements()); // Create tensor handles std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); @@ -2390,30 +2307,29 @@ LayerTestResult QLstmTestImpl2( 0); // Weights and bias tensor data - auto inputToForgetWeights = MakeTensor(inputWeightsInfo, - {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}); - auto inputToCellWeights = MakeTensor(inputWeightsInfo, - {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}); - auto inputToOutputWeights = MakeTensor(inputWeightsInfo, - {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}); - - auto recurrentToForgetWeights = MakeTensor(recurrentWeightsInfo, - {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}); - auto recurrentToCellWeights = MakeTensor(recurrentWeightsInfo, - {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}); - auto recurrentToOutputWeights = MakeTensor(recurrentWeightsInfo, - {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}); - - auto forgetGateBias = MakeTensor(biasInfo, {2147484, -6442451, -4294968, 2147484}); - auto cellBias = MakeTensor(biasInfo, {-1073742, 15461883, 5368709, 1717987}); - 
auto outputGateBias = MakeTensor(biasInfo, {1073742, -214748, 4294968, 2147484}); - - auto forgetLayerNormWeights = MakeTensor(layerNormWeightsInfo, {6553, 6553, 13107, 9830}); - auto cellLayerNormWeights = MakeTensor(layerNormWeightsInfo, {22937, 6553, 9830, 26214}); - auto outputLayerNormWeights = MakeTensor(layerNormWeightsInfo, {19660, 6553, 6553, 16384}); - - auto projectionWeights = MakeTensor(projectionWeightsInfo, - {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}); + std::vector inputToForgetWeights = + {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64}; + std::vector inputToCellWeights = + {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77}; + std::vector inputToOutputWeights = + {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51}; + + std::vector recurrentToForgetWeights = + {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25}; + std::vector recurrentToCellWeights = + {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25}; + std::vector recurrentToOutputWeights = + {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25}; + + std::vector forgetGateBias = {2147484, -6442451, -4294968, 2147484}; + std::vector cellBias = {-1073742, 15461883, 5368709, 1717987}; + std::vector outputGateBias = {1073742, -214748, 4294968, 2147484}; + + std::vector forgetLayerNormWeights = {6553, 6553, 13107, 9830}; + std::vector cellLayerNormWeights = {22937, 6553, 9830, 26214}; + std::vector outputLayerNormWeights = {19660, 6553, 6553, 16384}; + + std::vector projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51}; // ScopedTensorHandles armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo); @@ -2435,23 +2351,23 @@ LayerTestResult QLstmTestImpl2( armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo); // Allocate and copy data - AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]); - AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]); + AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data()); + AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data()); - AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]); - AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]); - AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]); + AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data()); + AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data()); + 
AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());

-    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
-    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());

-    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());

     // Setup queue descriptor
     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -2498,15 +2414,18 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
     cellStateOutHandle->Allocate();
     outputHandle->Allocate();

-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
-    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

     workload->Execute();

-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

-    return ret;
+    return LayerTestResult<int8_t, 2>(actualOutput,
+                                      outputVector,
+                                      outputHandle->GetShape(),
+                                      outputStateInfo.GetShape());
 }

@@ -2519,13 +2438,10 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
 void LstmUtilsZeroVectorTest()
 {
     armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
-    boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
-        {2., 3., 3., 4.}));
-
-    boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
-        {0., 0., 0., 0.}));
+    std::vector<float> input = {2., 3., 3., 4.};
+    std::vector<float> expectedOutput = {0., 0., 0., 0.};

-    return LstmUtilsZeroVectorTestImpl(input, 4, expectedOutput);
+    return LstmUtilsZeroVectorTestImpl(input, 4, expectedOutput, inputDesc.GetShape());
 }

 void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
@@ -2533,16 +2449,16 @@
     uint32_t batchSize = 2;
     uint32_t vecSize = 4;
     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
-    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
-        { 0.1f, 0.2f, 0.3f, 0.4f,      //batch 0
-          0.9f, 1.0f, 1.1f, 1.2f }));  //batch 1
+    std::vector<float> input =
+        { 0.1f, 0.2f, 0.3f, 0.4f,    //batch 0
+          0.9f, 1.0f, 1.1f, 1.2f };  //batch 1

-    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
-        { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,      //batch 0
-          -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  }));  //batch 1
+    std::vector<float> expectedOutput =
+        { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,    //batch 0
+          -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  };  //batch 1

     return LstmUtilsMeanStddevNormalizationTestImpl(input,
-        vecSize, batchSize, expectedOutput);
+        vecSize, batchSize, expectedOutput, inputDesc.GetShape());
 }

 void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
@@ -2550,16 +2466,16 @@
     uint32_t batchSize =
2; uint32_t vecSize = 4; armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1 + 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1 - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( + std::vector expectedOutput = { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1 + 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1 return LstmUtilsMeanStddevNormalizationTestImpl(input, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, inputDesc.GetShape()); } void LstmUtilsMeanStddevNormalizationMixedZeroInputTest() @@ -2567,16 +2483,16 @@ void LstmUtilsMeanStddevNormalizationMixedZeroInputTest() uint32_t batchSize = 2; uint32_t vecSize = 4; armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1 + std::vector input = + { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 + 0.1f, 0.2f, 0.3f, 0.4f }; //batch 1 - boost::multi_array expectedOutput = MakeTensor(inputDesc, std::vector( - { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 - -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1 + std::vector expectedOutput = + { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0 + -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }; //batch 1 return LstmUtilsMeanStddevNormalizationTestImpl(input, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, inputDesc.GetShape()); } void LstmUtilsVectorBatchVectorCwiseProductTest() @@ -2584,13 +2500,13 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() uint32_t batchSize = 4; uint32_t vecSize = 29; armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32); - boost::multi_array vector = MakeTensor(vecDesc, std::vector( + std::vector vector = { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f, - 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f})); + 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}; armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array batchVector = MakeTensor(batchVecDesc, std::vector( + std::vector batchVector = { /* batch 0 */ 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f, @@ -2606,10 +2522,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() /* batch 3 */ -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f, -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f, - -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f})); + -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}; // Expect output = input * output + output. 
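// For the data used in this test, the expected values that follow are the
// elementwise product of `vector` and each batch row: batch 0 of `batchVector`
// repeats `vector`, so its expected row is the squares (1.1f * 1.1f == 1.21f,
// 2.2f * 2.2f == 4.84f, and so on), and the sign-flipped batches flip the sign
// of the product. A sketch of that reference computation over the flat layout,
// with an illustrative function name not taken from this patch:
#include <vector>

inline std::vector<float> CwiseProductReference(const std::vector<float>& vec,
                                                const std::vector<float>& batchVec,
                                                unsigned int vecSize,
                                                unsigned int batchSize)
{
    std::vector<float> out(batchSize * vecSize);
    for (unsigned int b = 0; b < batchSize; ++b)
    {
        for (unsigned int i = 0; i < vecSize; ++i)
        {
            // Row-major flat index, matching the vectors declared above.
            out[b * vecSize + i] = vec[i] * batchVec[b * vecSize + i];
        }
    }
    return out;
}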
- boost::multi_array expectedOutput = MakeTensor(batchVecDesc, std::vector( + std::vector expectedOutput = { /* batch 0 */ 1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f, 59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f, @@ -2633,10 +2549,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest() -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f, -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f, -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f, - -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f})); + -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}; return LstmUtilsVectorBatchVectorCwiseProductTestImpl(vector, batchVector, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, vecDesc.GetShape()); } void LstmUtilsVectorBatchVectorAddTest() @@ -2644,20 +2560,23 @@ void LstmUtilsVectorBatchVectorAddTest() uint32_t batchSize = 2; uint32_t vecSize = 3; armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32); - boost::multi_array vector = MakeTensor(vecDesc, std::vector( - { 0.0f, -0.5f, 1.0f})); + std::vector vector = { 0.0f, -0.5f, 1.0f}; armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32); - boost::multi_array batchVector = MakeTensor(batchVecDesc, std::vector( - { 1.0f, 2.0f, 3.0f, //batch 0 - 4.0f, 5.0f, 6.0f})); //batch 1 - - boost::multi_array expectedOutput = MakeTensor(batchVecDesc, std::vector( - { 1.0f, 1.5f, 4.0f, - 4.0f, 4.5f, 7.0f})); + std::vector batchVector = + { + 1.0f, 2.0f, 3.0f, //batch 0 + 4.0f, 5.0f, 6.0f //batch 1 + }; + + std::vector expectedOutput = + { + 1.0f, 1.5f, 4.0f, + 4.0f, 4.5f, 7.0f + }; return LstmUtilsVectorBatchVectorAddTestImpl(vector, batchVector, - vecSize, batchSize, expectedOutput); + vecSize, batchSize, expectedOutput, batchVecDesc.GetShape()); } #endif @@ -2668,15 +2587,15 @@ LayerTestResult LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - { 2., 3., 3., 4. })); + std::vector input = { 2., 3., 3., 4. 
}; armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + std::vector expectedOutput = {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, - -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})); + -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}; return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape()); } LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest( @@ -2685,19 +2604,18 @@ LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, - 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})); + 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}; armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, - -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, - -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, - 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, - -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f, - 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, - 0.02168f})); + std::vector expectedOutput = + {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, + -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, + -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, + 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, + -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f, + 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f}; return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2708,16 +2626,16 @@ LayerTestResult LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {2., 3., 3., 4.})); + std::vector input = {2., 3., 3., 4.}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, - -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})); + std::vector expectedOutput = + {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, + -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}; return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape()); } LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest( @@ -2726,14 +2644,14 @@ LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLa const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo 
inputDesc({ 2, 5 }, armnn::DataType::Float32); - boost::multi_array input = MakeTensor(inputDesc, std::vector( + std::vector input = {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0 - 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1 + 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; //batch 1 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( + std::vector expectedOutput = { 0.0244077f, 0.128027f, -0.00170918f, //batch 0 - -0.00692428f, 0.0848741f, 0.063445f})); //batch 1 + -0.00692428f, 0.0848741f, 0.063445f}; //batch 1 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2750,22 +2668,20 @@ LayerTestResult LstmLayerInt16NoCifgNoPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = MakeTensor( - inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, - -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset); return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, constantDatatype); } @@ -2781,24 +2697,20 @@ LayerTestResult LstmLayerInt16WithCifgWithPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({ 2, 2 }, datatype); - boost::multi_array input = - MakeTensor( - inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({ 2, 4 }, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, - -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, + -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f + }, + qScale, qOffset); return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, constantDatatype); } LayerTestResult LstmLayerInt16NoCifgWithPeepholeWithProjectionTest( @@ -2813,32 +2725,26 @@ LayerTestResult LstmLayerInt16NoCifgWithPeepholeWithProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8; armnn::TensorInfo inputDesc({ 2, 5 }, datatype); - boost::multi_array input = - MakeTensor( - 
inputDesc, - armnnUtils::QuantizedVector( - { - 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, - 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f - }, - qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector( + { + 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, + 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f + }, + qScale, qOffset); armnn::TensorInfo outputDesc({ 2, 16 }, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f, - -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f, - 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f, - 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f, - -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f, - -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f, - 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f, - 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f, + -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f, + 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f, + 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f, + -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f, + -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f, + 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f, + 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f + }, + qScale, qOffset); return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl( workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype); @@ -2855,23 +2761,20 @@ LayerTestResult LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymm16 armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = - MakeTensor(inputDesc, - armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = - MakeTensor( - outputDesc, - armnnUtils::QuantizedVector( - { - -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, - -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f - }, - qScale, qOffset)); + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset); return LstmNoCifgNoPeepholeNoProjectionTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, datatype); + workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(), + qScale, qOffset, datatype); } // @@ -2884,14 +2787,13 @@ LayerTestResult QuantizedLstmTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {166, 179, 50, 150})); + std::vector input = {166, 179, 50, 150}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {140, 151, 146, 112, 136, 156, 142, 112 })); + std::vector expectedOutput = {140, 151, 146, 112, 136, 156, 142, 112 }; - return 
QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); + return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, + input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape()); } // QLSTM @@ -2901,12 +2803,10 @@ LayerTestResult QLstmTest( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {90, 102, 13, 26, 38, 102, 13, 26, 51, 64})); + std::vector input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}; armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmS8); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {-15, 21, 14, 20, -15, 15, 5, 27})); + std::vector expectedOutput = {-15, 21, 14, 20, -15, 15, 5, 27}; return QLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2917,12 +2817,10 @@ LayerTestResult QLstmTest1( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {90, 102, 13, 26, 38, 102, 13, 26, 51, 64})); + std::vector input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}; armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {127, 127, -108, -67, 127, 127})); + std::vector expectedOutput = {127, 127, -108, -67, 127, 127}; return QLstmTestImpl1(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } @@ -2933,12 +2831,10 @@ LayerTestResult QLstmTest2( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8); - boost::multi_array input = MakeTensor(inputDesc, std::vector( - {90, 102, 13, 26, 38, 102, 13, 26, 51, 64})); + std::vector input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}; armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8); - boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - {127, 127, 127, -128, 127, 127})); + std::vector expectedOutput = {127, 127, 127, -128, 127, 127}; return QLstmTestImpl2(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput); } \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp index ba827b1860..0f045d1198 100644 --- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp @@ -40,11 +40,10 @@ LayerTestResult MeanTestHelper( outputTensorInfo.SetQuantizationScale(scale); outputTensorInfo.SetQuantizationOffset(offset); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType(inputData, inputTensorInfo)); + auto input = ConvertToDataType(inputData, inputTensorInfo); - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor( - outputTensorInfo, ConvertToDataType(outputData, outputTensorInfo)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = ConvertToDataType(outputData, outputTensorInfo); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -61,14 +60,17 @@ LayerTestResult MeanTestHelper( inputHandle->Allocate(); outputHandle->Allocate(); 
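// The change below completes the recurring pattern of this patch: data moves
// through ITensorHandle as flat std::vector<T> buffers via .data(), and each
// test returns its result through the four-argument LayerTestResult
// constructor (actual data, expected data, actual shape, expected shape).
// A minimal sketch of that pattern, assuming only what the surrounding hunks
// show; the helper name RunAndCollect is illustrative, not part of the patch:
//
//     template <typename T, std::size_t NumDims>
//     LayerTestResult<T, NumDims> RunAndCollect(armnn::IWorkload& workload,
//                                               armnn::ITensorHandle& outputHandle,
//                                               const armnn::TensorInfo& outputTensorInfo,
//                                               const std::vector<T>& expectedOutput)
//     {
//         // Flat buffer sized from the tensor info replaces boost::multi_array.
//         std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
//         workload.Execute();
//         CopyDataFromITensorHandle(actualOutput.data(), &outputHandle);
//         return LayerTestResult<T, NumDims>(actualOutput,
//                                            expectedOutput,
//                                            outputHandle.GetShape(),
//                                            outputTensorInfo.GetShape());
//     }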
- CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp index b8dc5f5667..c7b082183c 100644 --- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp @@ -545,10 +545,11 @@ LayerTestResult CompareMultiplicationTest( inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); - LayerTestResult comparisonResult(outputTensorInfo); + auto input0 = MakeRandomTensor(inputTensorInfo0, 803506992); + auto input1 = MakeRandomTensor(inputTensorInfo1, 54902257); - auto input0 = MakeRandomTensor(inputTensorInfo0, 803506992); - auto input1 = MakeRandomTensor(inputTensorInfo1, 54902257); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0); std::unique_ptr inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1); @@ -580,17 +581,20 @@ LayerTestResult CompareMultiplicationTest( inputHandle1Ref->Allocate(); outputHandleRef->Allocate(); - CopyDataToITensorHandle(inputHandle0.get(), input0.origin()); - CopyDataToITensorHandle(inputHandle1.get(), input1.origin()); - CopyDataToITensorHandle(inputHandle0Ref.get(), input0.origin()); - CopyDataToITensorHandle(inputHandle1Ref.get(), input1.origin()); + CopyDataToITensorHandle(inputHandle0.get(), input0.data()); + CopyDataToITensorHandle(inputHandle1.get(), input1.data()); + CopyDataToITensorHandle(inputHandle0Ref.get(), input0.data()); + CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data()); workload->PostAllocationConfigure(); workload->Execute(); workloadRef->PostAllocationConfigure(); workloadRef->Execute(); - CopyDataFromITensorHandle(comparisonResult.output.origin(), outputHandle.get()); - CopyDataFromITensorHandle(comparisonResult.outputExpected.origin(), outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return comparisonResult; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp index b52dcd5303..153afd9cd7 100644 --- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp @@ -44,16 +44,18 @@ LayerTestResult SimpleNormalizationTestImpl( auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); - LayerTestResult ret(outputTensorInfo); - - auto input = MakeTensor(inputTensorInfo, 
std::vector({ + std::vector input = + { // Batch #0 1.0f, 2.0f, 3.0f, 4.0f, // Batch #1 5.0f, 6.0f, 7.0f, 8.0f - })); + }; + + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); float alpha = 1.f; float beta = 1.f; @@ -75,7 +77,7 @@ LayerTestResult SimpleNormalizationTestImpl( data.m_Parameters.m_K = kappa; data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW; - armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]); + armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data()); armnn::NormalizationQueueDescriptor refData = data; armnn::WorkloadInfo refInfo = info; SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle); @@ -85,11 +87,11 @@ LayerTestResult SimpleNormalizationTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); switch (normMethod) { @@ -104,23 +106,34 @@ LayerTestResult SimpleNormalizationTestImpl( // pow((kappa + (accumulatedScale * alpha)), beta) // ...where accumulatedScale is the sum of every element squared. float divisor[inputNum]; - for(int i = 0; i < armnn::numeric_cast(inputNum); i++) + + float accumulatedScale1 = 0.0f; + for (size_t i = 0; i < input.size()/2; ++i) + { + accumulatedScale1 += input[i]*input[i]; + } + + float accumulatedScale2 = 0.0f; + for (size_t i = input.size()/2; i < input.size(); ++i) { - float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] + - input[i][0][0][1]*input[i][0][0][1] + - input[i][0][1][0]*input[i][0][1][0] + - input[i][0][1][1]*input[i][0][1][1]; - divisor[i] = powf((kappa + accumulatedScale * alpha), beta); + accumulatedScale2 += input[i]*input[i]; } - ret.outputExpected = MakeTensor(outputTensorInfo, - std::vector({input[0][0][0][0]/divisor[0], - input[0][0][0][1]/divisor[0], - input[0][0][1][0]/divisor[0], - input[0][0][1][1]/divisor[0], - input[1][0][0][0]/divisor[1], - input[1][0][0][1]/divisor[1], - input[1][0][1][0]/divisor[1], - input[1][0][1][1]/divisor[1]})); + + divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta); + divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta); + + std::vector output; + unsigned int divisorIndex = 0; + for (size_t i = 0; i < input.size(); ++i) + { + if (i == input.size()/2) + { + divisorIndex++; + } + output.emplace_back(input[i]/divisor[divisorIndex]); + } + + expectedOutput = output; break; } case armnn::NormalizationAlgorithmChannel::Across: @@ -131,19 +144,14 @@ LayerTestResult SimpleNormalizationTestImpl( // ...where adjacent channels means within half the normSize for the channel // The test data has only one channel, so this is simplified below. 
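// For example, with alpha = beta = kappa = 1.0f and a single channel, each
// element x reduces to x * (kappa + alpha * x * x) ^ -beta, so 1.0f maps to
// 1.0f / (1.0f + 1.0f) = 0.5f and 2.0f maps to 2.0f / (1.0f + 4.0f) = 0.4f,
// which is the per-element arithmetic the loop below implements. The same
// reduction as a standalone sketch (the helper name is illustrative, not
// part of the patch; requires <cmath>):
//
//     float LocalBrightnessSingleChannel(float x, float alpha, float beta, float kappa)
//     {
//         const float accumulatedScale = x * x; // only one channel contributes
//         return x * powf(kappa + accumulatedScale * alpha, -beta);
//     }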
std::vector outputVector; - for (int n = 0; n < armnn::numeric_cast(inputNum); ++n) + + for (unsigned int i = 0; i < input.size(); ++i) { - for (int h = 0; h < armnn::numeric_cast(inputHeight); ++h) - { - for (int w = 0; w < armnn::numeric_cast(inputWidth); ++w) - { - float accumulatedScale = input[n][0][h][w]*input[n][0][h][w]; - float scale = powf((kappa + accumulatedScale * alpha), -beta); - outputVector.push_back(input[n][0][h][w] * scale); - } - } + float accumulatedScale = input[i]*input[i]; + float scale = powf((kappa + accumulatedScale * alpha), -beta); + outputVector.push_back(input[i] * scale); } - ret.outputExpected = MakeTensor(outputTensorInfo, outputVector); + expectedOutput = outputVector; break; } default: @@ -162,7 +170,10 @@ LayerTestResult SimpleNormalizationTestImpl( } } - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult SimpleNormalizationNhwcTestImpl( @@ -188,16 +199,18 @@ LayerTestResult SimpleNormalizationNhwcTestImpl( auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); - LayerTestResult ret(outputTensorInfo); - - auto input = MakeTensor(inputTensorInfo, std::vector({ + std::vector input = + { // Batch #0 1.0f, 2.0f, 3.0f, 4.0f, // Batch #1 5.0f, 6.0f, 7.0f, 8.0f - })); + }; + + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); float alpha = 1.f; float beta = 1.f; @@ -219,7 +232,7 @@ LayerTestResult SimpleNormalizationNhwcTestImpl( data.m_Parameters.m_K = kappa; data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC; - armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]); + armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data()); armnn::NormalizationQueueDescriptor refData = data; armnn::WorkloadInfo refInfo = info; SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle); @@ -229,11 +242,11 @@ LayerTestResult SimpleNormalizationNhwcTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); switch (normMethod) { @@ -243,9 +256,8 @@ LayerTestResult SimpleNormalizationNhwcTestImpl( { case armnn::NormalizationAlgorithmChannel::Across: { - std::vector expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f, - 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f }; - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutput); + expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f, + 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f }; break; } default: @@ -264,7 +276,10 @@ LayerTestResult SimpleNormalizationNhwcTestImpl( } } - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } LayerTestResult CompareNormalizationTestImpl( @@ -297,7 +312,10 @@ LayerTestResult CompareNormalizationTestImpl( LayerTestResult ret(outputTensorInfo); - auto input = MakeRandomTensor(inputTensorInfo, 111234); + auto input = MakeRandomTensor(inputTensorInfo, 111234); + + std::vector 
actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); constexpr float alpha = 1.f; constexpr float beta = 1.f; @@ -330,9 +348,9 @@ LayerTestResult CompareNormalizationTestImpl( armnn::BackendId backend = workloadFactory.GetBackendId(); const size_t reasonIfUnsupportedMaxLen = 255; char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1]; - ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters, - reasonIfUnsupported, reasonIfUnsupportedMaxLen); - if (!ret.supported) + ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters, + reasonIfUnsupported, reasonIfUnsupportedMaxLen); + if (!ret.m_Supported) { return ret; } @@ -346,19 +364,125 @@ LayerTestResult CompareNormalizationTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); ExecuteWorkload(*workload, memoryManager); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); + ret.m_ActualData = actualOutput; + ret.m_ExpectedData = expectedOutput; return ret; } +LayerTestResult AcrossChannelNormalizationTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + armnn::NormalizationAlgorithmChannel normChannel, + armnn::NormalizationAlgorithmMethod normMethod) +{ + const unsigned int inputHeight = 1; + const unsigned int inputWidth = 2; + const unsigned int inputChannels = 3; + const unsigned int inputNum = 2; + + unsigned int outputHeight = inputHeight; + unsigned int outputWidth = inputWidth; + unsigned int outputChannels = inputChannels; + unsigned int outputNum = inputNum; + + unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels }; + unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels }; + + auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + std::vector input = + { + // Batch #0 + -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f, + // Batch #1 + -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f, + }; + + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); + + float alpha = 4.f; + float beta = 0.5f; + float kappa = 9.f; + uint32_t normSize = 5; + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::NormalizationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_NormChannelType = normChannel; + data.m_Parameters.m_NormMethodType = normMethod; + data.m_Parameters.m_NormSize = normSize; + 
data.m_Parameters.m_Alpha = alpha; + data.m_Parameters.m_Beta = beta; + data.m_Parameters.m_K = kappa; + data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC; + + armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data()); + armnn::NormalizationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle); + + std::unique_ptr workload = workloadFactory.CreateNormalization(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + switch (normMethod) + { + case armnn::NormalizationAlgorithmMethod::LocalBrightness: + { + switch (normChannel) + { + case armnn::NormalizationAlgorithmChannel::Across: + { + expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, + -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, }; + break; + } + default: + { + throw armnn::UnimplementedException("Unsupported normalisation channel type, " + "only Across and Within are supported"); + } + } + break; + } + case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough. + default: + { + throw armnn::UnimplementedException("Unsupported normalisation method type, " + "only LocalBrightness is supported"); + } + } + + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + } // anonymous namespace LayerTestResult SimpleNormalizationAcrossTest( @@ -405,3 +529,17 @@ LayerTestResult CompareNormalizationTest( workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, normChannel, normMethod); } + +LayerTestResult AcrossChannelNormalizationTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness; + auto normChannel = armnn::NormalizationAlgorithmChannel::Across; + return AcrossChannelNormalizationTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + normChannel, + normMethod); +} diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp index 3a276e8c4c..bbbbc4fe02 100644 --- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp @@ -35,3 +35,8 @@ LayerTestResult CompareNormalizationTest( const armnn::ITensorHandleFactory& refTensorHandleFactory, armnn::NormalizationAlgorithmChannel normChannel, armnn::NormalizationAlgorithmMethod normMethod); + +LayerTestResult AcrossChannelNormalizationTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp index 086f8757dd..a09e387b0e 100644 --- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp @@ -54,15 +54,11 @@ LayerTestResult Pad2dTestCommon( }, qScale, 
qOffset); - auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, std::vector(expectedOutputValues)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - armnn::PadQueueDescriptor descriptor; std::vector> padList; @@ -81,14 +77,17 @@ LayerTestResult Pad2dTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template @@ -140,15 +139,11 @@ LayerTestResult Pad3dTestCommon( }, qScale, qOffset); - auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, std::vector(expectedOutputValues)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - armnn::PadQueueDescriptor descriptor; std::vector> PadList; @@ -167,14 +162,17 @@ LayerTestResult Pad3dTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template @@ -380,10 +378,7 @@ LayerTestResult Pad4dTestCommon( }, qScale, qOffset); - auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, std::vector(expectedOutputValues)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -407,14 +402,17 @@ LayerTestResult Pad4dTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template @@ -453,10 +451,7 @@ LayerTestResult PadQAsymmTestCommon( p, p, p, p, p, p, p }; - auto inputTensor = MakeTensor(inputTensorInfo, 
std::vector(inputValues)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, std::vector(expectedOutputValues)); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -480,14 +475,17 @@ LayerTestResult PadQAsymmTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } // diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp index 74d29f0250..91add545ec 100644 --- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp @@ -27,10 +27,8 @@ LayerTestResult SimplePermuteTestImpl( const std::vector& outputExpectedData) { IgnoreUnused(memoryManager); - auto input = MakeTensor(inputTensorInfo, inputData); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -46,13 +44,16 @@ LayerTestResult SimplePermuteTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputExpectedData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp index 2275b9f07a..1eaf1f9d66 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp @@ -37,8 +37,10 @@ LayerTestResult SimplePooling2dTestImpl( armnn::Pooling2dDescriptor descriptor, float qScale, int32_t qOffset, - const boost::multi_array& input, - const boost::multi_array& outputExpected) + const std::vector& input, + const std::vector& outputExpected, + const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputShape) { IgnoreUnused(memoryManager); const armnn::DataLayout dataLayout = descriptor.m_DataLayout; @@ -47,15 +49,15 @@ LayerTestResult SimplePooling2dTestImpl( auto widthIndex = dimensionIndices.GetWidthIndex(); auto channelsIndex = dimensionIndices.GetChannelsIndex(); - unsigned int inputHeight = armnn::numeric_cast(input.shape()[heightIndex]); - unsigned int inputWidth = armnn::numeric_cast(input.shape()[widthIndex]); - unsigned int inputChannels = 
armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
-    unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[heightIndex]);
+    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[widthIndex]);
+    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[channelsIndex]);
+    unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);

-    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
-    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
-    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
-    unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputShape[heightIndex]);
+    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputShape[widthIndex]);
+    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputShape[channelsIndex]);
+    unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputShape[0]);

     armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
         inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
         outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
@@ -73,6 +75,7 @@
     }

     LayerTestResult<T, 4> result(outputTensorInfo);
+    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -89,10 +92,10 @@
     armnn::BackendId backend = workloadFactory.GetBackendId();
     const size_t reasonIfUnsupportedMaxLen = 255;
     char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
-    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
-                                                   queueDescriptor.m_Parameters,
-                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
-    if (!result.supported)
+    result.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
+                                                     queueDescriptor.m_Parameters,
+                                                     reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+    if (!result.m_Supported)
     {
         return result;
     }
@@ -102,13 +105,14 @@
     inputHandle->Allocate();
     outputHandle->Allocate();

-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());

     workload->Execute();

-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

-    result.outputExpected = outputExpected;
+    result.m_ActualData = actualOutput;
+    result.m_ExpectedData = outputExpected;

     return result;
 }
@@ -194,15 +198,14 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
     inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
     std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
+    auto input = QuantizedVector<T>(inputData, qScale, qOffset);

     // These were calculated manually.
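// ("Calculated manually" means worked out by hand: each expected element is
// simply the maximum over one pooling window of the input plane. A sketch of
// that reduction for a single window, assuming a row-major plane; the helper
// name is illustrative, not part of the patch, and needs <algorithm>,
// <limits> and <vector>:)
//
//     float MaxPoolWindow(const std::vector<float>& plane, unsigned int planeWidth,
//                         unsigned int x0, unsigned int y0,
//                         unsigned int poolWidth, unsigned int poolHeight)
//     {
//         float maxVal = std::numeric_limits<float>::lowest();
//         for (unsigned int y = y0; y < y0 + poolHeight; ++y)     // rows of the window
//         {
//             for (unsigned int x = x0; x < x0 + poolWidth; ++x)  // columns of the window
//             {
//                 maxVal = std::max(maxVal, plane[y * planeWidth + x]);
//             }
//         }
//         return maxVal;
//     }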
- auto shape(GetTensorShapeAsArray<4>(outputTensorInfo)); - boost::multi_array outputExpected(shape); + std::vector outputExpected; if (forceNoPadding) { - outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + outputExpected = QuantizedVector( + { 8.0f, 8.0f, 8.0f, 9.0f, 7.0f, 9.0f, 9.0f, 9.0f, 9.0f, @@ -219,12 +222,12 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( -1.0f, 0.0f, 0.0f, -1.0f, -1.0f, -1.0f }, - qScale, qOffset)); + qScale, qOffset); } else { - outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + outputExpected = QuantizedVector( + { 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f, 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f, 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f, @@ -241,11 +244,12 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f, 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f }, - qScale, qOffset)); + qScale, qOffset); } return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -312,12 +316,9 @@ LayerTestResult SimpleMaxPooling2dTestCommon( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, inputData); - - auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -384,12 +385,9 @@ LayerTestResult SimpleAveragePooling2dTestCommon( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, inputData); - - auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -422,26 +420,23 @@ LayerTestResult LargeTensorsAveragePooling2dTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - std::vector inputVec; + std::vector input; for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i) { - inputVec.push_back(1); + input.push_back(1); } - auto input = MakeTensor(inputTensorInfo, inputVec); - - std::vector outputVec; + std::vector outputExpected; for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i) { - outputVec.push_back(1); + outputExpected.push_back(1); } - auto outputExpected = MakeTensor(outputTensorInfo, outputVec); - return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -499,12 +494,9 @@ LayerTestResult SimpleL2Pooling2dTestCommon( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, inputData); - - auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, 
qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -522,25 +514,26 @@ LayerTestResult L2Pooling2dSize3Stride1TestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, }, - qScale, qOffset)); + qScale, qOffset); armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, 3.0f, 3.0f, 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -558,8 +551,8 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, @@ -570,19 +563,20 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, }, - qScale, qOffset)); + qScale, qOffset); armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -600,8 +594,8 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, @@ -610,18 +604,19 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, }, - qScale, qOffset)); + qScale, qOffset); armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, 3.0f, 3.0f, 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, 
tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -639,8 +634,8 @@ LayerTestResult L2Pooling2dSize7TestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f, @@ -649,17 +644,18 @@ LayerTestResult L2Pooling2dSize7TestCommon( 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, }, - qScale, qOffset)); + qScale, qOffset); armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -677,8 +673,8 @@ LayerTestResult L2Pooling2dSize9TestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, @@ -689,17 +685,18 @@ LayerTestResult L2Pooling2dSize9TestCommon( 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, }, - qScale, qOffset)); + qScale, qOffset); armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -727,21 +724,22 @@ LayerTestResult AsymmetricNonSquarePooling2dTestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; // Construct input data. - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 1.0f, 3.0f, 4.0f, }, - qScale, qOffset)); + qScale, qOffset); // These were calculated manually. 
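// (The expected values below can be sanity-checked from standard pooling
// geometry: ignoring the rounding mode, each output extent is
// (inputSize + padLow + padHigh - poolSize) / stride + 1, and the 0.0f
// entries appear to come from windows that cover only padding. A sketch of
// the size computation; the helper name is illustrative, not part of the
// patch:)
//
//     unsigned int PooledOutputSize(unsigned int inputSize,
//                                   unsigned int padLow, unsigned int padHigh,
//                                   unsigned int poolSize, unsigned int stride)
//     {
//         return (inputSize + padLow + padHigh - poolSize) / stride + 1;
//     }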
- auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 0.0f, 3.0f, 0.0f, 3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -788,7 +786,9 @@ LayerTestResult ComparePooling2dTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeRandomTensor(inputTensorInfo, 81715); + std::vector input = MakeRandomTensor(inputTensorInfo, 81715); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); LayerTestResult comparisonResult(outputTensorInfo); @@ -817,10 +817,10 @@ LayerTestResult ComparePooling2dTestCommon( armnn::BackendId backend = workloadFactory.GetBackendId(); const size_t reasonIfUnsupportedMaxLen = 255; char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1]; - comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo, - data.m_Parameters, - reasonIfUnsupported, reasonIfUnsupportedMaxLen); - if (!comparisonResult.supported) + comparisonResult.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo, + data.m_Parameters, + reasonIfUnsupported, reasonIfUnsupportedMaxLen); + if (!comparisonResult.m_Supported) { return comparisonResult; } @@ -838,14 +838,17 @@ LayerTestResult ComparePooling2dTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); workload->Execute(); workloadRef->Execute(); - CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get()); - CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); + + comparisonResult.m_ActualData = actualOutput; + comparisonResult.m_ExpectedData = expectedOutput; return comparisonResult; } @@ -924,14 +927,15 @@ LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputData, qScale, qOffset)); + auto input = QuantizedVector(inputData, qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, + auto outputExpected = forceNoPadding ? 
QuantizedVector(expectedOutputDataNoPadding, qScale, qOffset) : - QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset)); + QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } // @@ -1003,14 +1007,15 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputData, qScale, qOffset)); + auto input = QuantizedVector(inputData, qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, + auto outputExpected = forceNoPadding ? QuantizedVector(expectedOutputDataNoPadding, qScale, qOffset) : - QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset)); + QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } @@ -1044,25 +1049,26 @@ LayerTestResult IgnorePaddingSimpleMaxPooling2dTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { -1.0f, -2.0f, 3.0f, 4.0f, -1.0f, -2.0f, 3.0f, 4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { -1.0f, 3.0f, 4.0f, 1.0f, 3.0f, 4.0f, 1.0f, 2.0f, -4.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1095,26 +1101,27 @@ LayerTestResult IgnorePaddingMaxPooling2dSize3TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { -1.0f, -2.0f, 3.0f, 4.0f, -1.0f, -2.0f, 3.0f, 4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { -1.0f, 3.0f, 4.0f, 4.0f, 2.0f, 3.0f, 4.0f, 4.0f, 2.0f, 3.0f, 4.0f, 4.0f, 2.0f, 2.0f, 2.0f, -3.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1147,25 +1154,26 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 12.0f, 20.0f, 32.0f, 40.0f, 
12.0f, 20.0f, 32.0f, 40.0f, 12.0f, 20.0f, 32.0f, 40.0f, 12.0f, 20.0f, 32.0f, 40.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 3.0f, 13.0f, 10.0f, 6.0f, 26.0f, 20.0f, 3.0f, 13.0f, 10.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1199,24 +1207,25 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 2.0f, 3.5f, 2.0f, 3.5f }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1249,26 +1258,27 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 9.0f, 27.0f, 18.0f, 36.0f, 18.0f, 9.0f, 18.0f, 9.0f, 27.0f, 18.0f, 9.0f, 27.0f, 9.0f, 27.0f, 9.0f, 18.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 7.0f, 11.0f, 13.0f, 9.0f, 12.0f, 17.0f, 19.0f, 13.0f, 12.0f, 16.0f, 16.0f, 10.0f, 9.0f, 11.0f, 12.0f, 7.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1301,25 +1311,26 @@ LayerTestResult IgnorePaddingSimpleL2Pooling2dTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 2.0f, 4.0f, 8.0f, 16.0f, 4.0f, 2.0f, 2.0f, 4.0f, 8.0f, 2.0f, 4.0f, 2.0f, 16.0f, 2.0f, 2.0f, 8.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 1.0f, 4.4721f, 8.0f, 4.4721f, 2.6457f, 2.236f, 8.0f, 1.4142f, 4.0f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } template> @@ -1352,26 +1363,27 @@ LayerTestResult IgnorePaddingL2Pooling2dSize3TestCommon( 
outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector({ + auto input = QuantizedVector( + { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, }, - qScale, qOffset)); + qScale, qOffset); - auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector({ + auto outputExpected = QuantizedVector( + { 1.0540f, 1.7638f, 2.5385f, 2.3570f, 1.2909f, 2.1602f, 3.1091f, 2.8867f, 1.2909f, 2.1602f, 3.1091f, 2.8867f, 1.0540f, 1.7638f, 2.5385f, 2.3570f, }, - qScale, qOffset)); + qScale, qOffset); return SimplePooling2dTestImpl( - workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected); + workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, + input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp index a5c53d0e58..3cf85817c8 100644 --- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp @@ -61,22 +61,18 @@ LayerTestResult PreluTest( 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f }; - auto input = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector(inputData, - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset())); + std::vector input = armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()); - auto alpha = MakeTensor(alphaTensorInfo, - armnnUtils::QuantizedVector(alphaData, + std::vector alpha = armnnUtils::QuantizedVector(alphaData, alphaTensorInfo.GetQuantizationScale(), - alphaTensorInfo.GetQuantizationOffset())); + alphaTensorInfo.GetQuantizationOffset()); - LayerTestResult result(outputTensorInfo); - result.outputExpected = - MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector(outputExpectedData, - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset())); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputExpectedData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr alphaHandle = tensorHandleFactory.CreateTensorHandle(alphaTensorInfo); @@ -94,12 +90,15 @@ LayerTestResult PreluTest( alphaHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); - CopyDataToITensorHandle(alphaHandle.get(), &alpha[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(alphaHandle.get(), alpha.data()); workload->Execute(); - CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp index 5a36856e54..029d50e718 100644 --- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp +++ 
b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp @@ -31,10 +31,7 @@ LayerTestResult QuantizeTestImpl( armnn::QuantizeQueueDescriptor descriptor) { IgnoreUnused(memoryManager); - boost::multi_array input = MakeTensor(inputTensorInfo, inputData); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -48,13 +45,16 @@ LayerTestResult QuantizeTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.data(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template > diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp index aeed272446..c483d2cdc6 100644 --- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp @@ -14,7 +14,7 @@ template LayerTestResult RankTest( armnn::TensorInfo inputTensorInfo, - boost::multi_array input, + std::vector input, armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory) @@ -24,8 +24,8 @@ LayerTestResult RankTest( const armnn::TensorShape outputShape{armnn::Dimensionality::Scalar}; armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32); - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, { n }); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput = { n }; std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -40,13 +40,16 @@ LayerTestResult RankTest( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template @@ -56,9 +59,7 @@ LayerTestResult RankDimSize1Test( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputTensorInfo({6}, ArmnnType, 1.0f, 0); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( - { -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f }, - inputTensorInfo)); + auto input = ConvertToDataType({ -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f }, inputTensorInfo); return RankTest(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory); } @@ -70,9 +71,7 @@ LayerTestResult RankDimSize2Test( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputTensorInfo({1, 3}, ArmnnType, 1.0f, 0); - auto 
input = MakeTensor(inputTensorInfo, ConvertToDataType( - { -37.5f, -15.2f, -8.76f }, - inputTensorInfo)); + auto input = ConvertToDataType({ -37.5f, -15.2f, -8.76f }, inputTensorInfo); return RankTest(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory); } @@ -84,9 +83,7 @@ LayerTestResult RankDimSize3Test( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputTensorInfo({1, 3, 2}, ArmnnType, 1.0f, 0); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( - { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f}, - inputTensorInfo)); + auto input = ConvertToDataType({ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f}, inputTensorInfo); return RankTest(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory); } @@ -98,10 +95,10 @@ LayerTestResult RankDimSize4Test( const armnn::ITensorHandleFactory& tensorHandleFactory) { armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType, 1.0f, 0); - auto input = MakeTensor(inputTensorInfo, ConvertToDataType( + auto input = ConvertToDataType( { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }, - inputTensorInfo)); + inputTensorInfo); return RankTest(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory); } diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp index 2757eceb8a..0aacee1aa5 100644 --- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp @@ -15,7 +15,7 @@ template LayerTestResult RankTest( armnn::TensorInfo inputTensorInfo, - boost::multi_array input, + std::vector input, armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory); diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp index 18821b9549..9f5422bcbc 100644 --- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp @@ -28,10 +28,9 @@ LayerTestResult ReduceTestCommon( bool keepDims = false) { IgnoreUnused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputData, inputTensorInfo)); + auto inputTensor = ConvertToDataType(inputData, inputTensorInfo); - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, outputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -65,13 +64,16 @@ LayerTestResult ReduceTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin()); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // namespace diff --git a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp 
b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp index 589cc03cbc..7ce03ad13a 100644 --- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp @@ -30,10 +30,9 @@ LayerTestResult ReductionTestCommon( bool keepDims = false) { IgnoreUnused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputData, inputTensorInfo)); + auto inputTensor = ConvertToDataType(inputData, inputTensorInfo); - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, outputData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -67,13 +66,16 @@ LayerTestResult ReductionTestCommon( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin()); + CopyDataToITensorHandle(inputHandle.get(), inputTensor.data()); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // namespace diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp index fbedb943f4..c3aacad4b0 100644 --- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp @@ -25,10 +25,7 @@ LayerTestResult SimpleReshapeTestImpl( const std::vector& outputExpectedData) { IgnoreUnused(memoryManager); - auto input = MakeTensor(inputTensorInfo, inputData); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -43,13 +40,16 @@ LayerTestResult SimpleReshapeTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->Execute(); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputExpectedData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp index a2a804d54e..7706bde60d 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp @@ -95,10 +95,10 @@ LayerTestResult ResizeTestImpl( std::vector inputData = armnnUtils::QuantizedVector(params.m_InputData, params.m_InQuantScale, params.m_InQuantOffset); - std::vector expectedOutputData = - armnnUtils::QuantizedVector(params.m_ExpectedOutputData, - params.m_OutQuantScale, - params.m_OutQuantOffset); + std::vector actualOutput(outputInfo.GetNumElements()); + std::vector 
expectedOutputData = armnnUtils::QuantizedVector(params.m_ExpectedOutputData, + params.m_OutQuantScale, + params.m_OutQuantOffset); if (params.m_DataLayout == armnn::DataLayout::NHWC) { @@ -106,11 +106,6 @@ LayerTestResult ResizeTestImpl( PermuteTensorNchwToNhwc(outputInfo, expectedOutputData); } - auto input = MakeTensor(inputInfo, inputData); - - LayerTestResult result(outputInfo); - result.outputExpected = MakeTensor(outputInfo, expectedOutputData); - std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); @@ -132,13 +127,17 @@ LayerTestResult ResizeTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->PostAllocationConfigure(); workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); - return result; + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp index a2e6e2473f..f3e28363c2 100644 --- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp @@ -39,12 +39,9 @@ LayerTestResult SliceTestImpl( outputInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = - MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); - - LayerTestResult result(outputInfo); - result.outputExpected = - MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset); + std::vector actualOutput(outputInfo.GetNumElements()); ARMNN_NO_DEPRECATE_WARN_BEGIN std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputInfo); @@ -64,9 +61,12 @@ LayerTestResult SliceTestImpl( ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(result.output.data(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp index 9688ce49f2..375bdaa130 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp @@ -82,10 +82,10 @@ LayerTestResult SimpleSoftmaxBaseTestImpl( outputTensorInfo.SetQuantizationScale(qScale); outputTensorInfo.SetQuantizationOffset(qOffset); - LayerTestResult ret(outputTensorInfo); - // Each row is independently softmax'd. 
- auto input = MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputData, qScale, qOffset); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -102,18 +102,18 @@ LayerTestResult SimpleSoftmaxBaseTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), input.origin()); + CopyDataToITensorHandle(inputHandle.get(), input.data()); ARMNN_ASSERT(workload); ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - std::vector expectedOutput = armnnUtils::QuantizedVector(outputData, qScale, qOffset); - ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutput); - - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> @@ -259,9 +259,9 @@ LayerTestResult CompareSoftmaxTestImpl( outputTensorInfo.SetQuantizationScale(qScale); outputTensorInfo.SetQuantizationOffset(qOffset); - - LayerTestResult ret(outputTensorInfo); - auto input = MakeRandomTensor(inputTensorInfo, 0xF00D, 0.0f, 1.0f); + auto input = MakeRandomTensor(inputTensorInfo, 0xF00D, 0.0f, 1.0f); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + std::vector expectedOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -292,17 +292,20 @@ LayerTestResult CompareSoftmaxTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0]); - CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); + CopyDataToITensorHandle(inputHandleRef.get(), input.data()); ExecuteWorkload(*workload, memoryManager); workloadRef->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); - CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp index 6dbf82090b..44a37f4fe8 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp @@ -58,12 +58,9 @@ LayerTestResult SpaceToBatchNdTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector(inputData, qScale, qOffset)); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); + std::vector input = 
armnnUtils::QuantizedVector(inputData, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -77,13 +74,16 @@ LayerTestResult SpaceToBatchNdTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp index 8ff9157ec0..9175aec8c6 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp @@ -59,12 +59,9 @@ LayerTestResult SpaceToDepthTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeTensor(inputTensorInfo, - armnnUtils::QuantizedVector(inputData, qScale, qOffset)); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, - armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -78,13 +75,16 @@ LayerTestResult SpaceToDepthTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp index e2040b5b20..e19a3216c3 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp @@ -47,7 +47,6 @@ std::vector> SplitterTestCommon( unsigned int outputHeight2 = inputHeight; unsigned int outputChannels2 = 2; - // Define the tensor descriptors. 
armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset); @@ -75,13 +74,8 @@ std::vector> SplitterTestCommon( outputTensorInfo4.SetQuantizationOffset(qOffset); } - LayerTestResult ret1(outputTensorInfo1); - LayerTestResult ret2(outputTensorInfo2); - LayerTestResult ret3(outputTensorInfo3); - LayerTestResult ret4(outputTensorInfo4); - - auto input = MakeTensor(inputTensorInfo, std::vector( - armnnUtils::QuantizedVector({ + auto input = armnnUtils::QuantizedVector( + { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, @@ -103,12 +97,11 @@ std::vector> SplitterTestCommon( 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, }, - qScale, qOffset) - )); + qScale, qOffset); // Channel 0 of the original input. - ret1.outputExpected = MakeTensor(outputTensorInfo1, std::vector( - armnnUtils::QuantizedVector({ + auto expectedData1 = armnnUtils::QuantizedVector( + { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, @@ -116,12 +109,11 @@ std::vector> SplitterTestCommon( 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, }, - qScale, qOffset) - )); + qScale, qOffset); // Channel 1 & 2 of the original input. - ret2.outputExpected = MakeTensor(outputTensorInfo2, std::vector( - armnnUtils::QuantizedVector({ + auto expectedData2 = armnnUtils::QuantizedVector( + { 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, @@ -136,12 +128,11 @@ std::vector> SplitterTestCommon( 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, }, - qScale, qOffset) - )); + qScale, qOffset); // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input). - ret3.outputExpected = MakeTensor(outputTensorInfo3, std::vector( - armnnUtils::QuantizedVector({ + auto expectedData3 = armnnUtils::QuantizedVector( + { 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, @@ -149,12 +140,11 @@ std::vector> SplitterTestCommon( 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, }, - qScale, qOffset) - )); + qScale, qOffset); // Channel 1 of return 2. 
- ret4.outputExpected = MakeTensor(outputTensorInfo4, std::vector( - armnnUtils::QuantizedVector({ + auto expectedData4 = armnnUtils::QuantizedVector( + { 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, @@ -162,8 +152,12 @@ std::vector> SplitterTestCommon( 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, }, - qScale, qOffset) - )); + qScale, qOffset); + + std::vector actualData1(outputTensorInfo1.GetNumElements()); + std::vector actualData2(outputTensorInfo2.GetNumElements()); + std::vector actualData3(outputTensorInfo3.GetNumElements()); + std::vector actualData4(outputTensorInfo4.GetNumElements()); // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins // have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x @@ -219,12 +213,12 @@ std::vector> SplitterTestCommon( outputHandle1->Allocate(); outputHandle2->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get()); - CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get()); + CopyDataFromITensorHandle(actualData1.data(), outputHandle1.get()); + CopyDataFromITensorHandle(actualData2.data(), outputHandle2.get()); // Do the second split. armnn::SplitterQueueDescriptor data2; @@ -243,8 +237,13 @@ std::vector> SplitterTestCommon( ExecuteWorkload(*workload2, memoryManager); - CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get()); - CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get()); + CopyDataFromITensorHandle(actualData3.data(), outputHandle3.get()); + CopyDataFromITensorHandle(actualData4.data(), outputHandle4.get()); + + LayerTestResult ret1(actualData1, expectedData1, outputHandle1->GetShape(), outputTensorInfo1.GetShape()); + LayerTestResult ret2(actualData2, expectedData2, outputHandle2->GetShape(), outputTensorInfo2.GetShape()); + LayerTestResult ret3(actualData3, expectedData3, outputHandle3->GetShape(), outputTensorInfo3.GetShape()); + LayerTestResult ret4(actualData4, expectedData4, outputHandle4->GetShape(), outputTensorInfo4.GetShape()); std::vector> ret = {ret1, ret2, ret3, ret4,}; @@ -259,10 +258,10 @@ LayerTestResult CopyViaSplitterTestImpl( float qScale, int32_t qOffset) { IgnoreUnused(memoryManager); + const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset); - auto input = MakeTensor( - tensorInfo, - armnnUtils::QuantizedVector({ + auto input = armnnUtils::QuantizedVector( + { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, @@ -284,7 +283,9 @@ LayerTestResult CopyViaSplitterTestImpl( 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, }, - qScale, qOffset)); + qScale, qOffset); + + std::vector actualOutput(tensorInfo.GetNumElements()); std::vector origin = { 0, 0, 0 }; armnn::SplitterQueueDescriptor::ViewOrigin window(origin); @@ -309,15 +310,16 @@ LayerTestResult CopyViaSplitterTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), input.data()); workload->Execute(); - LayerTestResult ret(tensorInfo); - CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); - ret.outputExpected = input; + CopyDataFromITensorHandle(actualOutput.data(), 
outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + input, + outputHandle->GetShape(), + tensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp index 16e709d528..25989f90ed 100644 --- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp @@ -33,14 +33,13 @@ LayerTestResult StackTestHelper( { IgnoreUnused(memoryManager); unsigned int numInputs = static_cast(inputData.size()); - std::vector> inputs; + std::vector> inputs; for (unsigned int i = 0; i < numInputs; ++i) { - inputs.push_back(MakeTensor(inputTensorInfo, inputData[i])); + inputs.emplace_back(inputData[i]); } - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::vector> inputHandles; for (unsigned int i = 0; i < numInputs; ++i) @@ -60,7 +59,7 @@ LayerTestResult StackTestHelper( std::unique_ptr& inputHandle = inputHandles[i]; AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); inputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), inputs[i].origin()); + CopyDataToITensorHandle(inputHandle.get(), inputs[i].data()); } AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); @@ -70,9 +69,12 @@ LayerTestResult StackTestHelper( workload->Execute(); - CopyDataFromITensorHandle(result.output.origin(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return result; + return LayerTestResult(actualOutput, + outputExpectedData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp index 66a3b14e3f..af4b089cde 100644 --- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp @@ -40,12 +40,9 @@ LayerTestResult StridedSliceTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = - MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = - MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); + std::vector input = armnnUtils::QuantizedVector(inputData, qScale, qOffset); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); @@ -66,9 +63,12 @@ LayerTestResult StridedSliceTestImpl( ExecuteWorkload(*workload, memoryManager); - CopyDataFromITensorHandle(ret.output.data(), outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutput, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp index 85ce7e5e6f..cd775729cd 100644 --- 
a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp @@ -183,8 +183,8 @@ LayerTestResult TransposeConvolution2dTest( // execute test TransposeConvolution2dTestImpl(workloadFactory, - memoryManager, - tensorHandleFactory, + memoryManager, + tensorHandleFactory, descriptor, input, output, @@ -193,11 +193,10 @@ LayerTestResult TransposeConvolution2dTest( // construct result object LayerTestResult testResult(outputInfo); - testResult.output = MakeTensor(outputInfo, output.second); - testResult.outputExpected = MakeTensor(outputInfo, - armnnUtils::QuantizedVector(expectedOutputData, - outputInfo.GetQuantizationScale(), - outputInfo.GetQuantizationOffset())); + testResult.m_ActualData = output.second; + testResult.m_ExpectedData = armnnUtils::QuantizedVector(expectedOutputData, + outputInfo.GetQuantizationScale(), + outputInfo.GetQuantizationOffset()); return testResult; } @@ -611,6 +610,8 @@ LayerTestResult TransposeConvolution2dPerAxisQuantTest( std::vector biasData = { -12, -8 }; + std::vector actualOutput(outputInfo.GetNumElements()); + std::vector expectedOutputData = { 9, 13, 21, 19, 27, @@ -665,11 +666,12 @@ LayerTestResult TransposeConvolution2dPerAxisQuantTest( ExecuteWorkload(*workload, memoryManager); - LayerTestResult ret(outputInfo); - CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); - ret.outputExpected = MakeTensor(outputInfo, expectedOutputData); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + expectedOutputData, + outputHandle->GetShape(), + outputInfo.GetShape()); } // diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp index d0f9e82197..6be8bcb5cb 100644 --- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp @@ -28,10 +28,7 @@ LayerTestResult SimpleTransposeTestImpl( const std::vector& outputExpectedData) { IgnoreUnused(memoryManager); - auto input = MakeTensor(inputTensorInfo, inputData); - - LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); + std::vector actualOutput(outputTensorInfo.GetNumElements()); std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); @@ -47,13 +44,16 @@ LayerTestResult SimpleTransposeTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); workload->Execute(); - CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - return ret; + return LayerTestResult(actualOutput, + outputExpectedData, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); } template> diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index e1729fc7de..918ef039a3 100644 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -241,6 +241,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam, ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest) 
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
 
 // Pooling
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test<DataType::Float32>, true)
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index c26f7bdae8..1048e73c1b 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -19,7 +19,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory,
                                                    armnn::DataType::Float32>(false);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -27,7 +28,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory,
                                                    armnn::DataType::Float32>(false);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -35,7 +37,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory,
                                                    armnn::DataType::Float32>(true);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -43,7 +46,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory,
                                                    armnn::DataType::Float32>(true);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1b86d2e304..7c8e27710b 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -55,22 +55,22 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
     TensorInfo tensorInfo({channels}, DataType::Float32);
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo,
-    {
-        1.f, 4.f,
-        4.f, 2.f,
-        1.f, 6.f,
+    std::vector<float> input =
+    {
+        1.f, 4.f,
+        4.f, 2.f,
+        1.f, 6.f,
 
-        1.f, 1.f,
-        4.f, 1.f,
-        -2.f, 4.f
-    });
+        1.f, 1.f,
+        4.f, 1.f,
+        -2.f, 4.f
+    };
 
     // these values are per-channel of the input
-    auto mean     = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
-    auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f, 9.f });
-    auto beta     = MakeTensor<float, 1>(tensorInfo, { 3.f, 2.f });
-    auto gamma    = MakeTensor<float, 1>(tensorInfo, { 2.f, 1.f });
+    std::vector<float> mean     = { 3.f, -2.f };
+    std::vector<float> variance = { 4.f, 9.f };
+    std::vector<float> beta     = { 3.f, 2.f };
+    std::vector<float> gamma    = { 2.f, 1.f };
 
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -84,10 +84,10 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     ScopedTensorHandle betaTensor(tensorInfo);
     ScopedTensorHandle gammaTensor(tensorInfo);
 
-    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
-    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
-    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
-    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+    AllocateAndCopyDataToITensorHandle(&meanTensor, mean.data());
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, variance.data());
+    AllocateAndCopyDataToITensorHandle(&betaTensor, beta.data());
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, gamma.data());
 
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -105,7 +105,7 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
 
     OpenClTimer openClTimer;
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 6cd26dfdd2..d12817e159 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -874,6 +874,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
 
 // Resize Bilinear - NCHW data layout
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear, SimpleResizeBilinearTest<DataType::Float32>, DataLayout::NCHW)
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index 6a3d05d000..2bb9e3d431 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -20,7 +20,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory,
                                                    armnn::DataType::Float32>(false);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -28,7 +29,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory,
                                                    armnn::DataType::Float32>(false);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -36,7 +38,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory,
                                                    armnn::DataType::Float32>(true);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
@@ -44,7 +47,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
 {
     LayerTestResult<float, 4> result = MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory,
                                                    armnn::DataType::Float32>(true);
-    auto predResult = CompareTensors(result.output, result.outputExpected);
+    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+                                     result.m_ActualShape, result.m_ExpectedShape);
     BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index 9acd0e41e2..df014d5a9b 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -63,10 +63,6 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
     armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                        armnn::DataType::Float32);
 
-    LayerTestResult<float, 4> result(inputTensorInfo);
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -87,7 +83,7 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
 
     NeonTimer neonTimer;
     // Start the timer.
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 228df0946f..df48877108 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -474,6 +474,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
 
 // Softmax
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
-- 
cgit v1.2.1
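
For readers porting out-of-tree backend tests to this change, the sketch below shows the shape of the reworked LayerTestResult as the call sites in this patch exercise it. This is an illustration inferred from those call sites, not the verbatim contents of LayerTestResult.hpp; the real struct may carry additional members and constructors.

    #include <armnn/Tensor.hpp>

    #include <cstddef>
    #include <vector>

    // Sketch only: the result type implied by the constructor calls above.
    // NumDims is kept for source compatibility with the old boost::multi_array
    // based declarations; the data itself is stored flat.
    template <typename T, std::size_t NumDims>
    struct LayerTestResult
    {
        LayerTestResult(const std::vector<T>& actualData,
                        const std::vector<T>& expectedData,
                        const armnn::TensorShape& actualShape,
                        const armnn::TensorShape& expectedShape)
            : m_ActualData(actualData)
            , m_ExpectedData(expectedData)
            , m_ActualShape(actualShape)
            , m_ExpectedShape(expectedShape)
        {}

        std::vector<T> m_ActualData;        // flat buffer read back from the output handle
        std::vector<T> m_ExpectedData;      // flat buffer of reference values
        armnn::TensorShape m_ActualShape;   // shape reported by the output handle
        armnn::TensorShape m_ExpectedShape; // shape from the output TensorInfo
    };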
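
The comparison idiom used throughout the updated backend tests then becomes data-plus-shape, exactly as in the MemCopy cases above. RunSomeLayerTest is a hypothetical stand-in for any test returning a LayerTestResult:

    // Run a layer test (hypothetical invocation) and compare flat data and shapes.
    LayerTestResult<float, 4> result = RunSomeLayerTest();

    // CompareTensors returns an armnn::PredicateResult carrying a pass/fail flag
    // and a diagnostic message.
    auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                     result.m_ActualShape, result.m_ExpectedShape);
    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());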
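
The per-test migration applied in every file touched above follows one recipe, condensed in the fragment below. It is assumed to sit inside a typical templated test function, with inputData, expectedData, qScale, qOffset, outputTensorInfo, the tensor handles, and the workload set up as in the surrounding tests.

    // Before: boost::multi_array buffers built with MakeTensor, results read into
    // ret.output / ret.outputExpected. After: three flat vectors plus the shapes.
    std::vector<T> input          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedData, qScale, qOffset);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    CopyDataToITensorHandle(inputHandle.get(), input.data());   // was &input[0][0][0][0]
    workload->Execute();
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),      // actual shape
                                 outputTensorInfo.GetShape());  // expected shape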