author    Sadik Armagan <sadik.armagan@arm.com>  2021-06-01 09:24:52 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2021-06-02 13:00:56 +0000
commit    483c811ea6fd0e7801aac1afd979ed02a649064b (patch)
tree      a0969c8786528334b62043b40983fa21d54d524e
parent    31f86bfeb311ccc0c6ed94c35a78a51551148ea4 (diff)
download  armnn-483c811ea6fd0e7801aac1afd979ed02a649064b.tar.gz
IVGCVSW-5962 Remove boost::multi_array
* Replaced all instances of boost::multi_array with flat vectors.
* Updated LayerTestResult struct with new member variables.
* Updated CompareTensors function to compare flat vectors and the shape.
* Removed MakeTensor function from TensorHelpers.hpp.
* Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
* Removed boost::array usage.
* Removed boost::extents usages.
* Removed boost::random usages.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
-rw-r--r-- src/armnn/test/TensorHelpers.hpp | 159
-rw-r--r-- src/armnn/test/UnitTests.hpp | 25
-rw-r--r-- src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp | 10
-rw-r--r-- src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp | 19
-rw-r--r-- src/armnnUtils/ParserPrototxtFixture.hpp | 11
-rw-r--r-- src/backends/aclCommon/test/MemCopyTestImpl.hpp | 29
-rw-r--r-- src/backends/aclCommon/test/MemCopyTests.cpp | 12
-rw-r--r-- src/backends/backendsCommon/test/ActivationFixture.hpp | 23
-rw-r--r-- src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp | 23
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 195
-rw-r--r-- src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp | 129
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp | 15
-rw-r--r-- src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp | 90
-rw-r--r-- src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp | 20
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp | 510
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp | 94
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 770
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp | 23
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp | 28
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp | 28
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 28
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp | 15
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp | 18
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | 13
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp | 57
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp | 260
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp | 18
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 27
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp | 25
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp | 33
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp | 130
-rw-r--r-- src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp | 19
-rw-r--r-- src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp | 50
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp | 68
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp | 19
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp | 35
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp | 1710
-rw-r--r-- src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp | 24
-rw-r--r-- src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 244
-rw-r--r-- src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp | 5
-rw-r--r-- src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp | 58
-rw-r--r-- src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp | 13
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp | 330
-rw-r--r-- src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp | 33
-rw-r--r-- src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp | 31
-rw-r--r-- src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp | 23
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp | 37
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp | 18
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp | 18
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp | 82
-rw-r--r-- src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp | 16
-rw-r--r-- src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp | 24
-rw-r--r-- src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp | 14
-rw-r--r-- src/backends/cl/test/ClLayerTests.cpp | 1
-rw-r--r-- src/backends/cl/test/ClMemCopyTests.cpp | 12
-rw-r--r-- src/backends/cl/test/OpenClTimerTest.cpp | 36
-rw-r--r-- src/backends/neon/test/NeonLayerTests.cpp | 1
-rw-r--r-- src/backends/neon/test/NeonMemCopyTests.cpp | 12
-rw-r--r-- src/backends/neon/test/NeonTimerTest.cpp | 6
-rw-r--r-- src/backends/reference/test/RefLayerTests.cpp | 1
71 files changed, 3044 insertions, 2885 deletions
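For orientation before the per-file hunks, here is a minimal sketch of what the comparison call sites change from and to (illustrative shapes and values, not taken from any single test):

    // Before: the shape was implicit in the boost::multi_array type.
    //   boost::multi_array<float, 2> expected = MakeTensor<float, 2>(info, {1.f, 2.f, 3.f, 4.f});
    //   auto result = CompareTensors(actual, expected);

    // After: the data is a flat std::vector and the shape travels alongside it.
    std::vector<float> actual   = { 1.f, 2.f, 3.f, 4.f };
    std::vector<float> expected = { 1.f, 2.f, 3.f, 4.f };
    armnn::TensorShape shape({ 2, 2 });
    auto result = CompareTensors(actual, expected, shape, shape);
    BOOST_TEST(result.m_Result, result.m_Message.str());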
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index ceb6d0f9d7..b8788e7826 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -5,19 +5,18 @@
#pragma once
#include "PredicateResult.hpp"
+
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>
#include <QuantizeHelper.hpp>
-#include <boost/multi_array.hpp>
-#include <boost/random/uniform_real_distribution.hpp>
-#include <boost/random/mersenne_twister.hpp>
#include <boost/test/unit_test.hpp>
#include <array>
#include <cmath>
+#include <random>
#include <vector>
constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
@@ -70,56 +69,91 @@ bool SelectiveCompareBoolean(T a, T b)
return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
};
-template <typename T, std::size_t n>
-armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
- const boost::multi_array<T, n>& b,
- bool compareBoolean = false,
- bool isDynamic = false)
+template <typename T>
+armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
+ const std::vector<T>& expectedData,
+ const armnn::TensorShape& actualShape,
+ const armnn::TensorShape& expectedShape,
+ bool compareBoolean = false,
+ bool isDynamic = false)
{
+ if (actualData.size() != expectedData.size())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different data size ["
+ << actualData.size()
+ << "!="
+ << expectedData.size()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of dimensions ["
+ << actualShape.GetNumDimensions()
+ << "!="
+ << expectedShape.GetNumDimensions()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumElements() != expectedShape.GetNumElements())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of elements ["
+ << actualShape.GetNumElements()
+ << "!="
+ << expectedShape.GetNumElements()
+ << "]";
+ return res;
+ }
+
+ unsigned int numberOfDimensions = actualShape.GetNumDimensions();
+
if (!isDynamic)
{
// Checks they are same shape.
- for (unsigned int i = 0;
- i < n;
- i++)
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
{
- if (a.shape()[i] != b.shape()[i])
+ if (actualShape[i] != expectedShape[i])
{
armnn::PredicateResult res(false);
res.Message() << "Different shapes ["
- << a.shape()[i]
+ << actualShape[i]
<< "!="
- << b.shape()[i]
+ << expectedShape[i]
<< "]";
return res;
}
}
}
- // Now compares element-wise.
-
// Fun iteration over n dimensions.
- std::array<unsigned int, n> indices;
- for (unsigned int i = 0; i < n; i++)
+ std::vector<unsigned int> indices;
+ for (unsigned int i = 0; i < numberOfDimensions; i++)
{
- indices[i] = 0;
+ indices.emplace_back(0);
}
std::stringstream errorString;
int numFailedElements = 0;
constexpr int maxReportedDifferences = 3;
+ unsigned int index = 0;
+ // Compare data element by element.
while (true)
{
bool comparison;
// As true for uint8_t is non-zero (1-255) we must have a dedicated compare for Booleans.
if(compareBoolean)
{
- comparison = SelectiveCompareBoolean(a(indices), b(indices));
+ comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
}
else
{
- comparison = SelectiveCompare(a(indices), b(indices));
+ comparison = SelectiveCompare(actualData[index], expectedData[index]);
}
if (!comparison)
@@ -133,34 +167,35 @@ armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
errorString << ", ";
}
errorString << "[";
- for (unsigned int i = 0; i < n; ++i)
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
{
errorString << indices[i];
- if (i != n - 1)
+ if (i != numberOfDimensions - 1)
{
errorString << ",";
}
}
errorString << "]";
- errorString << " (" << +a(indices) << " != " << +b(indices) << ")";
+ errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
}
}
- ++indices[n - 1];
- for (unsigned int i=n-1; i>0; i--)
+ ++indices[numberOfDimensions - 1];
+ for (unsigned int i=numberOfDimensions-1; i>0; i--)
{
- if (indices[i] == a.shape()[i])
+ if (indices[i] == actualShape[i])
{
indices[i] = 0;
++indices[i - 1];
}
}
-
- if (indices[0] == a.shape()[0])
+ if (indices[0] == actualShape[0])
{
break;
}
+
+ index++;
}
armnn::PredicateResult comparisonResult(true);
@@ -178,64 +213,14 @@ armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
return comparisonResult;
}
-
-// Creates a boost::multi_array with the shape defined by the given TensorInfo.
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo)
-{
- std::array<unsigned int, n> shape;
-
- for (unsigned int i = 0; i < n; i++)
- {
- shape[i] = tensorInfo.GetShape()[i];
- }
-
- return boost::multi_array<T, n>(shape);
-}
-
-// Creates a boost::multi_array with the shape defined by the given TensorInfo and contents defined by the given vector.
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeTensor(
- const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat, bool isDynamic = false)
-{
- if (!isDynamic)
- {
- ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
- }
-
- std::array<unsigned int, n> shape;
-
- // NOTE: tensorInfo.GetNumDimensions() might be different from n
- const unsigned int returnDimensions = static_cast<unsigned int>(n);
- const unsigned int actualDimensions = tensorInfo.GetNumDimensions();
-
- const unsigned int paddedDimensions =
- returnDimensions > actualDimensions ? returnDimensions - actualDimensions : 0u;
-
- for (unsigned int i = 0u; i < returnDimensions; i++)
- {
- if (i < paddedDimensions)
- {
- shape[i] = 1u;
- }
- else
- {
- shape[i] = tensorInfo.GetShape()[i - paddedDimensions];
- }
- }
-
- boost::const_multi_array_ref<T, n> arrayRef(&flat[0], shape);
- return boost::multi_array<T, n>(arrayRef);
-}
-
-template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
- unsigned int seed,
- float min = -10.0f,
- float max = 10.0f)
+template <typename T>
+std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
+ unsigned int seed,
+ float min = -10.0f,
+ float max = 10.0f)
{
- boost::random::mt19937 gen(seed);
- boost::random::uniform_real_distribution<float> dist(min, max);
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> dist(min, max);
std::vector<float> init(tensorInfo.GetNumElements());
for (unsigned int i = 0; i < init.size(); i++)
@@ -246,5 +231,5 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
const float qScale = tensorInfo.GetQuantizationScale();
const int32_t qOffset = tensorInfo.GetQuantizationOffset();
- return MakeTensor<T, n>(tensorInfo, armnnUtils::QuantizedVector<T>(init, qScale, qOffset));
+ return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
}
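The rewritten comparison advances a flat index in lock-step with an n-dimensional odometer over the shape; the invariant it relies on is ordinary row-major addressing. A standalone sketch of that mapping (a hypothetical helper, not part of this patch):

    #include <cstddef>
    #include <vector>

    // Row-major flat offset of coordinates `indices` within `shape`.
    unsigned int FlatIndex(const std::vector<unsigned int>& indices,
                           const std::vector<unsigned int>& shape)
    {
        unsigned int offset = 0;
        for (std::size_t i = 0; i < shape.size(); ++i)
        {
            offset = offset * shape[i] + indices[i]; // fold in one dimension per step
        }
        return offset;
    }

    // For shape {2, 3}, coordinates {1, 2} give 1 * 3 + 2 = 5, matching the
    // element-by-element index++ walk in CompareTensors above.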
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index b55b13d4c8..bb91c4d055 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -11,7 +11,9 @@
#include <backendsCommon/test/LayerTests.hpp>
#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
+
#include "TensorHelpers.hpp"
+
#include <boost/test/unit_test.hpp>
inline void ConfigureLoggingTest()
@@ -38,11 +40,15 @@ template <typename T, std::size_t n>
void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
{
bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
- BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.supported,
- "The test name does not match the supportedness it is reporting");
- if (testResult.supported)
+ BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
+ "The test name does not match the supportedness it is reporting");
+ if (testResult.m_Supported)
{
- auto result = CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean);
+ auto result = CompareTensors(testResult.m_ActualData,
+ testResult.m_ExpectedData,
+ testResult.m_ActualShape,
+ testResult.m_ExpectedShape,
+ testResult.m_CompareBoolean);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
@@ -53,11 +59,14 @@ void CompareTestResultIfSupported(const std::string& testName, const std::vector
bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
for (unsigned int i = 0; i < testResult.size(); ++i)
{
- BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].supported,
- "The test name does not match the supportedness it is reporting");
- if (testResult[i].supported)
+ BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
+ "The test name does not match the supportedness it is reporting");
+ if (testResult[i].m_Supported)
{
- auto result = CompareTensors(testResult[i].output, testResult[i].outputExpected);
+ auto result = CompareTensors(testResult[i].m_ActualData,
+ testResult[i].m_ExpectedData,
+ testResult[i].m_ActualShape,
+ testResult[i].m_ExpectedShape);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
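The matching LayerTestResult rework lives in LayerTestResult.hpp, whose hunk is not shown here. Pieced together from the call sites in this patch, the struct now looks roughly like the following sketch; the member names come straight from the diff, but the exact constructor set is an assumption:

    template <typename T, std::size_t n>
    struct LayerTestResult
    {
        LayerTestResult(const std::vector<T>& actualData,
                        const std::vector<T>& expectedData,
                        const armnn::TensorShape& actualShape,
                        const armnn::TensorShape& expectedShape)
            : m_ActualData(actualData)
            , m_ExpectedData(expectedData)
            , m_ActualShape(actualShape)
            , m_ExpectedShape(expectedShape)
            , m_Supported(true)
            , m_CompareBoolean(false)
        {}

        std::vector<T> m_ActualData;      // what the workload produced
        std::vector<T> m_ExpectedData;    // the reference values
        armnn::TensorShape m_ActualShape;
        armnn::TensorShape m_ExpectedShape;
        bool m_Supported;
        bool m_CompareBoolean;
    };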
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 5f5ec1c5f4..a62cb96eb6 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -20,6 +20,7 @@
#include <fmt/format.h>
+#include <vector>
using armnnDeserializer::IDeserializer;
using TensorRawPtr = armnnSerializer::TensorInfo*;
@@ -218,14 +219,14 @@ void ParserFlatbuffersSerializeFixture::RunTest(
}
// Allocate storage for the output tensors to be written to and setup the armnn output tensors.
- std::map<std::string, boost::multi_array<OutputDataType, NumOutputDimensions>> outputStorage;
+ std::map<std::string, std::vector<OutputDataType>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
{
armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnOutputType);
- outputStorage.emplace(it.first, MakeTensor<OutputDataType, NumOutputDimensions>(bindingInfo.second));
+ outputStorage.emplace(it.first, std::vector<OutputDataType>(bindingInfo.second.GetNumElements()));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
}
@@ -237,8 +238,9 @@ void ParserFlatbuffersSerializeFixture::RunTest(
{
armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
- auto outputExpected = MakeTensor<OutputDataType, NumOutputDimensions>(bindingInfo.second, it.second);
- auto result = CompareTensors(outputExpected, outputStorage[it.first]);
+ auto outputExpected = it.second;
+ auto result = CompareTensors(outputExpected, outputStorage[it.first],
+ bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index f333ac0d40..196af190fd 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -293,7 +293,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
// Allocate storage for the output tensors to be written to and setup the armnn output tensors.
- std::map<std::string, boost::multi_array<DataType2, NumOutputDimensions>> outputStorage;
+ std::map<std::string, std::vector<DataType2>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
{
@@ -309,7 +309,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
it.first));
armnn::VerifyTensorInfoDataType(outputTensorInfo, armnnType2);
- outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(outputTensorInfo));
+ outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
outputTensors.push_back(
{ outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
}
@@ -320,8 +320,10 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : expectedOutputData)
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second, isDynamic);
- auto result = CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic);
+ auto outputExpected = it.second;
+ auto result = CompareTensors(outputExpected, outputStorage[it.first],
+ bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
+ false, isDynamic);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
@@ -393,7 +395,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);
// Allocate storage for the output tensors to be written to and setup the armnn output tensors.
- std::map<std::string, boost::multi_array<DataType2, NumOutputDimensions>> outputStorage;
+ std::map<std::string, std::vector<DataType2>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
{
@@ -409,7 +411,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
it.first));
armnn::VerifyTensorInfoDataType(outputTensorInfo, outputType);
- outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(outputTensorInfo));
+ outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.GetNumElements()));
outputTensors.push_back(
{ outputBindingId, armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
}
@@ -420,8 +422,9 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : expectedOutputData)
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second);
- auto result = CompareTensors(outputExpected, outputStorage[it.first], false);
+ auto outputExpected = it.second;
+ auto result = CompareTensors(outputExpected, outputStorage[it.first],
+ bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), false);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
\ No newline at end of file
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index ad991efa36..0ff7e59ac2 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -193,12 +193,12 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
}
// Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
- std::map<std::string, boost::multi_array<T, NumOutputDimensions>> outputStorage;
+ std::map<std::string, std::vector<T>> outputStorage;
armnn::OutputTensors outputTensors;
for (auto&& it : expectedOutputData)
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
- outputStorage.emplace(it.first, MakeTensor<T, NumOutputDimensions>(bindingInfo.second));
+ outputStorage.emplace(it.first, std::vector<T>(bindingInfo.second.GetNumElements()));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
}
@@ -252,15 +252,16 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
}
}
- auto outputExpected = MakeTensor<T, NumOutputDimensions>(bindingInfo.second, it.second);
+ auto outputExpected = it.second;
+ auto shape = bindingInfo.second.GetShape();
if (std::is_same<T, uint8_t>::value)
{
- auto result = CompareTensors(outputExpected, outputStorage[it.first], true);
+ auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape, true);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
else
{
- auto result = CompareTensors(outputExpected, outputStorage[it.first]);
+ auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape);
BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
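All three parser fixtures now allocate output storage the same way: size a flat vector from the binding's TensorInfo and hand its buffer to an armnn::Tensor. A condensed sketch of the pattern, with bindingInfo assumed to come from the parser as in the hunks above:

    std::vector<T> storage(bindingInfo.second.GetNumElements()); // flat buffer, no multi_array
    outputTensors.push_back(
        { bindingInfo.first, armnn::Tensor(bindingInfo.second, storage.data()) });

    // ... EnqueueWorkload(...) fills the buffer ...

    auto result = CompareTensors(expected, storage,
                                 bindingInfo.second.GetShape(),  // expected and actual share
                                 bindingInfo.second.GetShape()); // the binding's shape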
diff --git a/src/backends/aclCommon/test/MemCopyTestImpl.hpp b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
index 1f542d24b4..91ba4eae17 100644
--- a/src/backends/aclCommon/test/MemCopyTestImpl.hpp
+++ b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
@@ -15,8 +15,6 @@
#include <test/TensorHelpers.hpp>
-#include <boost/multi_array.hpp>
-
namespace
{
@@ -28,21 +26,20 @@ LayerTestResult<T, 4> MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory,
const std::array<unsigned int, 4> shapeData = { { 1u, 1u, 6u, 5u } };
const armnn::TensorShape tensorShape(4, shapeData.data());
const armnn::TensorInfo tensorInfo(tensorShape, dataType);
- boost::multi_array<T, 4> inputData = MakeTensor<T, 4>(tensorInfo, std::vector<T>(
- {
- 1, 2, 3, 4, 5,
- 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30,
- })
- );
+ std::vector<T> inputData =
+ {
+ 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30,
+ };
LayerTestResult<T, 4> ret(tensorInfo);
- ret.outputExpected = inputData;
+ ret.m_ExpectedData = inputData;
- boost::multi_array<T, 4> outputData(shapeData);
+ std::vector<T> actualOutput(tensorInfo.GetNumElements());
ARMNN_NO_DEPRECATE_WARN_BEGIN
auto inputTensorHandle = srcWorkloadFactory.CreateTensorHandle(tensorInfo);
@@ -71,8 +68,8 @@ LayerTestResult<T, 4> MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory,
dstWorkloadFactory.CreateMemCopy(memCopyQueueDesc, workloadInfo)->Execute();
- CopyDataFromITensorHandle(outputData.data(), workloadOutput.get());
- ret.output = outputData;
+ CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get());
+ ret.m_ActualData = actualOutput;
return ret;
}
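This hunk shows the second way the reworked LayerTestResult gets populated: construct it from a TensorInfo and assign the data members afterwards, rather than calling the four-argument constructor. A brief annotated restatement (that the TensorInfo constructor seeds both shape members is an assumption, inferred from the comparison succeeding without explicit shapes):

    LayerTestResult<T, 4> ret(tensorInfo);  // assumed to set m_ActualShape and m_ExpectedShape
    ret.m_ExpectedData = inputData;         // a mem copy must return its input unchanged
    // ... execute the MemCopy workload between the two factories ...
    ret.m_ActualData = actualOutput;        // read back via CopyDataFromITensorHandle
    return ret;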
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index ffba19323a..7612cbfe28 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -48,7 +48,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -56,7 +57,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -64,7 +66,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -72,7 +75,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp
index d28174d6a6..c61f3f097e 100644
--- a/src/backends/backendsCommon/test/ActivationFixture.hpp
+++ b/src/backends/backendsCommon/test/ActivationFixture.hpp
@@ -11,20 +11,13 @@
#include <test/TensorHelpers.hpp>
-#include <boost/multi_array.hpp>
-
struct ActivationFixture
{
ActivationFixture()
{
- auto boostArrayExtents = boost::extents
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
- output.resize(boostArrayExtents);
- outputExpected.resize(boostArrayExtents);
- input.resize(boostArrayExtents);
+ output.resize(batchSize * channels * height * width);
+ outputExpected.resize(batchSize * channels * height * width);
+ input.resize(batchSize * channels * height * width);
unsigned int inputShape[] = { batchSize, channels, height, width };
unsigned int outputShape[] = { batchSize, channels, height, width };
@@ -32,7 +25,7 @@ struct ActivationFixture
inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- input = MakeRandomTensor<float, 4>(inputTensorInfo, 21453);
+ input = MakeRandomTensor<float>(inputTensorInfo, 21453);
}
unsigned int width = 17;
@@ -40,9 +33,9 @@ struct ActivationFixture
unsigned int channels = 2;
unsigned int batchSize = 5;
- boost::multi_array<float, 4> output;
- boost::multi_array<float, 4> outputExpected;
- boost::multi_array<float, 4> input;
+ std::vector<float> output;
+ std::vector<float> outputExpected;
+ std::vector<float> input;
armnn::TensorInfo inputTensorInfo;
armnn::TensorInfo outputTensorInfo;
@@ -57,6 +50,6 @@ struct PositiveActivationFixture : public ActivationFixture
{
PositiveActivationFixture()
{
- input = MakeRandomTensor<float, 4>(inputTensorInfo, 2342423, 0.0f, 1.0f);
+ input = MakeRandomTensor<float>(inputTensorInfo, 2342423, 0.0f, 1.0f);
}
};
\ No newline at end of file
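The hand-multiplied sizes above are the same quantity the TensorInfo reports; a tiny check with the fixture's dimensions (an equivalence shown for clarity, not code from the patch):

    #include <cassert>

    armnn::TensorInfo info({ 5, 2, 29, 17 }, armnn::DataType::Float32); // batch, channels, height, width
    assert(info.GetNumElements() == 5u * 2u * 29u * 17u);               // 4930 elements either way
    std::vector<float> input(info.GetNumElements());                    // replaces the boost::extents resize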
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index 404a412ca0..c68051c8ca 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -24,14 +24,12 @@
namespace
{
-using MultiArray = const boost::multi_array<uint8_t, 2>&;
-
-armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
- MultiArray expectedOutput)
+armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
+ armnn::TensorShape& outputExpectedShape)
{
- auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);
+ auto batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ auto inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ auto outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
float inputOutputScale = 0.0078125f;
int32_t inputOutputOffset = 128;
@@ -182,26 +180,21 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
std::vector<uint8_t> inputVector = {166, 179, 50, 150};
armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);
std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
- boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);
std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);
std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
- boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);
std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
// Builds up the structure of the network
- armnn::INetworkPtr net = CreateQuantizedLstmNetwork(input, outputStateOut);
+ armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());
BOOST_TEST_CHECKPOINT("create a network");
@@ -227,8 +220,8 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
outputTensors.reserve(2);
//output
- std::vector<int16_t > cellStateOutResult(cellStateOutVector.size());
- std::vector<uint8_t > outputStateOutResult(outputStateOutVector.size());
+ std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
+ std::vector<uint8_t> outputStateOutResult(outputStateOutVector.size());
outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())});
outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())});
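CreateQuantizedLstmNetwork only ever read the shapes of its former multi_array arguments, so passing the TensorShapes directly is behaviour-preserving; the derived sizes are unchanged:

    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);  // batchSize = 2, inputSize = 2
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8); // batchSize = 2, outputSize = 4
    armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());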
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 6d83b1ca99..54052073a9 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -17,8 +17,6 @@
#include <test/TensorHelpers.hpp>
-#include <boost/multi_array.hpp>
-
#include <algorithm>
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -58,9 +56,7 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
outputTensorInfo.SetQuantizationOffset(outputOffset);
}
- LayerTestResult<T, 4> result(inputTensorInfo);
-
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -80,15 +76,16 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
@@ -245,7 +242,7 @@ struct BoundedReLuRandomInputTestTraits
}
};
-boost::multi_array<float, 4> BoundedReLuRandomInputTest(
+std::vector<float> BoundedReLuRandomInputTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
@@ -257,11 +254,10 @@ boost::multi_array<float, 4> BoundedReLuRandomInputTest(
const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
- boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
-
// Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
// range [lowerBound, upperBound].
- auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
+ std::vector<float> input = MakeRandomTensor<float>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -278,13 +274,13 @@ boost::multi_array<float, 4> BoundedReLuRandomInputTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return output;
+ return actualOutput;
}
} // namespace
@@ -305,16 +301,16 @@ LayerTestResult<float, 4> CompareBoundedReLuTest(
activationDescriptor.m_A = upperBound;
activationDescriptor.m_B = lowerBound;
- result.output = BoundedReLuRandomInputTest(
+ result.m_ActualData = BoundedReLuRandomInputTest(
workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
- result.outputExpected = BoundedReLuRandomInputTest(
+ result.m_ExpectedData = BoundedReLuRandomInputTest(
refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
return result;
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T,4> ConstantLinearActivationTestCommon(
+LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
@@ -344,7 +340,6 @@ LayerTestResult<T,4> ConstantLinearActivationTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -362,17 +357,20 @@ LayerTestResult<T,4> ConstantLinearActivationTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- workload->Execute();
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ workload->Execute();
- // Ensure output equals input.
- ret.outputExpected = input;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ // Use the input as the expected data, since the linear activation leaves the tensor unchanged.
+ return LayerTestResult<T, 4>(actualOutput,
+ input,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 4> ConstantLinearActivationTest(
@@ -441,9 +439,11 @@ LayerTestResult<T, 4> SimpleActivationTest(
outputTensorInfo.SetQuantizationOffset(outOffset);
}
- LayerTestResult<T, 4> result(inputTensorInfo);
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);
- auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
+ // The expected output data is calculated manually by each caller.
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -463,17 +463,16 @@ LayerTestResult<T, 4> SimpleActivationTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- // Calculated manually.
- result.outputExpected =
- MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputExpected,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -497,8 +496,8 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
{
return 1.0f / (1.0f + std::exp(-value));
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -511,7 +510,7 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
inputData,
1.f / 256.f,
0,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<float, 4> SimpleSigmoidTest(
@@ -561,8 +560,8 @@ LayerTestResult<T, 4> ReLuTestCommon(
{
return std::fmax(0.0f, value);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -575,7 +574,7 @@ LayerTestResult<T, 4> ReLuTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<int16_t, 4> ReLuInt16Test(
@@ -625,8 +624,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
{
return std::min(a, std::max(b, value));
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -639,7 +638,7 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
@@ -672,8 +671,8 @@ LayerTestResult<T, 4> SoftReLuTestCommon(
{
return std::log(1.0f + std::exp(value));
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -686,7 +685,7 @@ LayerTestResult<T, 4> SoftReLuTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<float, 4> SoftReLuTest(
@@ -735,8 +734,8 @@ LayerTestResult<T, 4> LeakyReLuTestCommon(
{
return value > 0.0f ? value : (value * a);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -749,7 +748,7 @@ LayerTestResult<T, 4> LeakyReLuTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<float, 4> LeakyReLuTest(
@@ -797,8 +796,8 @@ LayerTestResult<T, 4> AbsTestCommon(
{
return std::abs(value);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> outputExpected(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -811,7 +810,7 @@ LayerTestResult<T, 4> AbsTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ outputExpected);
}
LayerTestResult<float, 4> AbsTest(
@@ -856,17 +855,15 @@ LayerTestResult<float, 5> SqrtNNTest(
{
return std::sqrt(value);
};
- std::vector<float> outputExpectedData(inputDataSize);
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputDataSize);
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
armnn::TensorInfo inputTensorInfo(
{ 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo(
{ 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
- LayerTestResult<float, 5> result(inputTensorInfo);
-
- auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -883,16 +880,16 @@ LayerTestResult<float, 5> SqrtNNTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
-
- // Calculated manually.
- result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<float, 5>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
};
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -915,8 +912,8 @@ LayerTestResult<T, 4> SqrtTestCommon(
{
return std::sqrt(value);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -929,7 +926,7 @@ LayerTestResult<T, 4> SqrtTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ expectedOutput);
}
LayerTestResult<float, 4> SqrtTest(
@@ -976,8 +973,8 @@ LayerTestResult<T, 4> SquareTestCommon(
{
return std::pow(value,2);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -990,7 +987,7 @@ LayerTestResult<T, 4> SquareTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ expectedOutput);
}
LayerTestResult<float, 4> SquareTest(
@@ -1040,8 +1037,8 @@ LayerTestResult<T, 4> TanhTestCommon(
{
return a * tanhf(b * value);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -1054,7 +1051,7 @@ LayerTestResult<T, 4> TanhTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ expectedOutput);
}
LayerTestResult<float, 4> TanhTest(
@@ -1104,8 +1101,8 @@ LayerTestResult<T, 4> EluTestCommon(
{
return (value >= 0) ? value : a * (expf(value) - 1);
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -1118,7 +1115,7 @@ LayerTestResult<T, 4> EluTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ expectedOutput);
}
LayerTestResult<float, 4> EluTest(
@@ -1172,8 +1169,8 @@ LayerTestResult<T, 4> HardSwishTestCommon(
float result = hardSwish_step1 / 6;
return result;
};
- std::vector<float> outputExpectedData(inputData.size());
- std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+ std::vector<float> expectedOutput(inputData.size());
+ std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
return SimpleActivationTest<ArmnnType>(workloadFactory,
memoryManager,
@@ -1186,7 +1183,7 @@ LayerTestResult<T, 4> HardSwishTestCommon(
inputData,
qScale,
qOffset,
- outputExpectedData);
+ expectedOutput);
}
LayerTestResult<float, 4> HardSwishTest(
@@ -1216,7 +1213,7 @@ LayerTestResult<int16_t, 4> HardSwishInt16Test(
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T,4> CompareActivationTestImpl(
+LayerTestResult<T, 4> CompareActivationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
@@ -1258,17 +1255,9 @@ LayerTestResult<T,4> CompareActivationTestImpl(
minVal = 0.f;
}
- boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
-
-
- LayerTestResult<T,4> ret(outputTensorInfo);
- auto boostArrayExtents = boost::extents
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
- [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
- ret.output.resize(boostArrayExtents);
- ret.outputExpected.resize(boostArrayExtents);
+ std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -1299,19 +1288,23 @@ LayerTestResult<T,4> CompareActivationTestImpl(
inputHandleRef->Allocate();
outputHandleRef->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
workload->Execute();
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
- return ret;
}
-LayerTestResult<float,4> CompareActivationTest(
+LayerTestResult<float, 4> CompareActivationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
@@ -1325,7 +1318,7 @@ LayerTestResult<float,4> CompareActivationTest(
refTensorHandleFactory, f, batchSize);
}
-LayerTestResult<uint8_t,4> CompareActivationUint8Test(
+LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
@@ -1338,7 +1331,7 @@ LayerTestResult<uint8_t,4> CompareActivationUint8Test(
tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
}
-LayerTestResult<int16_t,4> CompareActivationInt16Test(
+LayerTestResult<int16_t, 4> CompareActivationInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
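Every activation case above derives its reference data the same way: apply the scalar function with std::transform and let SimpleActivationTest quantize both sides. A minimal standalone version of that pattern, using ReLU as the example function:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
                                      0.1f,  0.2f,  0.3f,  0.4f };

    // Reference implementation of the activation under test.
    auto f = [](float value) { return std::fmax(0.0f, value); };

    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);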
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index 0e1b7336de..ce8f74d2e0 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -186,7 +186,7 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
+ auto input1 = armnnUtils::QuantizedVector<T>(
{
0.0f,
1.0f,
@@ -197,17 +197,18 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
4.0f,
5.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
+ auto input2 = armnnUtils::QuantizedVector<T>(
{
0.5f, 1.5f, 2.5f,
3.5f, 4.5f, 5.5f,
},
- qScale, qOffset));
+ qScale, qOffset);
- LayerTestResult<T,4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ auto expectedOutput = armnnUtils::QuantizedVector<T>(
{
0.5f, 1.5f, 2.5f,
4.5f, 5.5f, 6.5f,
@@ -218,7 +219,7 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
4.5f, 5.5f, 6.5f,
8.5f, 9.5f, 10.5f,
},
- qScale, qOffset));
+ qScale, qOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
@@ -236,15 +237,18 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -270,7 +274,7 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
+ auto input1 = armnnUtils::QuantizedVector<T>(
{
0.0f, 1.0f, 2.0f,
3.0f, 4.0f, 5.0f,
@@ -279,16 +283,17 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
12.0f, 13.0f, 14.0f,
15.0f, 16.0f, 17.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
+ auto input2 = armnnUtils::QuantizedVector<T>(
{
0.5f,
},
- qScale, qOffset));
+ qScale, qOffset);
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- LayerTestResult<T,4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
+ auto expectedOutput = armnnUtils::QuantizedVector<T>(
{
0.5f, 1.5f, 2.5f,
3.5f, 4.5f, 5.5f,
@@ -297,7 +302,7 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
12.5f, 13.5f, 14.5f,
15.5f, 16.5f, 17.5f,
},
- qScale, qOffset));
+ qScale, qOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
@@ -315,15 +320,18 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 4> AdditionBroadcastTest(
@@ -545,11 +553,10 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
- boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
- {1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- });
+ std::vector<float> poolingInput = {1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ };
std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
@@ -575,37 +582,26 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
// Create the MaxPool
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
- //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
- auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
- boost::multi_array<float, 4> resultMaxPool;
- resultMaxPool.resize(shape);
-
+ std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
// Create addition with another tensor of the same size.
// This would be the result of applying a Conv2d with a ones(2) kernel and stride 1x1
// to the initial tensor.
// 12, 16
// 24, 28
+ armnn::TensorInfo addInputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
+ armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
- armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
- armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
-
- boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
- {12, 16,
- 24, 28,
- });
+ std::vector<float> addInput = { 12, 16,
+ 24, 28 };
// Expected output tensor after MaxPool and Addition.
- LayerTestResult<float,4> addRet(addOutputTensorInfo);
- addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
- {
- 13, 19,
- 31, 37
- }));
+ std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput = { 13, 19,
+ 31, 37 };
std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
- tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
armnn::AdditionQueueDescriptor data;
armnn::WorkloadInfo info;
@@ -622,20 +618,23 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
addInputHandle->Allocate();
addOutputHandle->Allocate();
- CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
- CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
+ CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data());
+ CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get());
- CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
- CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
+ CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data());
+ CopyDataToITensorHandle(addInputHandle.get(), addInput.data());
workload->PostAllocationConfigure();
workload->Execute();
addWorkload->PostAllocationConfigure();
addWorkload->Execute();
- CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get());
- return addRet;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ addOutputHandle->GetShape(),
+ addOutputTensorInfo.GetShape());
}
LayerTestResult<float,4> CompareAdditionTest(
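For reference, the addInput values quoted in the comment inside AdditionAfterMaxPoolTest are the stride-1 window sums of a 2x2 ones kernel over the 3x3 poolingInput; this is a worked check of those values, not code from the patch:

    1+2+4+5 = 12    2+3+5+6 = 16
    4+5+7+8 = 24    5+6+8+9 = 28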
@@ -660,10 +659,11 @@ LayerTestResult<float,4> CompareAdditionTest(
inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
- auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
+ auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
+ auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);
- LayerTestResult<float,4> ret(outputTensorInfo);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
@@ -695,18 +695,21 @@ LayerTestResult<float,4> CompareAdditionTest(
inputHandle2Ref->Allocate();
outputHandleRef->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
+ CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
workloadRef->PostAllocationConfigure();
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
\ No newline at end of file
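Every function in this file now follows the same shape: build flat std::vectors, copy them with .data() (boost::multi_array storage was contiguous, so &input[0][0][0][0] and input.data() addressed the same first element, which is why the copies behave identically), run the workload, and construct the result from data plus shapes. The constructor implied by these call sites looks roughly like the sketch below; the member names m_ActualData and m_ExpectedData are taken from the ConcatTestImpl.cpp hunks further down, the trailing bool mirrors the five-argument call in ComparisonTestImpl.cpp, and the real definition in LayerTestResult.hpp may differ in detail:

#include <armnn/Tensor.hpp>
#include <cstddef>
#include <vector>

// NumDims is carried in the type for the comparison helpers.
template <typename T, std::size_t NumDims>
struct LayerTestResultSketch
{
    LayerTestResultSketch(const std::vector<T>& actualData,
                          const std::vector<T>& expectedData,
                          const armnn::TensorShape& actualShape,
                          const armnn::TensorShape& expectedShape,
                          bool compareBoolean = false)
        : m_ActualData(actualData)
        , m_ExpectedData(expectedData)
        , m_ActualShape(actualShape)       // from outputHandle->GetShape()
        , m_ExpectedShape(expectedShape)   // from outputTensorInfo.GetShape()
        , m_CompareBoolean(compareBoolean)
    {}

    std::vector<T> m_ActualData;   // copied back from the output ITensorHandle
    std::vector<T> m_ExpectedData; // reference values
    armnn::TensorShape m_ActualShape;
    armnn::TensorShape m_ExpectedShape;
    bool m_CompareBoolean;         // compare element-wise as booleans
};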
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index d63cc04e99..34b2539c32 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -27,10 +27,8 @@ LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
const std::vector<int32_t>& outputData,
int axis = 3)
{
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
-
- LayerTestResult<int32_t, 3> result(outputTensorInfo);
- result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
+ std::vector<T> inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
+ std::vector<int32_t> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -48,14 +46,17 @@ LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<int32_t, 3>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // namespace
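ConvertToDataType, used here and again in the BatchToSpaceNd hunks below, takes float reference data plus the target TensorInfo; a plausible reading is that it quantizes each element with the info's scale and offset. A minimal sketch under that assumption (the real helper lives in the shared test utilities, and its handling of non-quantized types is not shown in this patch):

#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <vector>

template <typename T>
std::vector<T> ConvertToDataTypeSketch(const std::vector<float>& data,
                                       const armnn::TensorInfo& info)
{
    std::vector<T> converted;
    converted.reserve(data.size());
    for (float value : data)
    {
        // armnn::Quantize rounds to nearest and clamps to T's range
        converted.push_back(armnn::Quantize<T>(value,
                                               info.GetQuantizationScale(),
                                               info.GetQuantizationOffset()));
    }
    return converted;
}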
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index 969d5dbcd1..4311faff4e 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -58,18 +58,16 @@ LayerTestResult<T, 4> BatchNormTestImpl(
tensorInfo.SetQuantizationOffset(qOffset);
}
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));
+ auto inputTensor = QuantizedVector<T>(inputValues, qScale, qOffset);
// These values are per-channel of the input.
- auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
- auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
- auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
- auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
+ auto mean = QuantizedVector<T>({ 3, -2 }, qScale, qOffset);
+ auto variance = QuantizedVector<T>({ 4, 9 }, qScale, qOffset);
+ auto beta = QuantizedVector<T>({ 3, 2 }, qScale, qOffset);
+ auto gamma = QuantizedVector<T>({ 2, 1 }, qScale, qOffset);
- LayerTestResult<T, 4> result(outputTensorInfo);
-
- result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -88,10 +86,10 @@ LayerTestResult<T, 4> BatchNormTestImpl(
descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
- AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
- AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
- AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
- AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+ AllocateAndCopyDataToITensorHandle(&meanTensor, mean.data());
+ AllocateAndCopyDataToITensorHandle(&varianceTensor, variance.data());
+ AllocateAndCopyDataToITensorHandle(&betaTensor, beta.data());
+ AllocateAndCopyDataToITensorHandle(&gammaTensor, gamma.data());
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -101,13 +99,16 @@ LayerTestResult<T, 4> BatchNormTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -140,20 +141,19 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
tensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>(
+ auto input = QuantizedVector<T>(
{
1.f, 1.f, 4.f, 1.f,
4.f, 4.f, 2.f, 1.f,
1.f, -2.f, 6.f, 4.f
},
- qScale, qOffset));
+ qScale, qOffset);
+
// These values are per-channel of the input.
- auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
- auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
- auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
- auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
- LayerTestResult<T,4> ret(outputTensorInfo);
+ auto mean = QuantizedVector<T>({ 3, -2 }, qScale, qOffset);
+ auto variance = QuantizedVector<T>({ 4, 9 }, qScale, qOffset);
+ auto beta = QuantizedVector<T>({ 3, 2 }, qScale, qOffset);
+ auto gamma = QuantizedVector<T>({ 2, 1 }, qScale, qOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -179,30 +179,34 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
data.m_Parameters.m_Eps = 0.0f;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
// For each channel:
// subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
// multiply by gamma and add beta
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>(
+ std::vector<T> expectedOutput = QuantizedVector<T>(
{
1.f, 3.f, 4.f, 3.f,
4.f, 4.f, 2.f, 3.f,
1.f, 2.f, 6.f, 4.f
},
- qScale, qOffset));
+ qScale, qOffset);
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
@@ -627,14 +631,15 @@ LayerTestResult<float,4> CompareBatchNormTest(
outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
- auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
+ auto input = MakeRandomTensor<float>(inputTensorInfo, 21312);
- auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
- auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
- auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
- auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
+ auto mean = MakeRandomTensor<float>(tensorInfo, 123);
+ auto variance = MakeRandomTensor<float>(tensorInfo, 234, 0.0f);
+ auto beta = MakeRandomTensor<float>(tensorInfo, 123);
+ auto gamma = MakeRandomTensor<float>(tensorInfo, 345);
- LayerTestResult<float,4> ret(outputTensorInfo);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -675,16 +680,19 @@ LayerTestResult<float,4> CompareBatchNormTest(
inputHandleRef->Allocate();
outputHandleRef->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
workload->PostAllocationConfigure();
workload->Execute();
workloadRef->PostAllocationConfigure();
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
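The comment in BatchNormTestNhwcImpl spells out the computation behind the expected values: y = gamma * (x - mean) / sqrt(variance + eps) + beta, applied per channel. With mean {3, -2}, variance {4, 9}, beta {3, 2}, gamma {2, 1} and eps 0, the first input pair {1, 1} maps to 2 * (1 - 3) / sqrt(4) + 3 = 1 and 1 * (1 - (-2)) / sqrt(9) + 2 = 3, matching the start of expectedOutput. A free-standing sketch of that reference computation, not part of the patch:

#include <cmath>
#include <cstddef>
#include <vector>

// NHWC layout: the channel index is the innermost (fastest-varying) dimension.
std::vector<float> BatchNormNhwcReference(const std::vector<float>& input,
                                          const std::vector<float>& mean,
                                          const std::vector<float>& variance,
                                          const std::vector<float>& beta,
                                          const std::vector<float>& gamma,
                                          float eps)
{
    const std::size_t numChannels = mean.size();
    std::vector<float> output(input.size());
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        const std::size_t c = i % numChannels;
        output[i] = gamma[c] * (input[i] - mean[c]) / std::sqrt(variance[c] + eps)
                  + beta[c];
    }
    return output;
}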
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 9d539975c7..3669281d48 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -51,11 +51,10 @@ LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
outputTensorInfo.SetQuantizationScale(scale);
outputTensorInfo.SetQuantizationOffset(offset);
- auto input = MakeTensor<T, InputDim>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+ std::vector<T> input = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
- LayerTestResult<T, OutputDim> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo,
- ConvertToDataType<ArmnnType>(outputData, outputTensorInfo));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(outputData, outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -73,14 +72,17 @@ LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, OutputDim>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
index ad23b8c767..aec57dbad1 100644
--- a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
@@ -31,10 +31,7 @@ LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
outputTensorInfo.SetQuantizationOffset(quantizationOffset);
}
- auto input = MakeTensor<TInput, 4>(inputTensorInfo, inputValues);
-
- LayerTestResult<TOutput, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outputValues);
+ std::vector<TOutput> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -49,13 +46,16 @@ LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<TOutput, 4>(actualOutput,
+ outputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 4> CastInt32ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index be44234b76..68bc588860 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -52,10 +52,7 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
- auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<InType, NumDims>(inputTensorInfo1, values1);
-
- LayerTestResult<uint8_t, NumDims> ret(outputTensorInfo);
+ std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
@@ -75,18 +72,19 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
inputHandle1->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
- CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
+ CopyDataToITensorHandle(inputHandle0.get(), values0.data());
+ CopyDataToITensorHandle(inputHandle1.get(), values1.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
-
- ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, outValues);
- ret.compareBoolean = true;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, NumDims>(actualOutput,
+ outValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape(),
+ true);
}
template <std::size_t NumDims,
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index d486bc0c19..3eca27364d 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -428,9 +428,9 @@ LayerTestResult<T, 1> Concat1dTestImpl(
{
TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
- auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
- auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));
+ auto input0 = QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset);
+ auto input1 = QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset);
+ auto input2 = QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset);
TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
@@ -446,12 +446,12 @@ LayerTestResult<T, 1> Concat1dTestImpl(
0,
true);
- result.output = MakeTensor<T, 1>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -468,7 +468,7 @@ LayerTestResult<T, 2> Concat2dTestImpl(
{
TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -476,9 +476,9 @@ LayerTestResult<T, 2> Concat2dTestImpl(
// Batch 1
10.0f, 11.0f, 12.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0
4.0f, 5.0f, 6.0f,
@@ -486,9 +486,9 @@ LayerTestResult<T, 2> Concat2dTestImpl(
// Batch 1
13.0f, 14.0f, 15.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0
7.0f, 8.0f, 9.0f,
@@ -496,7 +496,7 @@ LayerTestResult<T, 2> Concat2dTestImpl(
// Batch 1
16.0f, 17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
LayerTestResult<T, 2> result(outputTensorInfo);
@@ -510,7 +510,7 @@ LayerTestResult<T, 2> Concat2dTestImpl(
dimension,
true);
- result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+ result.m_ActualData = output;
return result;
}
@@ -527,7 +527,7 @@ LayerTestResult<T, 2> Concat2dDim0TestImpl(
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -547,7 +547,7 @@ LayerTestResult<T, 2> Concat2dDim0TestImpl(
// Batch 5
16.0f, 17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -565,7 +565,7 @@ LayerTestResult<T, 2> Concat2dDim1TestImpl(
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
@@ -573,7 +573,7 @@ LayerTestResult<T, 2> Concat2dDim1TestImpl(
// Batch 1
10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -587,7 +587,7 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
int32_t qOffset)
{
TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -595,10 +595,10 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
// Batch 1
10.0f, 11.0f, 12.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0
4.0f, 5.0f, 6.0f,
@@ -609,15 +609,15 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
// Batch 0
7.0f, 8.0f, 9.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
- auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 1
16.0f, 17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result(outputTensorInfo);
@@ -632,8 +632,8 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
0,
true);
- result.output = MakeTensor<T, 2>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -653,7 +653,7 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
// Batch 5
16.0f, 17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -667,7 +667,7 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
int32_t qOffset)
{
TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -675,10 +675,10 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
// Batch 1
10.0f, 11.0f, 12.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0
4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
@@ -686,10 +686,10 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
// Batch 1
13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
- auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0
9.0f,
@@ -697,7 +697,7 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
// Batch 1
18.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result(outputTensorInfo);
@@ -712,8 +712,8 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
1,
true);
- result.output = MakeTensor<T, 2>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
@@ -721,7 +721,7 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
// Batch 1
10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -739,7 +739,7 @@ LayerTestResult<T, 3> Concat3dTestImpl(
{
TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -759,9 +759,9 @@ LayerTestResult<T, 3> Concat3dTestImpl(
// Batch 1, Channel 2
23.0f, 24.0f
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0, Channel 0
7.0f, 8.0f,
@@ -781,9 +781,9 @@ LayerTestResult<T, 3> Concat3dTestImpl(
// Batch 1, Channel 2
29.0f, 30.0f
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0, Channel 0
13.0f, 14.0f,
@@ -803,7 +803,7 @@ LayerTestResult<T, 3> Concat3dTestImpl(
// Batch 1, Channel 2
35.0f, 36.0f
},
- qScale, qOffset));
+ qScale, qOffset);
LayerTestResult<T, 3> result(outputTensorInfo);
@@ -817,7 +817,7 @@ LayerTestResult<T, 3> Concat3dTestImpl(
dimension,
useSubtensor);
- result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+ result.m_ActualData = output;
return result;
}
@@ -834,7 +834,7 @@ LayerTestResult<T, 3> Concat3dDim0TestImpl(
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -890,7 +890,7 @@ LayerTestResult<T, 3> Concat3dDim0TestImpl(
// Batch 5, Channel 2
35.0f, 36.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -908,7 +908,7 @@ LayerTestResult<T, 3> Concat3dDim1TestImpl(
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -964,7 +964,7 @@ LayerTestResult<T, 3> Concat3dDim1TestImpl(
// Batch 1, Channel 8
35.0f, 36.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -983,7 +983,7 @@ LayerTestResult<T, 3> Concat3dDim2TestImpl(
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
@@ -1003,7 +1003,7 @@ LayerTestResult<T, 3> Concat3dDim2TestImpl(
// Batch 1, Channel 2
23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1017,7 +1017,7 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
int32_t qOffset)
{
TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
- auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -1037,10 +1037,10 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
// Batch 1, Channel 2
23.0f, 24.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
- auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0, Channel 0
7.0f, 8.0f,
@@ -1051,10 +1051,10 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
// Batch 0, Channel 2
11.0f, 12.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
- auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0, Channel 0
25.0f, 26.0f,
@@ -1083,7 +1083,7 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
// Batch 2, Channel 2
35.0f, 36.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
@@ -1098,8 +1098,8 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
0,
true);
- result.output = MakeTensor<T, 3>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -1155,7 +1155,7 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
// Batch 5, Channel 2
35.0f, 36.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1169,7 +1169,7 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
int32_t qOffset)
{
TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -1189,10 +1189,10 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
// Batch 1, Channel 2
23.0f, 24.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0, Channel 0
7.0f, 8.0f,
@@ -1218,10 +1218,10 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
// Batch 1, Channel 3
15.0f, 16.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
- auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0, Channel 0
17.0f, 18.0f,
@@ -1229,7 +1229,7 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
// Batch 1, Channel 0
31.0f, 32.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result(outputTensorInfo);
@@ -1244,8 +1244,8 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
1,
true);
- result.output = MakeTensor<T, 3>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -1295,7 +1295,7 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
// Batch 1, Channel 7
31.0f, 32.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1310,7 +1310,7 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
int32_t qOffset)
{
TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -1330,10 +1330,10 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
// Batch 1, Channel 2
23.0f, 24.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
// Batch 0, Channel 0
7.0f,
@@ -1353,10 +1353,10 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
// Batch 1, Channel 2
29.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
- auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
// Batch 0, Channel 0
13.0f, 14.0f, 50.0f,
@@ -1376,7 +1376,7 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
// Batch 1, Channel 2
35.0f, 36.0f, 55.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result(outputTensorInfo);
@@ -1391,8 +1391,8 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
2,
useSubtensor);
- result.output = MakeTensor<T, 3>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
// Batch 0, Channel 0
1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
@@ -1412,7 +1412,7 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
// Batch 1, Channel 2
23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1430,7 +1430,7 @@ LayerTestResult<T, 4> Concat4dTestImpl(
{
TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1439,9 +1439,9 @@ LayerTestResult<T, 4> Concat4dTestImpl(
9.0f, 10.0f,
11.0f, 12.0f
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
11.0f, 12.0f,
13.0f, 14.0f,
@@ -1450,9 +1450,9 @@ LayerTestResult<T, 4> Concat4dTestImpl(
19.0f, 20.0f,
21.0f, 22.0f
},
- qScale, qOffset));
+ qScale, qOffset);
- auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+ auto input2 = QuantizedVector<T>(
{
21.0f, 22.0f,
23.0f, 24.0f,
@@ -1461,7 +1461,7 @@ LayerTestResult<T, 4> Concat4dTestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -1478,7 +1478,7 @@ LayerTestResult<T, 4> Concat4dTestImpl(
dimension,
useSubtensor);
- result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+ result.m_ActualData = output;
return result;
}
@@ -1495,7 +1495,7 @@ LayerTestResult<T, 4> Concat4dDim0TestImpl(
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1518,7 +1518,7 @@ LayerTestResult<T, 4> Concat4dDim0TestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1536,7 +1536,7 @@ LayerTestResult<T, 4> Concat4dDim1TestImpl(
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1559,7 +1559,7 @@ LayerTestResult<T, 4> Concat4dDim1TestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1577,7 +1577,7 @@ LayerTestResult<T, 4> Concat4dDim2TestImpl(
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, true, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1600,7 +1600,7 @@ LayerTestResult<T, 4> Concat4dDim2TestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1619,7 +1619,7 @@ LayerTestResult<T, 4> Concat4dDim3TestImpl(
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
11.0f, 12.0f,
@@ -1642,7 +1642,7 @@ LayerTestResult<T, 4> Concat4dDim3TestImpl(
21.0f, 22.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1658,7 +1658,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
constexpr unsigned int dimension = 0u;
TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1667,11 +1667,11 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
9.0f, 10.0f,
11.0f, 12.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
11.0f, 12.0f,
13.0f, 14.0f,
@@ -1687,7 +1687,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
@@ -1705,8 +1705,8 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
dimension,
true);
- result.output = MakeTensor<T, 4>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1729,7 +1729,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
29.0f, 30.0f,
31.0f, 32.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1745,7 +1745,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
constexpr unsigned int dimension = 1u;
TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1754,18 +1754,18 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
9.0f, 10.0f,
11.0f, 12.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
11.0f, 12.0f,
13.0f, 14.0f,
15.0f, 16.0f,
17.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
@@ -1783,8 +1783,8 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
dimension,
true);
- result.output = MakeTensor<T, 4>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1797,7 +1797,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
15.0f, 16.0f,
17.0f, 18.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1813,7 +1813,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
constexpr unsigned int dimension = 2u;
TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1822,10 +1822,10 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
9.0f, 10.0f,
11.0f, 12.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
11.0f, 12.0f,
13.0f, 14.0f,
@@ -1837,7 +1837,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
25.0f, 26.0f,
27.0f, 28.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -1854,8 +1854,8 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
dimension,
true);
- result.output = MakeTensor<T, 4>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1875,7 +1875,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
25.0f, 26.0f,
27.0f, 28.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1892,7 +1892,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
constexpr unsigned int dimension = 3u;
TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+ auto input0 = QuantizedVector<T>(
{
1.0f, 2.0f,
3.0f, 4.0f,
@@ -1901,10 +1901,10 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
9.0f, 10.0f,
11.0f, 12.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+ auto input1 = QuantizedVector<T>(
{
11.0f, 12.0f, 13.0f,
14.0f, 15.0f, 16.0f,
@@ -1915,7 +1915,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
23.0f, 24.0f, 25.0f,
26.0f, 27.0f, 28.0f
},
- qScale, qOffset));
+ qScale, qOffset);
TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
@@ -1933,8 +1933,8 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
dimension,
useSubtensor);
- result.output = MakeTensor<T, 4>(outputTensorInfo, output);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ result.m_ActualData = output;
+ result.m_ExpectedData = QuantizedVector<T>(
{
1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
@@ -1943,7 +1943,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
11.0f, 12.0f, 26.0f, 27.0f, 28.0f
},
- qScale, qOffset));
+ qScale, qOffset);
return result;
}
@@ -1968,7 +1968,7 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
const float inputScale1 = 0.5f;
const int32_t inputOffset1 = 5;
- auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
+ std::vector<T> input1 =
{
1, 2, 3,
4, 5, 6,
@@ -1983,13 +1983,13 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
28, 29, 30,
31, 32, 33,
34, 35, 36
- }));
+ };
// Quantized input2 tensor.
const float inputScale2 = 0.2f;
const int32_t inputOffset2 = 10;
- auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
+ std::vector<T> input2 =
{
37, 38, 39,
40, 41, 42,
@@ -1997,15 +1997,15 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
46, 47, 48,
49, 50, 51,
52, 53, 54
- }));
+ };
// Quantized output tensor.
const float outputScale = 0.1f;
const int32_t outputOffset = 20;
- LayerTestResult<T, 3> ret(outputTensorInfo);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
+ std::vector<T> expectedOutput =
{
0, 5, 74,
10, 15, 76,
@@ -2027,7 +2027,7 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
150, 155, 104,
160, 165, 106,
170, 175, 108
- }));
+ };
outputTensorInfo.SetQuantizationScale(outputScale);
outputTensorInfo.SetQuantizationOffset(outputOffset);
@@ -2075,15 +2075,18 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
//
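The expected values in ConcatDifferentInputOutputQParamTest (and in ConcatUint8DifferentQParamsTest further down) follow from the usual affine requantization: dequantize with the input parameters, then quantize with the output parameters. For example, input1[0] = 1 with scale 0.5 and offset 5 dequantizes to 0.5 * (1 - 5) = -2.0, which requantizes to -2.0 / 0.1 + 20 = 0; input2[0] = 37 with scale 0.2 and offset 10 gives 0.2 * (37 - 10) = 5.4 and 5.4 / 0.1 + 20 = 74, i.e. the first and third entries of expectedOutput. A sketch of that arithmetic, assuming round-to-nearest, rather than the ArmNN API itself:

#include <cmath>
#include <cstdint>

template <typename T>
T RequantizeSketch(T qIn, float inScale, int32_t inOffset,
                   float outScale, int32_t outOffset)
{
    // dequantize with the input parameters...
    const float real = inScale * (static_cast<float>(qIn) - static_cast<float>(inOffset));
    // ...then quantize with the output parameters, rounding to nearest
    return static_cast<T>(std::lround(real / outScale) + outOffset);
}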
@@ -2132,61 +2135,58 @@ LayerTestResult<float,3> ConcatTest(
TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
- LayerTestResult<float,3> ret(outputTensorInfo);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
- ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
+ std::vector<float> expectedOutput =
{
- 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f,
- 10.0f, 11.0f, 12.0f,
- 13.0f, 14.0f, 15.0f,
- 16.0f, 17.0f, 18.0f,
-
- 19.0f, 20.0f, 21.0f,
- 22.0f, 23.0f, 24.0f,
- 25.0f, 26.0f, 27.0f,
- 28.0f, 29.0f, 30.0f,
- 31.0f, 32.0f, 33.0f,
- 34.0f, 35.0f, 36.0f,
-
- 37.0f, 38.0f, 39.0f,
- 40.0f, 41.0f, 42.0f,
- 43.0f, 44.0f, 45.0f,
- 46.0f, 47.0f, 48.0f,
- 49.0f, 50.0f, 51.0f,
- 52.0f, 53.0f, 54.0f,
- })
- );
-
- auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f,
- 4.0f, 5.0f, 6.0f,
- 7.0f, 8.0f, 9.0f,
- 10.0f, 11.0f, 12.0f,
- 13.0f, 14.0f, 15.0f,
- 16.0f, 17.0f, 18.0f,
-
- 19.0f, 20.0f, 21.0f,
- 22.0f, 23.0f, 24.0f,
- 25.0f, 26.0f, 27.0f,
- 28.0f, 29.0f, 30.0f,
- 31.0f, 32.0f, 33.0f,
- 34.0f, 35.0f, 36.0f,
- })
- );
-
- auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
- {
- 37.0f, 38.0f, 39.0f,
- 40.0f, 41.0f, 42.0f,
- 43.0f, 44.0f, 45.0f,
- 46.0f, 47.0f, 48.0f,
- 49.0f, 50.0f, 51.0f,
- 52.0f, 53.0f, 54.0f,
- })
- );
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f,
+ 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f,
+ 16.0f, 17.0f, 18.0f,
+
+ 19.0f, 20.0f, 21.0f,
+ 22.0f, 23.0f, 24.0f,
+ 25.0f, 26.0f, 27.0f,
+ 28.0f, 29.0f, 30.0f,
+ 31.0f, 32.0f, 33.0f,
+ 34.0f, 35.0f, 36.0f,
+
+ 37.0f, 38.0f, 39.0f,
+ 40.0f, 41.0f, 42.0f,
+ 43.0f, 44.0f, 45.0f,
+ 46.0f, 47.0f, 48.0f,
+ 49.0f, 50.0f, 51.0f,
+ 52.0f, 53.0f, 54.0f
+ };
+
+ std::vector<float> input1 =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f,
+ 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f,
+ 16.0f, 17.0f, 18.0f,
+
+ 19.0f, 20.0f, 21.0f,
+ 22.0f, 23.0f, 24.0f,
+ 25.0f, 26.0f, 27.0f,
+ 28.0f, 29.0f, 30.0f,
+ 31.0f, 32.0f, 33.0f,
+ 34.0f, 35.0f, 36.0f
+ };
+
+ std::vector<float> input2 =
+ {
+ 37.0f, 38.0f, 39.0f,
+ 40.0f, 41.0f, 42.0f,
+ 43.0f, 44.0f, 45.0f,
+ 46.0f, 47.0f, 48.0f,
+ 49.0f, 50.0f, 51.0f,
+ 52.0f, 53.0f, 54.0f,
+ };
std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
@@ -2223,15 +2223,18 @@ LayerTestResult<float,3> ConcatTest(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<float, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 1> Concat1dTest(
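The wOrigin vectors describe where each input view is placed inside the output tensor: window1 at { 0, 0, 0 } puts input1 at the start, and each later view is presumably offset along the concatenated axis by the extents of the views before it (the second origin is elided above). A sketch of that placement rule, assumed rather than taken from the descriptor's implementation:

#include <vector>

// Concatenation along `axis`: each input's view origin is the running sum of
// the preceding inputs' extents along that axis, and zero elsewhere.
std::vector<std::vector<unsigned int>> MakeViewOrigins(
    const std::vector<std::vector<unsigned int>>& inputShapes,
    unsigned int axis)
{
    std::vector<std::vector<unsigned int>> origins;
    unsigned int runningOffset = 0;
    for (const auto& shape : inputShapes)
    {
        std::vector<unsigned int> origin(shape.size(), 0);
        origin[axis] = runningOffset;
        origins.push_back(origin);
        runningOffset += shape[axis];
    }
    return origins;
}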
@@ -2448,7 +2451,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
const float inputScale1 = 0.015686f;
const int32_t inputOffset1 = 192;
- auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
+ std::vector<uint8_t> input1 =
{
1, 2, 3,
4, 5, 6,
@@ -2462,33 +2465,31 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
25, 26, 27,
28, 29, 30,
31, 32, 33,
- 34, 35, 36,
- })
- );
+ 34, 35, 36
+ };
// Quantized input2 tensor. Range [-1, 4]
const float inputScale2 = 0.019608f;
const int32_t inputOffset2 = 50;
- auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
+ std::vector<uint8_t> input2 =
{
37, 38, 39,
40, 41, 42,
43, 44, 45,
46, 47, 48,
49, 50, 51,
- 52, 53, 54,
- })
- );
+ 52, 53, 54
+ };
// Output has the same quantization parameters as input1,
// so that only the requantization of input2 is required
const float outputScale = 0.015686f;
const int32_t outputOffset = 192;
- LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+ std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
- ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
+ std::vector<uint8_t> expectedOutput =
{
1, 2, 3,
4, 5, 6,
@@ -2509,9 +2510,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
183, 184, 186,
187, 188, 189,
191, 192, 193,
- 195, 196, 197,
- })
- );
+ 195, 196, 197
+ };
outputTensorInfo.SetQuantizationScale(outputScale);
outputTensorInfo.SetQuantizationOffset(outputOffset);
@@ -2555,15 +2555,18 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<uint8_t, 3> ConcatUint8Test(
@@ -2601,34 +2604,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
inputTensorInfo2.SetQuantizationScale(scale);
inputTensorInfo2.SetQuantizationOffset(offset);
- LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+ std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
- ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
- {
- 1, 2, 3,
- 4, 5, 6,
- 7, 8, 9,
- 10, 11, 12,
- 13, 14, 15,
- 16, 17, 18,
-
- 19, 20, 21,
- 22, 23, 24,
- 25, 26, 27,
- 28, 29, 30,
- 31, 32, 33,
- 34, 35, 36,
-
- 37, 38, 39,
- 40, 41, 42,
- 43, 44, 45,
- 46, 47, 48,
- 49, 50, 51,
- 52, 53, 54,
- })
- );
-
- auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
+ std::vector<uint8_t> expectedOutput =
{
1, 2, 3,
4, 5, 6,
@@ -2643,19 +2621,41 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
28, 29, 30,
31, 32, 33,
34, 35, 36,
- })
- );
- auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
+ 37, 38, 39,
+ 40, 41, 42,
+ 43, 44, 45,
+ 46, 47, 48,
+ 49, 50, 51,
+ 52, 53, 54
+ };
+
+ std::vector<uint8_t> input1 =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+ 10, 11, 12,
+ 13, 14, 15,
+ 16, 17, 18,
+
+ 19, 20, 21,
+ 22, 23, 24,
+ 25, 26, 27,
+ 28, 29, 30,
+ 31, 32, 33,
+ 34, 35, 36
+ };
+
+ std::vector<uint8_t> input2 =
{
37, 38, 39,
40, 41, 42,
43, 44, 45,
46, 47, 48,
49, 50, 51,
- 52, 53, 54,
- })
- );
+ 52, 53, 54
+ };
std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
@@ -2693,15 +2693,18 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<uint16_t, 3> ConcatUint16Test(
@@ -2739,9 +2742,9 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
inputTensorInfo2.SetQuantizationScale(scale);
inputTensorInfo2.SetQuantizationOffset(offset);
- LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
+ std::vector<uint16_t> actualOutput(outputTensorInfo.GetNumElements());
- ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
+ std::vector<uint16_t> expectedOutput =
{
1, 2, 3,
4, 5, 6,
@@ -2762,10 +2765,10 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
43, 44, 45,
46, 47, 48,
49, 50, 51,
- 52, 53, 54,
- }));
+ 52, 53, 54
+ };
- auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
+ std::vector<uint16_t> input1 =
{
1, 2, 3,
4, 5, 6,
@@ -2780,9 +2783,9 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
28, 29, 30,
31, 32, 33,
34, 35, 36,
- }));
+ };
- auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
+ std::vector<uint16_t> input2 =
{
37, 38, 39,
40, 41, 42,
@@ -2790,7 +2793,7 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
46, 47, 48,
49, 50, 51,
52, 53, 54,
- }));
+ };
std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
@@ -2829,15 +2832,18 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
inputHandle2->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle2.get(), input2.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint16_t, 3>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index c28ef40b45..bb827ef359 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -55,54 +55,52 @@ LayerTestResult<T, 4> ConstantTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- armnnUtils::QuantizedVector<T>(
- {
- // Batch 0, Channel 0
- 235.0f, 46.0f, 178.0f,
- 100.0f, 123.0f, 19.0f,
- 172.0f, 74.0f, 250.0f,
- 6.0f, 195.0f, 80.0f,
-
- // Batch 0, Channel 1
- 113.0f, 95.0f, 202.0f,
- 77.0f, 114.0f, 71.0f,
- 122.0f, 246.0f, 166.0f,
- 82.0f, 28.0f, 37.0f,
-
- // Batch 0, Channel 2
- 56.0f, 170.0f, 162.0f,
- 194.0f, 89.0f, 254.0f,
- 12.0f, 209.0f, 200.0f,
- 1.0f, 64.0f, 54.0f,
-
- // Batch 1, Channel 0
- 67.0f, 90.0f, 49.0f,
- 7.0f, 163.0f, 18.0f,
- 25.0f, 117.0f, 103.0f,
- 247.0f, 59.0f, 189.0f,
-
- // Batch 1, Channel 1
- 239.0f, 104.0f, 199.0f,
- 17.0f, 124.0f, 153.0f,
- 222.0f, 217.0f, 75.0f,
- 32.0f, 126.0f, 21.0f,
-
- // Batch 1, Channel 2
- 97.0f, 145.0f, 215.0f,
- 115.0f, 116.0f, 238.0f,
- 226.0f, 16.0f, 132.0f,
- 92.0f, 125.0f, 88.0f,
- },
- qScale, qOffset)));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = input;
+ auto input = armnnUtils::QuantizedVector<T>(
+ {
+ // Batch 0, Channel 0
+ 235.0f, 46.0f, 178.0f,
+ 100.0f, 123.0f, 19.0f,
+ 172.0f, 74.0f, 250.0f,
+ 6.0f, 195.0f, 80.0f,
+
+ // Batch 0, Channel 1
+ 113.0f, 95.0f, 202.0f,
+ 77.0f, 114.0f, 71.0f,
+ 122.0f, 246.0f, 166.0f,
+ 82.0f, 28.0f, 37.0f,
+
+ // Batch 0, Channel 2
+ 56.0f, 170.0f, 162.0f,
+ 194.0f, 89.0f, 254.0f,
+ 12.0f, 209.0f, 200.0f,
+ 1.0f, 64.0f, 54.0f,
+
+ // Batch 1, Channel 0
+ 67.0f, 90.0f, 49.0f,
+ 7.0f, 163.0f, 18.0f,
+ 25.0f, 117.0f, 103.0f,
+ 247.0f, 59.0f, 189.0f,
+
+ // Batch 1, Channel 1
+ 239.0f, 104.0f, 199.0f,
+ 17.0f, 124.0f, 153.0f,
+ 222.0f, 217.0f, 75.0f,
+ 32.0f, 126.0f, 21.0f,
+
+ // Batch 1, Channel 2
+ 97.0f, 145.0f, 215.0f,
+ 115.0f, 116.0f, 238.0f,
+ 226.0f, 16.0f, 132.0f,
+ 92.0f, 125.0f, 88.0f,
+ },
+ qScale, qOffset);
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
- AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
+ AllocateAndCopyDataToITensorHandle(&constantTensor, input.data());
armnn::ConstantQueueDescriptor descriptor;
descriptor.m_LayerOutput = &constantTensor;
@@ -117,8 +115,12 @@ LayerTestResult<T, 4> ConstantTestImpl(
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ input,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
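+
+// Note that `input` doubles as the expected output above: a Constant layer simply
+// emits its stored tensor, so the same vector is uploaded to constantTensor and
+// passed to LayerTestResult as the reference data.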
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 8f60415a66..98264ee928 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -70,55 +70,49 @@ using namespace armnnUtils;
// Helper template that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
+std::vector<T> GetBias2(bool biasEnabled, float qScale)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
- boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias2, qScale, 0));
- return bias;
+ return QuantizedVector<T>(Bias2, qScale, 0);
}
else
{
- return boost::multi_array<T, 1>();
+ return std::vector<T>();
}
}
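+
+// Hypothetical usage: callers now receive the quantized bias as a flat vector and
+// construct any TensorInfo they need themselves, e.g.
+//
+//     auto bias = GetBias2<armnn::DataType::Float32>(true /*biasEnabled*/, 0.25f);
+//     // bias.size() == Bias2.size(); an empty vector means "no bias".
+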
// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-boost::multi_array<T, 1> GetBias4(bool biasEnabled, float qScale)
+std::vector<T> GetBias4(bool biasEnabled, float qScale)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType);
- boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias4, qScale, 0));
- return bias;
+ return QuantizedVector<T>(Bias4, qScale, 0);
}
else
{
- return boost::multi_array<T, 1>();
+ return std::vector<T>();
}
}
// Helper template that returns either Bias8 or an empty vector depending on whether bias is enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-boost::multi_array<T, 1> GetBias8(bool biasEnabled, float qScale)
+std::vector<T> GetBias8(bool biasEnabled, float qScale)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType);
- boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias8, qScale, 0));
- return bias;
+ return QuantizedVector<T>(Bias8, qScale, 0);
}
else
{
- return boost::multi_array<T, 1>();
+ return std::vector<T>();
}
}
// Helper template that returns a bias vector matching the output shape (Bias2/Bias4/Bias8) or an empty vector when bias is disabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-boost::multi_array<T, 1> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
+std::vector<T> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
{
const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
@@ -201,10 +195,13 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 4>& originalInput,
- const boost::multi_array<T, 4>& originalKernel,
- const boost::multi_array<B, 1>& bias,
- const boost::multi_array<T, 4>& originalOutputExpected,
+ const std::vector<T>& originalInput,
+ const std::vector<T>& originalKernel,
+ const std::vector<B>& bias,
+ const std::vector<T>& originalOutputExpected,
+ const armnn::TensorShape& originalInputShape,
+ const armnn::TensorShape& originalKernelShape,
+ const armnn::TensorShape& originalOutputExpectedShape,
float qScale,
int32_t qOffset,
const armnn::DataLayout layout = armnn::DataLayout::NCHW,
@@ -218,20 +215,20 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
uint32_t dilationY = 1)
{
armnn::IgnoreUnused(memoryManager);
- unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
- unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
- unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
- unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);
- unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
- unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
- unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
- unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
- unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
- unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
- unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
- unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
+ unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);
bool biasEnabled = bias.size() > 0;
@@ -242,7 +239,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
// If a bias is used, its size must equal the number of output channels.
ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
-
// Note these tensors will use two (identical) batches.
armnn::TensorInfo inputTensorInfo =
armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
@@ -265,8 +261,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
biasDesc.SetQuantizationOffset(0);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
-
// Construct input data - two batches of the same input image.
std::vector<T> inputImage;
inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
@@ -283,8 +277,6 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
inputData = tmp;
}
- auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
std::vector<T> outputImage;
outputImage.assign(originalOutputExpected.data(),
originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
@@ -299,19 +291,21 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
outputWidth, outputHeight);
}
+ // Data will be copied from outputHandle
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
// Construct expected output data - two identical images.
- std::vector<T> outputData;
- outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
- outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
+ std::vector<T> expectedOutput;
+ expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
+ expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
    // At this point, permute the expected output if the layout requires it.
if (layout == armnn::DataLayout::NHWC)
{
- std::vector<T> tmp(outputData.size());
- armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
- outputData = tmp;
+ std::vector<T> tmp(expectedOutput.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T));
+ expectedOutput = tmp;
}
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -320,17 +314,18 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::WorkloadInfo info;
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
+
// Permute the kernel if necessary
- boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
+ std::vector<T> kernel = originalKernel;
if (layout == armnn::DataLayout::NHWC)
{
armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
}
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
if(biasEnabled)
{
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
}
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -353,13 +348,16 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
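+
+// Because a flat std::vector, unlike boost::multi_array, carries no extents of
+// its own, every caller now passes the NCHW shapes explicitly alongside the data,
+// typically straight from the TensorInfos it already holds:
+//
+//     SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+//         workloadFactory, memoryManager, tensorHandleFactory,
+//         input, kernel, bias, expectedOutput,
+//         inputDesc.GetShape(), kernelDesc.GetShape(), outputDesc.GetShape(),
+//         qScale, qOffset, layout);
+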
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
@@ -369,10 +367,13 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 4>& input,
- const boost::multi_array<T, 4>& kernel,
- const boost::multi_array<B, 1>& bias,
- const boost::multi_array<O, 4>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& kernel,
+ const std::vector<B>& bias,
+ const std::vector<O>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& kernelShape,
+ const armnn::TensorShape& outputExpectedShape,
const armnn::DataLayout dataLayout,
float qScale,
int32_t qOffset,
@@ -384,20 +385,20 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
uint32_t strideY = 1)
{
armnn::IgnoreUnused(qScale, qOffset);
- unsigned int inputNum = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[3]);
- unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[2]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[3]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[2]);
- unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
- unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
- unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
- unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
+ unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
- unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
- unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
- unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
bool biasEnabled = bias.size() > 0;
@@ -411,20 +412,18 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
// Construct the input data.
std::vector<T> inputData;
inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
- auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
// Construct the output data, with bias applied, as appropriate.
std::vector<O> outputData;
outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
- LayerTestResult<O, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<O, 4>(outputTensorInfo, outputData);
+ std::vector<O> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
armnn::ScopedTensorHandle biasTensor(biasDesc);
@@ -449,13 +448,16 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<O, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
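+
+// Note the separate O template parameter: the output element type may differ from
+// the input type T, e.g. the BFloat16 tests further down feed BFloat16 input and
+// weights through this helper while reading back Float32 results.
+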
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
@@ -534,6 +536,8 @@ LayerTestResult<T,4> Convolution1dTestImpl(
outputInfo.GetQuantizationScale(),
outputInfo.GetQuantizationOffset());
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
+
// Optionally apply bias to output image.
if(biasEnabled)
{
@@ -574,11 +578,12 @@ LayerTestResult<T,4> Convolution1dTestImpl(
ExecuteWorkload(*workload, memoryManager);
- // Output
- LayerTestResult<T,4> ret(outputInfo);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- ret.outputExpected = MakeTensor<T, 4>(outputInfo, outputData);
- return ret;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
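+
+// ExecuteWorkload (a helper from the common workload test utilities) wraps
+// PostAllocationConfigure()/Execute() together with any memory-manager
+// acquire/release, which is why the convolution tests do not call those two
+// steps explicitly the way the Concat tests above do.
+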
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -594,34 +599,31 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
armnn::IgnoreUnused(biasEnabled);
    // Use a common single-batch 1-channel 4x3 image.
- armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
- {
- 1, 5, 2, 3,
- 8, 7, 3, 6,
- 3, 3, 9, 1
- });
-
+ armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType);
+ std::vector<T> input =
+ {
+ 1, 5, 2, 3,
+ 8, 7, 3, 6,
+ 3, 3, 9, 1
+ };
    // Use a single 1-channel 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
- 4, 5, 6,
- 0, 0, 0,
- 3, 2, 1
- });
+ armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
+ std::vector<T> kernel =
+ {
+ 4, 5, 6,
+ 0, 0, 0,
+ 3, 2, 1
+ };
    // Expected output is 1 batch of a 1-channel 4x3 image.
- armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
-
+ armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType);
const std::vector<float> outputData =
- {
- 23, 41, 33, 21,
- 44, 65, 76, 52,
- 82, 85, 79, 42
- };
-
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
+ {
+ 23, 41, 33, 21,
+ 44, 65, 76, 52,
+ 82, 85, 79, 42
+ };
return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
workloadFactory,
@@ -629,8 +631,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
tensorHandleFactory,
input,
kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
+ std::vector<T>(),
+ outputData,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
dataLayout,
qScale,
qOffset);
@@ -649,36 +654,33 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
armnn::IgnoreUnused(biasEnabled);
// Input is a single-batch, 1 channel, 5x5 image.
- armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
- {
- 1, 5, 2, 3, 5,
- 8, 7, 3, 6, 3,
- 3, 3, 9, 1, 9,
- 4, 1, 8, 1, 3,
- 6, 8, 1, 9, 2
- });
+ armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType);
+ std::vector<T> input =
+ {
+ 1, 5, 2, 3, 5,
+ 8, 7, 3, 6, 3,
+ 3, 3, 9, 1, 9,
+ 4, 1, 8, 1, 3,
+ 6, 8, 1, 9, 2
+ };
// Use a 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
- {
- 4, 5, 6,
- 0, 0, 0,
- 3, 2, 1
- });
+ armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
+ std::vector<T> kernel =
+ {
+ 4, 5, 6,
+ 0, 0, 0,
+ 3, 2, 1
+ };
// Expected output is a single-batch, 1 channel, 3x3 image.
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
-
- const std::vector<T> outputData =
- {
- 23, 33, 24,
- 91, 99, 48,
- 26, 50, 19
- };
-
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
+ armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType);
+ std::vector<T> outputData =
+ {
+ 23, 33, 24,
+ 91, 99, 48,
+ 26, 50, 19
+ };
uint32_t padLeft = 1;
uint32_t padTop = 1;
@@ -693,8 +695,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
tensorHandleFactory,
input,
kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
+ std::vector<T>(),
+ outputData,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
dataLayout,
qScale,
qOffset,
@@ -717,13 +722,12 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
const armnn::DataLayout layout)
{
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
+ armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
+ std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
// Use a 2-element batch with 3-channel 3x5 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType);
+ std::vector<T> kernel = QuantizedVector<T>({
1, 1, 1,
1, -1, 1,
1, 1, 1,
@@ -761,12 +765,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
0, 0, 0,
0, 0, 0
},
- qScale, qOffset)));
+ qScale, qOffset);
    // Expected output is 1 batch of a 2-channel 14x4 image.
- armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType);
+ std::vector<T> expectedOutput = QuantizedVector<T>({
-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
-25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
-23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
@@ -779,7 +782,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
},
- qScale, qOffset)));
+ qScale, qOffset);
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -789,6 +792,9 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
qScale,
qOffset,
layout);
@@ -808,13 +814,12 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
// Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
+ armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
+ std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
// Use a 2-element batch of 3-channel 3x3 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType);
+ std::vector<T> kernel = QuantizedVector<T>({
1, 1, 1,
1, -1, 1,
1, 1, 1,
@@ -840,12 +846,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
0, 0, 0,
0, 0, 0
},
- qScale, qOffset)));
+ qScale, qOffset);
// Expected output is 1 batch of a 2-channel 14x6 image.
- armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType);
+ std::vector<T> expectedOutput = QuantizedVector<T>({
-15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
-16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
@@ -860,7 +865,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
},
- qScale, qOffset)));
+ qScale, qOffset);
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -870,6 +875,9 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
qScale,
qOffset,
layout);
@@ -886,23 +894,23 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
int32_t qOffset)
{
// Use a single-batch 1-channel 3x3 image as input.
- armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
+ armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType);
+ std::vector<T> input =
QuantizedVector<T>({
11,21,31,
12,22,32,
13,23,33
},
- qScale, qOffset)));
+ qScale, qOffset);
// Use 1 batch of a 1-channel 2x2 kernel.
- armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+ armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType);
+ std::vector<T> kernel =
QuantizedVector<T>({
-11,-21,
-12,-22,
},
- qScale, qOffset)));
+ qScale, qOffset);
// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
@@ -913,8 +921,8 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
- armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+ armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType);
+ std::vector<T> expectedOutput =
QuantizedVector<T>({
0, 0, 0, 0, 0, 0,
-242, -594, -934, -372, 0, 0,
@@ -925,7 +933,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
},
- qScale, qOffset)));
+ qScale, qOffset);
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -935,6 +943,9 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
kernel,
GetBias2<ArmnnBType>(false, qScale * qScale),
expectedOutput,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
qScale,
qOffset,
layout,
@@ -956,30 +967,29 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
{
// Use a single-batch 1-channel 5x5 image as input.
armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
+ std::vector<T> input =
QuantizedVector<T>({
11,21,31,41,51,
12,22,32,42,52,
13,23,33,43,53,
14,24,34,44,54,
15,25,35,45,55,
- }, qScale, qOffset)));
+ }, qScale, qOffset);
// Use 1 batch of a 1-channel 4x4 kernel.
armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
- boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+ std::vector<T> kernel =
QuantizedVector<T>({
-11,-21,-31,-41,
-12,-22,-32,-42,
-13,-23,-33,-43,
-14,-24,-34,-44,
},
- qScale, qOffset)));
+ qScale, qOffset);
// Expected output is 1 batch of a 1-channel 5x5 image.
armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
- std::vector<T> myVec(outputDesc.GetNumElements(), 0);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+ std::vector<T> expectedOutput =
QuantizedVector<T>({
-7140, -10580, -13940, -9300, -5230,
-9590, -14120, -18520, -12290, -6860,
@@ -987,7 +997,7 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
-7518, -10904, -14144, -9318, -5152,
-5032, -7256, -9376, -6142, -3368,
},
- qScale, qOffset)));
+ qScale, qOffset);
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -997,6 +1007,9 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
kernel,
GetBias2<ArmnnBType>(false, qScale * qScale),
expectedOutput,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
qScale,
qOffset,
layout,
@@ -1062,19 +1075,15 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset())));
- auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
- std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
- kernelTensorInfo.GetQuantizationScale(),
- kernelTensorInfo.GetQuantizationOffset())));
- auto expectedOutput =
- MakeTensor<T, 4>(outputTensorInfo,
- std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset())));
+ auto input = QuantizedVector<T>(inputNoQuantizedValues,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset());
+ auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
+ kernelTensorInfo.GetQuantizationScale(),
+ kernelTensorInfo.GetQuantizationOffset());
+ auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset());
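+
+    // QuantizedVector<T> leaves float/BFloat16 values untouched and, for quantized
+    // types, maps each value v to round(v / scale) + offset (clamped to T's range),
+    // so the same reference data serves every instantiation of this template.
+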
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -1084,6 +1093,9 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
qScale,
qOffset,
layout,
@@ -1105,7 +1117,7 @@ LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
std::vector<float> inputNoQuantizedValues =
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1163,7 +1175,7 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType);
std::vector<float> inputNoQuantizedValues =
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1189,7 +1201,7 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType);
std::vector<float> kernelNoQuantizedValues =
{
1, 2, 3,
@@ -1203,7 +1215,7 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    // Since the dilation rate is 3, the effective kernel size grows to
    // K + (K - 1) * (d - 1) = 3 + 2 * 2 = 7, so the output will be 4x4:
    // (I - K_eff + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
std::vector<float> outputExpectedNoQuantizedValues =
{
12., 10., 10., 10.,
@@ -1230,13 +1242,13 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
- armnn::IWorkloadFactory &workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
- armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
std::vector<float> inputNoQuantizedValues =
{
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -1251,7 +1263,7 @@ LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
- armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
+ armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
std::vector<float> kernelNoQuantizedValues =
{
1, 2,
@@ -1338,11 +1350,12 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
- LayerTestResult<T,4> ret(outputTensorInfo);
+ auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
+ auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
+ auto bias = MakeRandomTensor<T>(biasDesc, 1028);
- auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
- auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
- auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -1352,8 +1365,8 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -1371,11 +1384,11 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
armnn::Convolution2dQueueDescriptor refData = data;
- armnn::WorkloadInfo refInfo = info;
+ armnn::WorkloadInfo refInfo = info;
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);
outputHandleRef->Allocate();
@@ -1384,18 +1397,21 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
workloadRef->PostAllocationConfigure();
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
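+
+// A sketch of the templated helper assumed above, replacing the old
+// MakeRandomTensor<T, n> that returned a boost::multi_array (the actual
+// definition lives in TensorHelpers.hpp):
+//
+//     template <typename T>
+//     std::vector<T> MakeRandomTensor(const armnn::TensorInfo& info,
+//                                     unsigned int seed);
+//
+// i.e. a seeded pseudo-random fill of TensorInfo::GetNumElements() values.
+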
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
@@ -1409,7 +1425,7 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
armnn::IgnoreUnused(biasEnabled);
// Input is a single-batch, 1 channel, 5x5 image.
- armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
+ armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16);
std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
{
@@ -1441,8 +1457,6 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
},
1.0f, 0);
- auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
-
// Use a 3x3 kernel.
armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
@@ -1460,10 +1474,8 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
},
1.0f, 0);
- auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
-
// Expected output is a single-batch, 1 channel, 3x3 image.
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
+ armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32);
// Expected output (with results if calculated as FP32 in the comments)
const std::vector<float> outputData =
@@ -1479,8 +1491,6 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
-20.625f // -20.63477281
};
- boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
-
uint32_t padLeft = 1;
uint32_t padTop = 1;
uint32_t padRight = 1;
@@ -1493,10 +1503,13 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
workloadFactory,
memoryManager,
tensorHandleFactory,
- input,
- kernel,
- boost::multi_array<float, 1>(),
- expectedOutput,
+ inputValues,
+ kernelValues,
+ std::vector<float>(),
+ outputData,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
dataLayout,
1.0f,
0,
@@ -1551,8 +1564,6 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
},
1.0f, 0);
- auto input = MakeTensor<armnn::BFloat16, 4>(inputDesc, inputValues);
-
// Use a 3x3 kernel.
armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
@@ -1570,8 +1581,6 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
},
1.0f, 0);
- auto kernel = MakeTensor<armnn::BFloat16, 4>(kernelDesc, kernelValues);
-
// Expected output is a single-batch, 1 channel, 3x3 image.
armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
@@ -1589,8 +1598,6 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
-0.0346679688f // -0.034808
};
- boost::multi_array<float, 4> expectedOutput = MakeTensor<float, 4>(outputDesc, outputData);
-
uint32_t padLeft = 1;
uint32_t padTop = 1;
uint32_t padRight = 1;
@@ -1603,10 +1610,13 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
workloadFactory,
memoryManager,
tensorHandleFactory,
- input,
- kernel,
- boost::multi_array<float, 1>(),
- expectedOutput,
+ inputValues,
+ kernelValues,
+ std::vector<float>(),
+ outputData,
+ inputDesc.GetShape(),
+ kernelDesc.GetShape(),
+ outputDesc.GetShape(),
dataLayout,
1.0f,
0,
@@ -1628,10 +1638,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 4>& input,
- const boost::multi_array<T, 4>& kernel,
- const boost::multi_array<B, 1>& bias,
- const boost::multi_array<T, 4>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& kernel,
+ const std::vector<B>& bias,
+ const std::vector<T>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& kernelShape,
+ const armnn::TensorShape& outputExpectedShape,
float qScale,
int32_t qOffset,
const armnn::DataLayout layout,
@@ -1642,18 +1655,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
uint32_t strideX = 1,
uint32_t strideY = 1)
{
- unsigned int inputNum = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[2]);
- unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[3]);
- unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernel.shape()[0]);
- unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernel.shape()[1]);
- unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernel.shape()[2]);
- unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernel.shape()[3]);
- unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
- unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[2]);
- unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
+ unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[1]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[3]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
@@ -1693,8 +1706,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
inputData = tmp;
}
- auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
// Construct the output data, with bias applied, as appropriate.
std::vector<T> outputData;
outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
@@ -1707,7 +1718,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
outputWidth, outputHeight);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    // At this point, permute the expected output if the layout requires it.
if (layout == armnn::DataLayout::NHWC)
@@ -1717,19 +1728,17 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
outputData = tmp;
}
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
-
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
armnn::ScopedTensorHandle biasTensor(biasDesc);
if (biasEnabled)
{
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
}
armnn::DepthwiseConvolution2dQueueDescriptor data;
@@ -1752,13 +1761,16 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
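+
+// As elsewhere in this file, an empty bias vector stands in for "bias disabled":
+// the old boost::multi_array<T, 1>() sentinel becomes std::vector<T>() at the
+// call sites, and biasEnabled is derived from bias.size() > 0.
+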
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1829,14 +1841,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
inputData = tmp;
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
biasDesc.GetQuantizationScale(),
biasDesc.GetQuantizationOffset()));
- auto bias = MakeTensor<B, 1>(biasDesc, biasV);
-
std::vector<T> kernelData = std::vector<T>(
QuantizedVector<T>({
1.f, 0.f, 1.f,
@@ -1850,8 +1859,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
kernelDesc.GetQuantizationScale(),
kernelDesc.GetQuantizationOffset()));
- auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
-
// Manually calculated.
std::vector<T> outputImage(
QuantizedVector<T>({ 0.f, 0.f },
@@ -1867,7 +1874,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
outputWidth, outputHeight);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
if (layout == armnn::DataLayout::NHWC)
{
std::vector<T> tmp(outputImage.size());
@@ -1875,7 +1881,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
outputImage = tmp;
}
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -1885,8 +1891,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -1906,13 +1912,16 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputImage,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1994,14 +2003,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
originalInputData.data(), inputData.data(), sizeof(T));
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
biasDesc.GetQuantizationScale(),
biasDesc.GetQuantizationOffset());
- auto bias = MakeTensor<B, 1>(biasDesc, biasV);
-
std::vector<T> kernelData = std::vector<T>(
QuantizedVector<T>({
1, 1, 1,
@@ -2031,8 +2037,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
kernelDesc.GetQuantizationScale(),
kernelDesc.GetQuantizationOffset()));
- auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
-
// Manually calculated.
std::vector<T> originalOutputImage = std::vector<T>(
QuantizedVector<T>({
@@ -2080,7 +2084,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
outputHeight);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
std::vector<T> outputImage = originalOutputImage;
if (layout == armnn::DataLayout::NHWC)
{
@@ -2088,7 +2091,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
originalOutputImage.data(), outputImage.data(), sizeof(T));
}
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -2098,8 +2101,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2119,13 +2122,17 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ outputImage,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
- return ret;
}
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
@@ -2134,10 +2141,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 4>& originalInput,
- const boost::multi_array<T, 4>& originalKernel,
- const boost::multi_array<B, 1>& bias,
- const boost::multi_array<T, 4>& originalOutputExpected,
+ const std::vector<T>& originalInput,
+ const std::vector<T>& originalKernel,
+ const std::vector<B>& bias,
+ const std::vector<T>& originalOutputExpected,
+ const armnn::TensorShape& originalInputShape,
+ const armnn::TensorShape& originalKernelShape,
+ const armnn::TensorShape& originalOutputExpectedShape,
float qScale,
int32_t qOffset,
const armnn::DataLayout layout = armnn::DataLayout::NCHW,
@@ -2150,20 +2160,20 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
- unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInput.shape()[2]);
- unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInput.shape()[3]);
- unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInput.shape()[1]);
- unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInput.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
+ unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);
- unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
- unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
- unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
- unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
+ unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
- unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernel.shape()[2]);
- unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernel.shape()[3]);
- unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernel.shape()[1]);
- unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+ unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
+ unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
+ unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
+ unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);
bool biasEnabled = bias.size() > 0;
@@ -2199,8 +2209,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
biasDesc.SetQuantizationOffset(0);
}
- LayerTestResult<T, 4> ret(outputTensorInfo);
-
// Construct input data
std::vector<T> input;
input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
@@ -2217,8 +2225,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
inputData = tmp;
}
- auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
std::vector<T> output;
output.assign(originalOutputExpected.data(),
originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
@@ -2233,6 +2239,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
outputWidth, outputHeight);
}
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
// Construct expected output data
std::vector<T> outputData;
outputData.insert(outputData.end(), output.begin(), output.end());
@@ -2245,7 +2253,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
outputData = tmp;
}
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -2255,12 +2262,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
- boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, originalKernel.data());
if(biasEnabled)
{
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
}
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -2283,13 +2289,16 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
@@ -2305,8 +2314,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
{
// Use a single-batch 2-channel 5x5 image as input.
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 12, 13, 14,
@@ -2320,12 +2329,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
45, 46, 47, 48, 49
},
inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset())));
+ inputTensorInfo.GetQuantizationOffset());
// Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
- auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto kernel = QuantizedVector<T>(
+ {
32, 31, 30, 29,
28, 27, 26, 25,
24, 23, 22, 21,
@@ -2337,13 +2346,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
4, 3, 2, 1
},
kernelTensorInfo.GetQuantizationScale(),
- kernelTensorInfo.GetQuantizationOffset())));
+ kernelTensorInfo.GetQuantizationOffset());
// Expected output is 1 batch of a 2-channel 5x5 image.
// Calculated using the python tensorflow library with strideX=1, strideY=1.
armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto expectedOutput = QuantizedVector<T>(
+ {
1062, 1580, 1850, 1530, 1117,
2140, 3108, 3500, 2842, 2042,
3580, 5068, 5460, 4342, 3062,
@@ -2357,7 +2366,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
3100, 4352, 4452, 3517, 2465
},
outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset())));
+ outputTensorInfo.GetQuantizationOffset());
return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -2367,6 +2376,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
qScale,
qOffset,
layout,
@@ -2391,8 +2403,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
auto layout = armnn::DataLayout::NHWC;
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 12, 13, 14,
@@ -2406,11 +2418,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
45, 46, 47, 48, 49
},
inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset())));
+ inputTensorInfo.GetQuantizationOffset());
armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
- auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto kernel = QuantizedVector<T>(
+ {
32, 31, 30, 29,
28, 27, 26, 25,
24, 23, 22, 21,
@@ -2422,11 +2434,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
4, 3, 2, 1
},
kernelTensorInfo.GetQuantizationScale(),
- kernelTensorInfo.GetQuantizationOffset())));
+ kernelTensorInfo.GetQuantizationOffset());
armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ auto expectedOutput = QuantizedVector<T>(
+ {
1062, 1580, 1850, 1530, 1117,
2140, 3108, 3500, 2842, 2042,
3580, 5068, 5460, 4342, 3062,
@@ -2440,7 +2452,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
3100, 4352, 4452, 3517, 2465
},
outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset())));
+ outputTensorInfo.GetQuantizationOffset());
return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -2450,6 +2462,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
qScale,
qOffset,
layout,
@@ -2473,9 +2488,9 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
{
auto layout = armnn::DataLayout::NHWC;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
+ auto input = QuantizedVector<T>(
+ {
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2487,17 +2502,17 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
0, 0, 0, 0, 0, 0, 0, 0, 0
},
inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset())));
+ inputTensorInfo.GetQuantizationOffset());
- armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
- auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
+ auto kernel = QuantizedVector<T>(
+ {
1, 2, 3,
4, 5, 6,
7, 8, 9
},
kernelTensorInfo.GetQuantizationScale(),
- kernelTensorInfo.GetQuantizationOffset())));
+ kernelTensorInfo.GetQuantizationOffset());
uint32_t padLeft = 0;
uint32_t padTop = 0;
@@ -2509,15 +2524,15 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
uint32_t dilationY = 3;
// Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
- boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>({
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
+ auto expectedOutput = QuantizedVector<T>(
+ {
5, 5, 5,
5, 5, 5,
5, 5, 5
},
outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset())));
+ outputTensorInfo.GetQuantizationOffset());
return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
@@ -2527,6 +2542,9 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
expectedOutput,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
qScale,
qOffset,
layout,
@@ -2589,19 +2607,15 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset())));
- auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
- std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
- kernelTensorInfo.GetQuantizationScale(),
- kernelTensorInfo.GetQuantizationOffset())));
- auto expectedOutput =
- MakeTensor<T, 4>(outputTensorInfo,
- std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset())));
+ auto input = QuantizedVector<T>(inputNoQuantizedValues,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset());
+ auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
+ kernelTensorInfo.GetQuantizationScale(),
+ kernelTensorInfo.GetQuantizationOffset());
+ auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset());
uint32_t padLeft = 0;
uint32_t padTop = 0;
@@ -2618,6 +2632,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
kernel,
GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
expectedOutput,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
qScale,
qOffset,
layout,
@@ -2965,7 +2982,6 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::TensorInfo kernelDesc;
armnn::TensorInfo biasDesc;
-
std::vector<unsigned int> inputShape;
std::vector<unsigned int> outputShape;
std::vector<unsigned int> kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth };
@@ -2992,15 +3008,14 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
- biasDesc = armnn::TensorInfo(
- 1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
+ biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
- LayerTestResult<T, 4> ret(outputTensorInfo);
+ auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
+ auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
+ auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
- auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f);
- auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
- auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
- biasDesc, 1028, 0.0f, 255.0f);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -3010,8 +3025,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::ScopedTensorHandle weightsTensor(kernelDesc);
armnn::ScopedTensorHandle biasTensor(biasDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -3043,18 +3058,21 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
workloadRef->PostAllocationConfigure();
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
//
@@ -3486,6 +3504,8 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
}
+ std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
+
Convolution2dDescriptor descriptor;
descriptor.m_StrideX = 1;
descriptor.m_StrideY = 1;
@@ -3496,11 +3516,9 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
descriptor.m_BiasEnabled = true;
descriptor.m_DataLayout = layout;
-
std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
-
WorkloadInfo workloadInfo;
ScopedTensorHandle weightTensor(kernelInfo);
ScopedTensorHandle biasTensor(biasInfo);
@@ -3524,11 +3542,12 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
ExecuteWorkload(*workload, memoryManager);
- LayerTestResult<uint8_t, 4> ret(outputInfo);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
- ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 4>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
LayerTestResult<float,4> CompareConvolution2dTest(
@@ -3580,7 +3599,7 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
- auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
+ std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };
std::vector<float> kernelData;
std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
@@ -3589,20 +3608,21 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
}
armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
- auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
std::vector<float> expectedOutputData(64, 0.f);
armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
- auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory,
memoryManager,
tensorHandleFactory,
input,
- kernel,
- boost::multi_array<float, 1>(),
- expectedOutput,
+ kernelData,
+ std::vector<float>(),
+ expectedOutputData,
+ inputTensorInfo.GetShape(),
+ kernelTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
0.f,
0,
armnn::DataLayout::NCHW);
@@ -3740,6 +3760,8 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
}
+ std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
+
DepthwiseConvolution2dDescriptor descriptor;
descriptor.m_StrideX = 1;
descriptor.m_StrideY = 1;
@@ -3780,10 +3802,12 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
LayerTestResult<uint8_t, 4> ret(outputInfo);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
- ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 4>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
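The hunks above all apply one pattern: the boost::multi_array members are replaced by flat std::vector buffers, raw pointers come from .data() instead of &tensor[0][0][0][0], and the result is built from the actual and expected data plus both shapes (the same m_ActualData / m_ExpectedData members assigned directly in the FullyConnected hunks further down). A dependency-free miniature of that pattern; the struct and function names here are illustrative, not the real Arm NN types:

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for the reworked LayerTestResult: flat data plus
    // the shapes needed to interpret and compare it.
    struct MiniLayerTestResult
    {
        std::vector<float>        m_ActualData;
        std::vector<float>        m_ExpectedData;
        std::vector<unsigned int> m_ActualShape;
        std::vector<unsigned int> m_ExpectedShape;
    };

    MiniLayerTestResult RunMiniTest(const std::vector<float>& expected,
                                    const std::vector<unsigned int>& shape)
    {
        std::size_t numElements = 1;
        for (unsigned int dim : shape)
        {
            numElements *= dim;   // mirrors outputTensorInfo.GetNumElements()
        }

        std::vector<float> actual(numElements); // sized up front, filled by the workload
        // ... run the workload, then copy the raw output into actual.data() ...

        return MiniLayerTestResult{ actual, expected, shape, shape };
    }
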
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
index fdc6220d51..b16ce47c8f 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
@@ -23,16 +23,16 @@ LayerTestResult<float, 4> ConvertBf16ToFp32Test(
std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
{
-37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
},
1.0f, 0);
- auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo, std::vector<armnn::BFloat16>(inputValues));
-
- LayerTestResult<float, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput =
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ };
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -47,11 +47,14 @@ LayerTestResult<float, 4> ConvertBf16ToFp32Test(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index 8745a5293b..177acef772 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -24,14 +24,19 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
- auto input = MakeTensor<armnn::Half, 4>(inputTensorInfo,
- { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
- 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+ std::vector<armnn::Half> input =
+ {
+ -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+ 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
+ };
- LayerTestResult<float, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+ std::vector<float> expectedOutput =
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -46,11 +51,14 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
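Dropping multi_array also drops the chained operator[] access, so tests that still need per-element addressing index the flat vector with row-major arithmetic. A small helper of the kind a reader might write when porting similar code; FlatIndexNchw is illustrative and not part of this patch:

    #include <cstddef>

    // Row-major offset of element (n, c, h, w) in an NCHW tensor stored as a
    // flat vector, equivalent to the old tensor[n][c][h][w] access.
    inline std::size_t FlatIndexNchw(std::size_t n, std::size_t c,
                                     std::size_t h, std::size_t w,
                                     std::size_t channels,
                                     std::size_t height,
                                     std::size_t width)
    {
        return ((n * channels + c) * height + h) * width + w;
    }
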
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
index db832594cd..9ab3746b61 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
@@ -20,8 +20,9 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32);
const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16);
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- { -37.5f, -15.2f, -8.76f,
+ std::vector<float> input =
+ {
+ -37.5f, -15.2f, -8.76f,
-2.0f, -1.5f, -1.3f,
-0.5f, -0.4f, 0.0f,
1.0f, 0.4f, 0.5f,
@@ -33,13 +34,13 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
-3.8f, // 0xC0733333 Round down
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
- });
+ };
- std::vector<armnn::BFloat16> outputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
+ std::vector<armnn::BFloat16> expectedOutput = armnnUtils::QuantizedVector<armnn::BFloat16>(
{
- -37.5f, -15.2f, -8.76f,
- -2.0f, -1.5f, -1.3f,
- -0.5f, -0.4f, 0.0f,
+ -37.5f, -15.2f, -8.76f,
+ -2.0f, -1.5f, -1.3f,
+ -0.5f, -0.4f, 0.0f,
1.0f, 0.4f, 0.5f,
1.3f, 1.5f, 2.0f,
8.76f, 15.2f, 37.5f,
@@ -52,8 +53,7 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
},
1.0f, 0);
- LayerTestResult<armnn::BFloat16, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<armnn::BFloat16, 4>(outputTensorInfo, outputValues);
+ std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -68,11 +68,15 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<armnn::BFloat16, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
- return ret;
}
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 5fbec56435..9946801aab 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -22,14 +22,19 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+ std::vector<float> input =
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ };
- LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
- { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
- 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+ std::vector<armnn::Half> expectedOutput =
+ {
+ -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+ 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
+ };
+
+ std::vector<armnn::Half> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -44,11 +49,14 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<armnn::Half, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index f2127c0f0c..97204750d0 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -40,12 +40,10 @@ LayerTestResult<T, Dim> DebugTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, Dim> input =
- MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
- LayerTestResult<T, Dim> ret(outputTensorInfo);
- ret.outputExpected =
- MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -73,9 +71,12 @@ LayerTestResult<T, Dim> DebugTestImpl(
BOOST_TEST(oss.str() == expectedStringOutput);
- CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, Dim>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index a2a5483844..7495c6b5b3 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -44,12 +44,10 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
outputInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, 4> input =
- MakeTensor<T, 4>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
- LayerTestResult<T, 4> result(outputInfo);
- result.outputExpected =
- MakeTensor<T, 4>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
@@ -65,12 +63,16 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
- return result;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
} // anonymous namespace
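Several hunks collapse MakeTensor(info, QuantizedVector(...)) into a bare QuantizedVector call, since the helper already returns the flat std::vector the new code stores. For readers unfamiliar with it, a simplified stand-in showing the affine scheme applied; the real armnnUtils helper additionally clamps to the target type's range and treats floating-point element types such as BFloat16 differently:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Simplified sketch of armnnUtils::QuantizedVector<T>: quantise each float
    // as q = round(v / scale) + offset. Illustrative only; see the real helper
    // for range clamping and the floating-point paths.
    template <typename T>
    std::vector<T> QuantizeValuesSketch(const std::vector<float>& values,
                                        float scale, int32_t offset)
    {
        std::vector<T> quantized;
        quantized.reserve(values.size());
        for (float v : values)
        {
            quantized.push_back(static_cast<T>(std::lround(v / scale) + offset));
        }
        return quantized;
    }
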
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index f60b42cae5..924844d92f 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -27,10 +27,8 @@ LayerTestResult<T1, Dim> DequantizeTestImpl(
armnn::DequantizeQueueDescriptor descriptor)
{
IgnoreUnused(memoryManager);
- boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
- LayerTestResult<T1, Dim> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T1, Dim>(outputTensorInfo, expectedOutputData);
+ std::vector<T1> actualOutput(outputTensorInfo.GetNumElements());
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -46,13 +44,16 @@ LayerTestResult<T1, Dim> DequantizeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T1, Dim>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template <armnn::DataType ArmnnInputType,
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index c6636554ea..143f9e06b1 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -155,23 +155,15 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
- auto boxEncodings = MakeTensor<T, 3>(boxEncodingsInfo, boxEncodingsData);
- auto scores = MakeTensor<T, 3>(scoresInfo, scoresData);
- auto anchors = MakeTensor<T, 2>(anchorsInfo, anchorsData);
-
armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
- armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32);
+ armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32);
- LayerTestResult<float, 3> detectionBoxesResult(detectionBoxesInfo);
- detectionBoxesResult.outputExpected = MakeTensor<float, 3>(detectionBoxesInfo, expectedDetectionBoxes);
- LayerTestResult<float, 2> detectionClassesResult(detectionClassesInfo);
- detectionClassesResult.outputExpected = MakeTensor<float, 2>(detectionClassesInfo, expectedDetectionClasses);
- LayerTestResult<float, 2> detectionScoresResult(detectionScoresInfo);
- detectionScoresResult.outputExpected = MakeTensor<float, 2>(detectionScoresInfo, expectedDetectionScores);
- LayerTestResult<float, 1> numDetectionsResult(numDetectionInfo);
- numDetectionsResult.outputExpected = MakeTensor<float, 1>(numDetectionInfo, expectedNumDetections);
+ std::vector<float> actualDetectionBoxesOutput(detectionBoxesInfo.GetNumElements());
+ std::vector<float> actualDetectionClassesOutput(detectionClassesInfo.GetNumElements());
+ std::vector<float> actualDetectionScoresOutput(detectionScoresInfo.GetNumElements());
+ std::vector<float> actualNumDetectionOutput(numDetectionInfo.GetNumElements());
auto boxedHandle = tensorHandleFactory.CreateTensorHandle(boxEncodingsInfo);
auto scoreshandle = tensorHandleFactory.CreateTensorHandle(scoresInfo);
@@ -182,7 +174,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);
armnn::ScopedTensorHandle anchorsTensor(anchorsInfo);
- AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);
+ AllocateAndCopyDataToITensorHandle(&anchorsTensor, anchorsData.data());
armnn::DetectionPostProcessQueueDescriptor data;
data.m_Parameters.m_UseRegularNms = useRegularNms;
@@ -200,7 +192,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, boxEncodingsInfo, boxedHandle.get());
- AddInputToWorkload(data, info, scoresInfo, scoreshandle.get());
+ AddInputToWorkload(data, info, scoresInfo, scoreshandle.get());
AddOutputToWorkload(data, info, detectionBoxesInfo, outputBoxesHandle.get());
AddOutputToWorkload(data, info, detectionClassesInfo, classesHandle.get());
AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
@@ -215,23 +207,38 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
outputScoresHandle->Allocate();
numDetectionHandle->Allocate();
- CopyDataToITensorHandle(boxedHandle.get(), boxEncodings.origin());
- CopyDataToITensorHandle(scoreshandle.get(), scores.origin());
+ CopyDataToITensorHandle(boxedHandle.get(), boxEncodingsData.data());
+ CopyDataToITensorHandle(scoreshandle.get(), scoresData.data());
workload->Execute();
- CopyDataFromITensorHandle(detectionBoxesResult.output.origin(), outputBoxesHandle.get());
- CopyDataFromITensorHandle(detectionClassesResult.output.origin(), classesHandle.get());
- CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
- CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());
+ CopyDataFromITensorHandle(actualDetectionBoxesOutput.data(), outputBoxesHandle.get());
+ CopyDataFromITensorHandle(actualDetectionClassesOutput.data(), classesHandle.get());
+ CopyDataFromITensorHandle(actualDetectionScoresOutput.data(), outputScoresHandle.get());
+ CopyDataFromITensorHandle(actualNumDetectionOutput.data(), numDetectionHandle.get());
- auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected);
+ auto result = CompareTensors(actualDetectionBoxesOutput,
+ expectedDetectionBoxes,
+ outputBoxesHandle->GetShape(),
+ detectionBoxesInfo.GetShape());
BOOST_TEST(result.m_Result, result.m_Message.str());
- result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected);
+
+ result = CompareTensors(actualDetectionClassesOutput,
+ expectedDetectionClasses,
+ classesHandle->GetShape(),
+ detectionClassesInfo.GetShape());
BOOST_TEST(result.m_Result, result.m_Message.str());
- result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected);
+
+ result = CompareTensors(actualDetectionScoresOutput,
+ expectedDetectionScores,
+ outputScoresHandle->GetShape(),
+ detectionScoresInfo.GetShape());
BOOST_TEST(result.m_Result, result.m_Message.str());
- result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected);
+
+ result = CompareTensors(actualNumDetectionOutput,
+ expectedNumDetections,
+ numDetectionHandle->GetShape(),
+ numDetectionInfo.GetShape());
BOOST_TEST(result.m_Result, result.m_Message.str());
}
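With the per-output result structs gone from this test, the four outputs are checked directly through the updated CompareTensors, which now takes the actual and expected shapes alongside the data. Its core check amounts to the predicate below; the real function also builds the diagnostic message asserted by BOOST_TEST and supports tolerance and Boolean comparison, so exact equality here is a simplification:

    #include <vector>

    // Core of a shape-aware tensor comparison: shapes must match before the
    // element data is compared. Illustrative reduction of CompareTensors.
    template <typename T>
    bool TensorsMatchSketch(const std::vector<T>& actualData,
                            const std::vector<T>& expectedData,
                            const std::vector<unsigned int>& actualShape,
                            const std::vector<unsigned int>& expectedShape)
    {
        return actualShape == expectedShape && actualData == expectedData;
    }
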
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
index ec5bfb0396..88f34f6add 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
@@ -25,43 +25,38 @@
template<typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const DescriptorType& descriptor)
-{
+ const armnn::IWorkloadFactory& workloadFactory,
+ const armnn::WorkloadInfo& info,
+ const DescriptorType& descriptor) {
return CreateWorkload(workloadFactory, info, descriptor);
}
-template <std::size_t NumDims,
- typename Descriptor,
- armnn::DataType ArmnnTypeInput,
- armnn::DataType ArmnnTypeOutput,
- typename TInput = armnn::ResolveType<ArmnnTypeInput>,
- typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+template<std::size_t NumDims,
+ typename Descriptor,
+ armnn::DataType ArmnnTypeInput,
+ armnn::DataType ArmnnTypeOutput,
+ typename TInput = armnn::ResolveType<ArmnnTypeInput>,
+ typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[NumDims],
- std::vector<TInput> values0,
- float quantScale0,
- int quantOffset0,
- const unsigned int shape1[NumDims],
- std::vector<TInput> values1,
- float quantScale1,
- int quantOffset1,
- const unsigned int outShape[NumDims],
- std::vector<TOutput> outValues,
- const armnn::ITensorHandleFactory& tensorHandleFactory,
- float outQuantScale,
- int outQuantOffset)
-{
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const unsigned int shape0[NumDims],
+ std::vector<TInput> values0,
+ float quantScale0,
+ int quantOffset0,
+ const unsigned int shape1[NumDims],
+ std::vector<TInput> values1,
+ float quantScale1,
+ int quantOffset1,
+ const unsigned int outShape[NumDims],
+ std::vector<TOutput> outValues,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float outQuantScale,
+ int outQuantOffset) {
armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
- auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);
-
inputTensorInfo0.SetQuantizationScale(quantScale0);
inputTensorInfo0.SetQuantizationOffset(quantOffset0);
@@ -71,11 +66,12 @@ LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
outputTensorInfo.SetQuantizationScale(outQuantScale);
outputTensorInfo.SetQuantizationOffset(outQuantOffset);
- LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);
+ std::vector<TOutput> actualOutput(outputTensorInfo.GetNumElements());
- if(ArmnnTypeOutput == armnn::DataType::Boolean)
+ bool isBoolean = false;
+ if (ArmnnTypeOutput == armnn::DataType::Boolean)
{
- ret.compareBoolean = true;
+ isBoolean = true;
}
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
@@ -93,121 +89,121 @@ LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
inputHandle1->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
- CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
+ CopyDataToITensorHandle(inputHandle0.get(), values0.data());
+ CopyDataToITensorHandle(inputHandle1.get(), values1.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
- return ret;
+ return LayerTestResult<TOutput, NumDims>(actualOutput,
+ outValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape(),
+ isBoolean);
}
-template <std::size_t NumDims,
- typename Descriptor,
- armnn::DataType ArmnnType,
- typename T = armnn::ResolveType<ArmnnType>>
+template<std::size_t NumDims,
+ typename Descriptor,
+ armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[NumDims],
- std::vector<T> values0,
- float quantScale0,
- int quantOffset0,
- const unsigned int shape1[NumDims],
- std::vector<T> values1,
- float quantScale1,
- int quantOffset1,
- const unsigned int outShape[NumDims],
- std::vector<T> outValues,
- const armnn::ITensorHandleFactory& tensorHandleFactory,
- float outQuantScale,
- int outQuantOffset)
-{
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const unsigned int shape0[NumDims],
+ std::vector<T> values0,
+ float quantScale0,
+ int quantOffset0,
+ const unsigned int shape1[NumDims],
+ std::vector<T> values1,
+ float quantScale1,
+ int quantOffset1,
+ const unsigned int outShape[NumDims],
+ std::vector<T> outValues,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float outQuantScale,
+ int outQuantOffset) {
return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
- workloadFactory,
- memoryManager,
- shape0,
- values0,
- quantScale0,
- quantOffset0,
- shape1,
- values1,
- quantScale1,
- quantOffset1,
- outShape,
- outValues,
- tensorHandleFactory,
- outQuantScale,
- outQuantOffset);
+ workloadFactory,
+ memoryManager,
+ shape0,
+ values0,
+ quantScale0,
+ quantOffset0,
+ shape1,
+ values1,
+ quantScale1,
+ quantOffset1,
+ outShape,
+ outValues,
+ tensorHandleFactory,
+ outQuantScale,
+ outQuantOffset);
}
-template <std::size_t NumDims,
- typename Descriptor,
- armnn::DataType ArmnnTypeInput,
- armnn::DataType ArmnnTypeOutput,
- typename TInput = armnn::ResolveType<ArmnnTypeInput>,
- typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+template<std::size_t NumDims,
+ typename Descriptor,
+ armnn::DataType ArmnnTypeInput,
+ armnn::DataType ArmnnTypeOutput,
+ typename TInput = armnn::ResolveType<ArmnnTypeInput>,
+ typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[NumDims],
- std::vector<TInput> values0,
- const unsigned int shape1[NumDims],
- std::vector<TInput> values1,
- const unsigned int outShape[NumDims],
- std::vector<TOutput> outValues,
- const armnn::ITensorHandleFactory& tensorHandleFactory,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const unsigned int shape0[NumDims],
+ std::vector<TInput> values0,
+ const unsigned int shape1[NumDims],
+ std::vector<TInput> values1,
+ const unsigned int outShape[NumDims],
+ std::vector<TOutput> outValues,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float quantScale = 1.0f,
+ int quantOffset = 0) {
return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
- workloadFactory,
- memoryManager,
- shape0,
- values0,
- quantScale,
- quantOffset,
- shape1,
- values1,
- quantScale,
- quantOffset,
- outShape,
- outValues,
- tensorHandleFactory,
- quantScale,
- quantOffset);
+ workloadFactory,
+ memoryManager,
+ shape0,
+ values0,
+ quantScale,
+ quantOffset,
+ shape1,
+ values1,
+ quantScale,
+ quantOffset,
+ outShape,
+ outValues,
+ tensorHandleFactory,
+ quantScale,
+ quantOffset);
}
-template <std::size_t NumDims,
- typename Descriptor,
- armnn::DataType ArmnnType,
- typename T = armnn::ResolveType<ArmnnType>>
+template<std::size_t NumDims,
+ typename Descriptor,
+ armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, NumDims> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[NumDims],
- std::vector<T> values0,
- const unsigned int shape1[NumDims],
- std::vector<T> values1,
- const unsigned int outShape[NumDims],
- std::vector<T> outValues,
- const armnn::ITensorHandleFactory& tensorHandleFactory,
- float quantScale = 1.0f,
- int quantOffset = 0)
-{
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const unsigned int shape0[NumDims],
+ std::vector<T> values0,
+ const unsigned int shape1[NumDims],
+ std::vector<T> values1,
+ const unsigned int outShape[NumDims],
+ std::vector<T> outValues,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float quantScale = 1.0f,
+ int quantOffset = 0) {
return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
- workloadFactory,
- memoryManager,
- shape0,
- values0,
- shape1,
- values1,
- outShape,
- outValues,
- tensorHandleFactory,
- quantScale,
- quantOffset);
+ workloadFactory,
+ memoryManager,
+ shape0,
+ values0,
+ shape1,
+ values1,
+ outShape,
+ outValues,
+ tensorHandleFactory,
+ quantScale,
+ quantOffset);
}
\ No newline at end of file
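The extra isBoolean argument threaded into the LayerTestResult above replaces the old compareBoolean member. Boolean tensors are presumably compared by truth value rather than bit pattern, since a backend may encode true as any non-zero byte; under that assumption the comparison reduces to something like:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Truth-value comparison for Boolean tensors: 1 and 255 both count as true.
    inline bool BooleanElementsMatch(const std::vector<uint8_t>& actual,
                                     const std::vector<uint8_t>& expected)
    {
        if (actual.size() != expected.size())
        {
            return false;
        }
        for (std::size_t i = 0; i < actual.size(); ++i)
        {
            if ((actual[i] != 0) != (expected[i] != 0))
            {
                return false;
            }
        }
        return true;
    }
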
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
index 5d37e934ea..20e341b4e2 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
@@ -55,9 +55,9 @@ LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
outputTensorInfo.SetQuantizationScale(outQuantScale);
outputTensorInfo.SetQuantizationOffset(outQuantOffset);
- auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));
-
- LayerTestResult<T, NumDims> ret(outputTensorInfo);
+ std::vector<T> input = ConvertToDataType<ArmnnType>(values, inputTensorInfo);
+ std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(outValues, inputTensorInfo);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -73,16 +73,18 @@ LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, NumDims>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
- ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
- inputTensorInfo));
- return ret;
}
template <std::size_t NumDims,
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 157df99d64..bbe481657d 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -22,16 +22,17 @@ LayerTestResult<float, 2> FakeQuantizationTest(
constexpr unsigned int width = 2;
constexpr unsigned int height = 3;
- const armnn::TensorInfo tensorInfo({height, width },
- armnn::DataType::Float32);
+ const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
- auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
-10.0f, -5.0f,
0.0f, 5.0f,
10.0f, 10.0f
- }));
+ };
- LayerTestResult<float, 2> ret(tensorInfo);
+ std::vector<float> actualOutput(tensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(tensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);
@@ -48,7 +49,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
data.m_Parameters.m_Min = min;
data.m_Parameters.m_Max = max;
- armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::PassthroughTensorHandle refHandle(tensorInfo, expectedOutput.data());
armnn::FakeQuantizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
@@ -58,18 +59,22 @@ LayerTestResult<float, 2> FakeQuantizationTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+ expectedOutput =
+ {
0.0f, 63.0f,
128.0f, 191.0f,
255.0f, 255.0f
- }));
+ };
- return ret;
+ return LayerTestResult<float, 2>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ tensorInfo.GetShape());
}
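One subtlety in the FakeQuantization rewrite: the PassthroughTensorHandle wraps expectedOutput.data() rather than copying it, so anything written through that handle lands directly in the vector's storage, and the vector must stay alive (and not reallocate) for as long as the handle is used. The hard-coded brace-list assigned afterwards then fills the same vector with the known-good values. In miniature, with plain pointers standing in for the handle:

    #include <cstddef>
    #include <vector>

    // Miniature of the passthrough pattern: the "handle" is just a raw pointer
    // into the vector, so writes through it land directly in expectedOutput.
    void WriteThroughHandle(float* handle, std::size_t count, float value)
    {
        for (std::size_t i = 0; i < count; ++i)
        {
            handle[i] = value;   // stands in for a workload writing its output
        }
    }

    void PassthroughSketch()
    {
        std::vector<float> expectedOutput(6);          // backing storage
        float* refHandle = expectedOutput.data();      // aliases, does not copy
        WriteThroughHandle(refHandle, expectedOutput.size(), 0.0f);
        // expectedOutput now holds whatever was written through the handle.
    }
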
diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
index f96d33735f..9208a311a7 100644
--- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
@@ -21,15 +21,15 @@ LayerTestResult<T, 4> SimpleFillTest(
armnn::TensorInfo inputTensorInfo({4}, armnn::DataType::Signed32);
armnn::TensorInfo outputTensorInfo({2, 2, 3, 2}, ArmnnType);
- auto input = MakeTensor<int32_t, 1>(inputTensorInfo, ConvertToDataType<armnn::DataType::Signed32>(
- {2, 2, 3, 2},
- inputTensorInfo));
+ std::vector<int32_t> input = ConvertToDataType<armnn::DataType::Signed32>( { 2, 2, 3, 2 }, inputTensorInfo);
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, ConvertToDataType<ArmnnType>(
- { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
- 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f },
- outputTensorInfo));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(
+ {
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
+ },
+ outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -45,13 +45,16 @@ LayerTestResult<T, 4> SimpleFillTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
//
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index 46f384266b..bf871ae2f4 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -24,16 +24,20 @@ LayerTestResult<T, 4> SimpleFloorTest(
armnn::TensorInfo outputTensorInfo(inputTensorInfo);
outputTensorInfo.SetQuantizationScale(0.1f);
- auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
- 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
- inputTensorInfo));
-
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, ConvertToDataType<ArmnnType>(
- { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
- 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f },
- outputTensorInfo));
+ std::vector<T> input = ConvertToDataType<ArmnnType>(
+ {
+ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+ 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+ },
+ inputTensorInfo);
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(
+ {
+ -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+ 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f
+ },
+ outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -48,13 +52,16 @@ LayerTestResult<T, 4> SimpleFloorTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
//
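The Floor expectations above are exactly std::floor of each input value (for example -37.5f maps to -38.0f and 15.2f to 15.0f), so anyone extending this test can regenerate the reference list instead of writing it by hand:

    #include <cmath>
    #include <vector>

    // Regenerate the SimpleFloorTest reference data from its inputs.
    std::vector<float> MakeFloorExpectations(const std::vector<float>& input)
    {
        std::vector<float> expected;
        expected.reserve(input.size());
        for (float v : input)
        {
            expected.push_back(std::floor(v));
        }
        return expected;
    }
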
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index cd7f4efe31..c47048e566 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -29,9 +29,9 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::TensorInfo outputTensorInfo,
armnn::TensorInfo weightsDesc,
armnn::TensorInfo biasesDesc,
- boost::multi_array<T, 2>& weights,
- boost::multi_array<B, 1>& bias,
- boost::multi_array<T, 4>& input,
+ std::vector<T>& weights,
+ std::vector<B>& bias,
+ std::vector<T>& input,
bool biasEnabled,
bool transposeWeights)
{
@@ -43,8 +43,10 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::ScopedTensorHandle weightsTensor(weightsDesc);
armnn::ScopedTensorHandle biasTensor(biasesDesc);
- AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
- AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
+ AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -58,11 +60,12 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ result.m_ActualData = actualOutput;
return result;
}
@@ -76,9 +79,9 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
armnn::TensorInfo outputTensorInfo,
armnn::TensorInfo weightsTensorInfo,
armnn::TensorInfo biasesTensorInfo,
- boost::multi_array<T, 2>& weights,
- boost::multi_array<B, 1>& bias,
- boost::multi_array<T, 4>& input,
+ std::vector<T>& weights,
+ std::vector<B>& bias,
+ std::vector<T>& input,
bool biasEnabled,
bool transposeWeights)
{
@@ -86,6 +89,8 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
@@ -109,17 +114,18 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
input0Handle->Allocate();
input1Handle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(input0Handle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(input1Handle.get(), &weights[0][0]);
+ CopyDataToITensorHandle(input0Handle.get(), input.data());
+ CopyDataToITensorHandle(input1Handle.get(), weights.data());
if (biasEnabled)
{
input2Handle->Allocate();
- CopyDataToITensorHandle(input2Handle.get(), &bias[0]);
+ CopyDataToITensorHandle(input2Handle.get(), bias.data());
}
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ result.m_ActualData = actualOutput;
return result;
}
@@ -158,21 +164,21 @@ LayerTestResult<T, 2> FullyConnectedTest(
LayerTestResult<T, 2> result(outputTensorInfo);
- auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ std::vector<T> input = ConvertToDataType<ArmnnType>(
{
-1.2f, 6.1f, -3.5f,
18.8f, -5.5f, 2.9f
},
- inputTensorInfo));
+ inputTensorInfo);
- auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
+ std::vector<T> weights = ConvertToDataType<ArmnnType>(
{
-8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
},
- weightsDesc));
+ weightsDesc);
- auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+ std::vector<int32_t> bias = {9250, 67500};
if (constantWeights)
{
@@ -207,13 +213,11 @@ LayerTestResult<T, 2> FullyConnectedTest(
if (biasEnabled)
{
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
+ result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
}
else
{
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
+ result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
}
return result;
@@ -274,22 +278,19 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
LayerTestResult<T, 2> result(outputTensorInfo);
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>({
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
},
- qScale, qOffset)
- );
+ qScale, qOffset);
- boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
- armnnUtils::QuantizedVector<T>({
+ std::vector<T> weights = armnnUtils::QuantizedVector<T>(
+ {
2.0f, 3.0f, 4.0f, 5.0f, 6.0f
},
- qScale, qOffset)
- );
+ qScale, qOffset);
std::vector<T> biasValues({900000.f});
- boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
result = SimpleFullyConnectedTestImpl<T>(
workloadFactory,
@@ -297,12 +298,11 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
tensorHandleFactory,
inputTensorInfo, outputTensorInfo,
weightsDesc, biasesDesc,
- weights, bias, input,
+ weights, biasValues, input,
true, transposeWeights
);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));
+ result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
return result;
}
@@ -370,40 +370,36 @@ LayerTestResult<float, 2> FullyConnectedFloat32Test(
LayerTestResult<float, 2> result(outputTensorInfo);
- boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
-
- 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
- })
- );
+ std::vector<float> input =
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+ };
- boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
- {
- .5f, 2.f, .5f,
- .5f, 2.f, 1.f,
- .5f, 2.f, 2.f,
- .5f, 2.f, 3.f,
- .5f, 2.f, 4.f
- }));
+ std::vector<float> weights =
+ {
+ .5f, 2.f, .5f,
+ .5f, 2.f, 1.f,
+ .5f, 2.f, 2.f,
+ .5f, 2.f, 3.f,
+ .5f, 2.f, 4.f
+ };
if (transposeWeights)
{
- weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
+ weights =
{
.5f, .5f, .5f, .5f, .5f,
2.f, 2.f, 2.f, 2.f, 2.f,
.5f, 1.f, 2.f, 3.f, 4.f
- }));
+ };
}
-
std::vector<float> biasValues({0.f, 0.f, 0.f});
if (biasEnabled)
{
- biasValues = std::vector<float>({10.f, 20.f, 30.f});
+ biasValues = std::vector<float>({10.f, 20.f, 30.f});
}
- boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
result = SimpleFullyConnectedTestImpl<float>(
workloadFactory,
@@ -411,21 +407,21 @@ LayerTestResult<float, 2> FullyConnectedFloat32Test(
tensorHandleFactory,
inputTensorInfo, outputTensorInfo,
weightsDesc, biasesDesc,
- weights, bias, input,
+ weights, biasValues, input,
biasEnabled, transposeWeights
);
- result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
- {
- 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
- 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
- 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
-
- 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
- 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
- 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
- })
- );
+ std::vector<float> expectedOutput =
+ {
+ 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
+ 2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
+ 0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
+
+ 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
+ 10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
+ 2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
+ };
+ result.m_ExpectedData = expectedOutput;
return result;
}
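The FullyConnected tests use the second idiom this patch introduces: a LayerTestResult constructed from the output TensorInfo (which pre-sets both shape members), with m_ActualData and m_ExpectedData assigned afterwards. A self-contained sketch, reusing the bias-enabled expected values from the hunks above; the {1, 2} shape is assumed to match the two-element output:

    armnn::TensorInfo outputTensorInfo({ 1, 2 }, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo); // m_ActualShape/m_ExpectedShape set here
    result.m_ActualData   = { 80.0f, 1460.0f };         // normally copied back from outputHandle
    result.m_ExpectedData = { 80.0f, 1460.0f };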
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index 7fabff6c1c..51df1eb847 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -33,11 +33,8 @@ LayerTestResult<T, OutputDim> GatherTestImpl(
const std::vector<T>& outputData)
{
IgnoreUnused(memoryManager);
- auto params = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
- auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
- LayerTestResult<T, OutputDim> result(outputInfo);
- result.outputExpected = MakeTensor<T, OutputDim>(outputInfo, outputData);
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> paramsHandle = tensorHandleFactory.CreateTensorHandle(paramsInfo);
std::unique_ptr<armnn::ITensorHandle> indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo);
@@ -55,14 +52,17 @@ LayerTestResult<T, OutputDim> GatherTestImpl(
indicesHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(paramsHandle.get(), params.origin());
- CopyDataToITensorHandle(indicesHandle.get(), indices.origin());
+ CopyDataToITensorHandle(paramsHandle.get(), paramsData.data());
+ CopyDataToITensorHandle(indicesHandle.get(), indicesData.data());
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, OutputDim>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 24a4dc4789..ed656daa02 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -36,19 +36,15 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
+ std::vector<T> inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::WorkloadInfo info;
-
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -57,13 +53,16 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
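armnnUtils::QuantizedVector<T> already returns a flat std::vector<T>, so wrapping its result in MakeTensor was pure overhead; the patch keeps the quantization step and drops the wrapper. A small usage sketch with hypothetical scale/offset values:

    std::vector<float> values = { 1.0f, 2.0f, 3.0f, 4.0f };
    float   qScale  = 0.5f;  // hypothetical quantization parameters
    int32_t qOffset = 10;

    // For a quantized T (e.g. uint8_t) this quantizes; for float it passes through.
    std::vector<T> tensorData = armnnUtils::QuantizedVector<T>(values, qScale, qOffset);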
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 227ac63941..e242fd31d3 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -30,7 +30,7 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
const std::vector<float>& inputValues,
float outScale,
int32_t outOffset,
- const std::vector<float>& expectedOutputValues,
+ std::vector<float>& expectedOutputValues,
const armnn::DataLayout layout,
float epsilon = 1e-12f)
{
@@ -48,26 +48,23 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
inputData = tmp;
}
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
+ auto inputTensor = armnnUtils::QuantizedVector<T>(inputData,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset());
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
- std::vector<float> expectedOutputData = expectedOutputValues;
if (layout == armnn::DataLayout::NHWC)
{
- std::vector<float> tmp(expectedOutputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
+ std::vector<float> tmp(expectedOutputValues.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(),
sizeof(float));
- expectedOutputData = tmp;
+ expectedOutputValues = tmp;
}
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected =
- MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(expectedOutputData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputValues,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -85,14 +82,17 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
float CalcInvL2Norm(std::initializer_list<float> elements)
@@ -725,10 +725,7 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
- auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
-
- LayerTestResult<float, 2> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -746,14 +743,17 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<float, 2>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
LayerTestResult<float, 4> L2Normalization3dTest(
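One behavioural subtlety in the L2Normalization hunks: expectedOutputValues is now taken by non-const reference and permuted in place for the NHWC layout, so a caller reusing the same vector across layouts would see it modified. The conversion itself, as in the hunk:

    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputValues.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            expectedOutputValues.data(), tmp.data(), sizeof(float));
        expectedOutputValues = tmp; // in-place update visible to the caller
    }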
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
index c64fc88024..ac60764964 100644
--- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
@@ -8,38 +8,56 @@
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
-#include <boost/multi_array.hpp>
-
#include <cstddef>
-
-template <std::size_t n>
-boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
-{
- ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
- "Attempting to construct a shape array of mismatching size");
-
- boost::array<unsigned int, n> shape;
- for (unsigned int i = 0; i < n; i++)
- {
- shape[i] = tensorInfo.GetShape()[i];
- }
- return shape;
-}
+#include <vector>
template <typename T, std::size_t n>
struct LayerTestResult
{
LayerTestResult(const armnn::TensorInfo& outputInfo)
+ : m_Supported(true)
+ , m_CompareBoolean(false)
{
- auto shape( GetTensorShapeAsArray<n>(outputInfo) );
- output.resize(shape);
- outputExpected.resize(shape);
- supported = true;
- compareBoolean = false;
+ m_ActualData.reserve(outputInfo.GetNumElements());
+ m_ExpectedData.reserve(outputInfo.GetNumElements());
+ m_ActualShape = outputInfo.GetShape();
+ m_ExpectedShape = outputInfo.GetShape();
}
- boost::multi_array<T, n> output;
- boost::multi_array<T, n> outputExpected;
- bool supported;
- bool compareBoolean;
+ LayerTestResult(const std::vector<T>& actualData,
+ const std::vector<T>& expectedData,
+ const armnn::TensorShape& actualShape,
+ const armnn::TensorShape& expectedShape)
+ : m_ActualData(actualData)
+ , m_ExpectedData(expectedData)
+ , m_ActualShape(actualShape)
+ , m_ExpectedShape(expectedShape)
+ , m_Supported(true)
+ , m_CompareBoolean(false)
+ {}
+
+ LayerTestResult(const std::vector<T>& actualData,
+ const std::vector<T>& expectedData,
+ const armnn::TensorShape& actualShape,
+ const armnn::TensorShape& expectedShape,
+ const bool compareBoolean)
+ : m_ActualData(actualData)
+ , m_ExpectedData(expectedData)
+ , m_ActualShape(actualShape)
+ , m_ExpectedShape(expectedShape)
+ , m_Supported(true)
+ , m_CompareBoolean(compareBoolean)
+ {}
+
+ std::vector<T> m_ActualData;
+ std::vector<T> m_ExpectedData;
+ armnn::TensorShape m_ActualShape;
+ armnn::TensorShape m_ExpectedShape;
+
+ bool m_Supported;
+ bool m_CompareBoolean;
};
+
+
+
+
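With boost removed, LayerTestResult is a plain aggregate: two flat data vectors, two shapes, and the two flags. A self-contained sketch of producing one and checking it with the reworked CompareTensors, whose four-argument form appears in the LstmTestImpl hunks below:

    std::vector<float> actual   = { 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<float> expected = { 1.0f, 2.0f, 3.0f, 4.0f };
    armnn::TensorShape shape({ 2, 2 });

    LayerTestResult<float, 2> result(actual, expected, shape, shape);

    // CompareTensors now checks the shapes as well as the flattened values.
    auto comparison = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                     result.m_ActualShape, result.m_ExpectedShape);
    BOOST_TEST(comparison.m_Result, comparison.m_Message.str());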
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index f32d367d37..ad23f8f380 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -38,9 +38,11 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
- LayerTestResult<T, NumDims> result(outputInfo);
- result.outputExpected =
- MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
+
+ auto inputTensor = armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset);
+
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
@@ -55,14 +57,17 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
- CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, NumDims>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
- return result;
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index 4f04673171..119e76bda9 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -35,9 +35,7 @@ LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
- auto inputTensor = MakeTensor<uint8_t, NumDims>(inputTensorInfo, input);
-
- LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+ std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr <armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -55,16 +53,18 @@ LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
- ret.compareBoolean = true;
- return ret;
+ return LayerTestResult<uint8_t, NumDims>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape(),
+ true);
}
template <std::size_t NumDims>
@@ -89,10 +89,7 @@ LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
- auto inputTensor0 = MakeTensor<uint8_t, NumDims>(inputTensorInfo0, input0);
- auto inputTensor1 = MakeTensor<uint8_t, NumDims>(inputTensorInfo1, input1);
-
- LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+ std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr <armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr <armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
@@ -113,17 +110,19 @@ LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
inputHandle1->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle0.get(), inputTensor0.origin());
- CopyDataToITensorHandle(inputHandle1.get(), inputTensor1.origin());
+ CopyDataToITensorHandle(inputHandle0.get(), input0.data());
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
workload->PostAllocationConfigure();
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
- ret.compareBoolean = true;
- return ret;
+ return LayerTestResult<uint8_t, NumDims>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape(),
+ true);
}
class UnaryTestData
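Boolean layer outputs take the five-argument constructor, which replaces the old ret.compareBoolean = true assignment: the trailing flag is stored as m_CompareBoolean for the comparison routine to consult. As used in the hunks above:

    return LayerTestResult<uint8_t, NumDims>(actualOutput,
                                             expectedOutput,
                                             outputHandle->GetShape(),
                                             outputTensorInfo.GetShape(),
                                             true); // sets m_CompareBoolean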
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 1c63542dcb..11003a2e97 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -20,18 +20,17 @@
#include <test/TensorHelpers.hpp>
-#include <boost/multi_array.hpp>
-
namespace
{
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void LstmUtilsVectorBatchVectorAddTestImpl(
- boost::multi_array<float, 1>& vec,
- boost::multi_array<float, 2>& batchVec,
+ std::vector<float>& vec,
+ std::vector<float>& batchVec,
uint32_t vSize,
uint32_t nBatch,
- boost::multi_array<float, 2>& expectedOutput )
+ std::vector<float>& expectedOutput,
+ armnn::TensorShape& expectedShape)
{
float qScale = 0.0f;
int32_t qOffset = 0;
@@ -45,19 +44,20 @@ void LstmUtilsVectorBatchVectorAddTestImpl(
VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- auto result = CompareTensors(batchVec, expectedOutput);
+ auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
- BOOST_TEST(batchVec[0][0] == 1.0f);
+ BOOST_TEST(batchVec[0] == 1.0f);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void LstmUtilsZeroVectorTestImpl(
- boost::multi_array<float, 1>& input,
+ std::vector<float>& input,
uint32_t vSize,
- boost::multi_array<float, 1>& expectedOutput)
+ std::vector<float>& expectedOutput,
+ armnn::TensorShape& expectedShape)
{
float qScale = 0.0f;
int32_t qOffset = 0;
@@ -71,7 +71,7 @@ void LstmUtilsZeroVectorTestImpl(
ZeroVector(*outputEncoder, vSize);
// check shape and compare values
- auto result = CompareTensors(input, expectedOutput);
+ auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
@@ -82,10 +82,11 @@ void LstmUtilsZeroVectorTestImpl(
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void LstmUtilsMeanStddevNormalizationTestImpl(
- boost::multi_array<float, 2>& input,
+ std::vector<float>& input,
uint32_t vSize,
uint32_t nBatch,
- boost::multi_array<float, 2>& expectedOutput)
+ std::vector<float>& expectedOutput,
+ armnn::TensorShape& expectedShape)
{
float qScale = 0.0f;
int32_t qOffset = 0;
@@ -98,21 +99,22 @@ void LstmUtilsMeanStddevNormalizationTestImpl(
MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
// check shape and compare values
- auto result = CompareTensors(input, expectedOutput);
+ auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
outputEncoder->Set(1.0f);
- BOOST_TEST(input[0][0] == 1.0f);
+ BOOST_TEST(input[0] == 1.0f);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
- boost::multi_array<float, 1>& vec,
- boost::multi_array<float, 2>& batchVec,
+ std::vector<float>& vec,
+ std::vector<float>& batchVec,
uint32_t vSize,
uint32_t nBatch,
- boost::multi_array<float, 2>& expectedOutput)
+ std::vector<float>& expectedOutput,
+ armnn::TensorShape& expectedShape)
{
float qScale = 0.0f;
int32_t qOffset = 0;
@@ -126,12 +128,12 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- auto result = CompareTensors(batchVec, expectedOutput);
+ auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
- BOOST_TEST(batchVec[0][0] == 1.0f);
+ BOOST_TEST(batchVec[0] == 1.0f);
}
// Lstm Layer tests:
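Since flat vectors carry no dimensions, each LstmUtils helper above now takes an explicit armnn::TensorShape and passes it to CompareTensors twice (the data is modified in place, so actual and expected share one shape). A hypothetical call with 2x4 data:

    uint32_t vSize = 4, nBatch = 2;
    armnn::TensorShape expectedShape({ nBatch, vSize });

    std::vector<float> vec(vSize, 0.5f);                      // hypothetical inputs
    std::vector<float> batchVec(nBatch * vSize, 1.0f);
    std::vector<float> expectedOutput(nBatch * vSize, 1.5f);  // 1.0f + 0.5f per element

    LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(
        vec, batchVec, vSize, nBatch, expectedOutput, expectedShape);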
@@ -142,16 +144,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 2>& input,
- const boost::multi_array<T, 2>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputExpectedShape,
float qScale = 0.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
IgnoreUnused(memoryManager);
- unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
// cellSize and outputSize have the same size when there is no projection.
unsigned numUnits = outputSize;
@@ -164,30 +168,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
- LayerTestResult<T, 2> ret(outputTensorInfo);
-
std::vector<T> inputVector;
inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
- auto inputTensor = MakeTensor<T,2>(inputTensorInfo, inputVector);
std::vector<T> cellStateInVector(batchSize * numUnits, T());
- auto cellStateInTensor = MakeTensor<T,2>(cellStateInTensorInfo, cellStateInVector);
-
std::vector<T> outputStateInVector(batchSize * outputSize, T());
- auto outputStateInTensor = MakeTensor<T,2>(outputStateInTensorInfo, outputStateInVector);
-
std::vector<T> scratchBufferVector(batchSize * numUnits * 4, T());
- auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);
-
std::vector<T> outputStateOutVector(batchSize * outputSize, T());
- auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);
-
std::vector<T> cellStateOutVector(batchSize * numUnits, T());
- auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::vector<T> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
- ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
@@ -219,59 +212,59 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);
- auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
- -0.34550029f, 0.04266912f, -0.15680569f,
- -0.34856534f, 0.43890524f});
+ std::vector<float> inputToInputWeights = {-0.45018822f, -0.02338299f, -0.0870589f,
+ -0.34550029f, 0.04266912f, -0.15680569f,
+ -0.34856534f, 0.43890524f};
- auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
- -0.31343272f, -0.40032279f, 0.44781327f,
- 0.01387155f, -0.35593212f});
+ std::vector<float> inputToForgetWeights = { 0.09701663f, 0.20334584f, -0.50592935f,
+ -0.31343272f, -0.40032279f, 0.44781327f,
+ 0.01387155f, -0.35593212f};
- auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
- -0.20583314f, 0.44344562f, 0.22077113f,
- -0.29909778f});
+ std::vector<float> inputToCellWeights = { -0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
+ -0.20583314f, 0.44344562f, 0.22077113f,
+ -0.29909778f};
- auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
- 0.40525138f, 0.44272184f, 0.03897077f,
- -0.1556896f, 0.19487578f});
+ std::vector<float> inputToOutputWeights = { -0.25065863f, -0.28290087f, 0.04613829f,
+ 0.40525138f, 0.44272184f, 0.03897077f,
+ -0.1556896f, 0.19487578f};
- auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
- -0.35746509f, 0.28902304f, 0.08183324f,
- -0.16555229f, 0.02286911f, -0.13566875f,
- 0.03034258f, 0.48091322f, -0.12528998f,
- 0.24077177f, -0.51332325f, -0.33502164f,
- 0.10629296f});
+ std::vector<float> recurrentToInputWeights = {-0.0063535f, -0.2042388f, 0.31454784f,
+ -0.35746509f, 0.28902304f, 0.08183324f,
+ -0.16555229f, 0.02286911f, -0.13566875f,
+ 0.03034258f, 0.48091322f, -0.12528998f,
+ 0.24077177f, -0.51332325f, -0.33502164f,
+ 0.10629296f};
- auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
- 0.2112639f, 0.27654213f, 0.20864892f,
- -0.07646349f, 0.45877004f, 0.00141793f,
- -0.14609534f, 0.36447752f, 0.09196436f,
- 0.28053468f, 0.01560611f, -0.20127171f,
- -0.01140004f});
+ std::vector<float> recurrentToForgetWeights = { -0.48684245f, -0.06655136f, 0.42224967f,
+ 0.2112639f, 0.27654213f, 0.20864892f,
+ -0.07646349f, 0.45877004f, 0.00141793f,
+ -0.14609534f, 0.36447752f, 0.09196436f,
+ 0.28053468f, 0.01560611f, -0.20127171f,
+ -0.01140004f};
- auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
- 0.26320225f, 0.05695659f, -0.00123841f,
- -0.4744786f, -0.35869038f, -0.06418842f,
- -0.13502428f, -0.501764f, 0.22830659f,
- -0.46367589f, 0.26016325f, -0.03894562f,
- -0.16368064f});
+ std::vector<float> recurrentToCellWeights = { -0.3407414f, 0.24443203f, -0.2078532f,
+ 0.26320225f, 0.05695659f, -0.00123841f,
+ -0.4744786f, -0.35869038f, -0.06418842f,
+ -0.13502428f, -0.501764f, 0.22830659f,
+ -0.46367589f, 0.26016325f, -0.03894562f,
+ -0.16368064f};
- auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
- 0.09215671f, 0.24107647f, -0.39835793f,
- 0.18212086f, 0.01301402f, 0.48572797f,
- -0.50656658f, 0.20047462f, -0.20607421f,
- -0.51818722f, -0.15390486f, 0.0468148f,
- 0.39922136f});
+ std::vector<float> recurrentToOutputWeights = { 0.43385774f, -0.17194885f, 0.2718237f,
+ 0.09215671f, 0.24107647f, -0.39835793f,
+ 0.18212086f, 0.01301402f, 0.48572797f,
+ -0.50656658f, 0.20047462f, -0.20607421f,
+ -0.51818722f, -0.15390486f, 0.0468148f,
+ 0.39922136f};
- auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+ std::vector<float> cellToInputWeights = {0., 0., 0., 0.};
- auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+ std::vector<float> inputGateBias = {0., 0., 0., 0.};
- auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});
+ std::vector<float> forgetGateBias = {1., 1., 1., 1.};
- auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+ std::vector<float> cellBias = {0., 0., 0., 0.};
- auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+ std::vector<float> outputGateBias = {0., 0., 0., 0.};
armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
@@ -287,19 +280,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
- AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
- AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
data.m_InputToInputWeights = &inputToInputWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -330,15 +323,18 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
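The LSTM test entry points likewise gain explicit TensorShape parameters, because batchSize, inputSize and outputSize were previously read off the multi_array's shape(). A hypothetical invocation (workloadFactory, memoryManager and tensorHandleFactory assumed in scope; the data values are placeholders, not the real test vectors):

    armnn::TensorShape inputShape({ 2, 2 });   // batchSize x inputSize
    armnn::TensorShape outputShape({ 2, 4 });  // batchSize x outputSize (= numUnits)

    std::vector<float> input(2 * 2, 0.0f);     // placeholder data
    std::vector<float> expectedOutput(2 * 4, 0.0f);

    LayerTestResult<float, 2> result =
        LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, tensorHandleFactory,
            input, expectedOutput, inputShape, outputShape);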
@@ -346,8 +342,8 @@ LayerTestResult<T, 2>
LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 2>& input,
- const boost::multi_array<T, 2>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& outputExpected,
float qScale = 0.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
@@ -368,30 +364,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
- LayerTestResult<T, 2> ret(outputTensorInfo);
-
std::vector<T> inputVector;
inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
- auto inputTensor = MakeTensor<T,2>(inputTensorInfo, inputVector);
std::vector<T> cellStateInVector(batchSize * numUnits, T());
- auto cellStateInTensor = MakeTensor<T,2>(cellStateInTensorInfo, cellStateInVector);
-
std::vector<T> outputStateInVector(batchSize * outputSize, T());
- auto outputStateInTensor = MakeTensor<T,2>(outputStateInTensorInfo, outputStateInVector);
-
std::vector<T> scratchBufferVector(batchSize * numUnits * 4, T());
- auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);
-
std::vector<T> outputStateOutVector(batchSize * outputSize, T());
- auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);
-
std::vector<T> cellStateOutVector(batchSize * numUnits, T());
- auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);
+
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::vector<T> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
- ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
@@ -425,135 +410,118 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset);
armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset);
- auto inputToInputWeights =
- MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
- 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f,
- -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
- -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
- -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f,
- -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
- -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
- 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f,
- 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f,
- 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f,
- -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
- 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f,
- -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
- -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f,
- -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
- 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f,
- -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f,
- -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
- -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
- -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
- });
-
- auto inputToForgetWeights =
- MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
- -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
- -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
- 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
- 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
- -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
- -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
- 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
- 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
- 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f,
- 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
- -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
- 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
- -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f,
- -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
- 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
- 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f,
- 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
- -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
- 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
- });
-
- auto inputToCellWeights =
- MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
- -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
- -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
- -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
- -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
- 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f,
- -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f,
- 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f,
- -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f,
- -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f,
- -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f,
- 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f,
- 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f,
- 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f,
- -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f,
- -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f,
- -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f,
- -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f,
- -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f,
- -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f,
- 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f,
- 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f,
- 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f,
- 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f,
- 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f
- });
-
- auto inputToOutputWeights =
- MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
- -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f,
- 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
- -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
- -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
- 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
- -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f,
- -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
- -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
- -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f,
- 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
- 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f,
- 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
- -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f,
- 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f,
- 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
- -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f,
- 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
- -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
- -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
- });
-
- auto inputGateBias =
- MakeTensor<float, 1>(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
- -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
- -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
- 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
- });
-
- auto forgetGateBias =
- MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
- 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f,
- 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f,
- -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f,
- 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
- });
-
- auto cellBias =
- MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f,
- -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f,
- -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f,
- -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f,
- 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
- });
-
- auto outputGateBias =
- MakeTensor<float, 1>(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
- 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
- 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
- -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
- });
-
- auto recurrentToInputWeights =
- MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
+ std::vector<float> inputToInputWeights = {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
+ 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f,
+ -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
+ -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
+ -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f,
+ -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
+ -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
+ 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f,
+ 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f,
+ 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f,
+ -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
+ 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f,
+ -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
+ -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f,
+ -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
+ 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f,
+ -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f,
+ -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
+ -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
+ -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f };
+
+ std::vector<float> inputToForgetWeights = {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
+ -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
+ -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
+ 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
+ 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
+ -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
+ -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
+ 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
+ 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
+ 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f,
+ 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
+ -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
+ 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
+ -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f,
+ -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
+ 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
+ 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f,
+ 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
+ -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
+ 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f };
+
+ std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
+ -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
+ -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
+ -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
+ -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
+ 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f,
+ -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f,
+ 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f,
+ -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f,
+ -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f,
+ -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f,
+ 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f,
+ 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f,
+ 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f,
+ -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f,
+ -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f,
+ -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f,
+ -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f,
+ -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f,
+ -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f,
+ 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f,
+ 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f,
+ 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f,
+ 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f,
+ 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f };
+
+ std::vector<float> inputToOutputWeights = {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
+ -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f,
+ 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
+ -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
+ -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
+ 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
+ -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f,
+ -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
+ -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
+ -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f,
+ 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
+ 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f,
+ 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
+ -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f,
+ 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f,
+ 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
+ -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f,
+ 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
+ -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
+ -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f };
+
+ std::vector<float> inputGateBias = {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
+ -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
+ -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
+ 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f };
+
+ std::vector<float> forgetGateBias = {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
+ 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f,
+ 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f,
+ -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f,
+ 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f };
+
+ std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f,
+ -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f,
+ -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f,
+ -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f,
+ 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f };
+
+ std::vector<float> outputGateBias = {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
+ 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
+ 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
+ -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f};
+
+ std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
-0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
-0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
-0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
@@ -632,11 +600,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
-0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f,
0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
-0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
- -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
- });
+ -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f };
- auto recurrentToForgetWeights =
- MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
+ std::vector<float> recurrentToForgetWeights = {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
-0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
@@ -715,11 +681,9 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
-0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f,
-0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
- -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
- });
+ -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f };
- auto recurrentToCellWeights =
- MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
+ std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
-0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
@@ -798,12 +762,10 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
-0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
-0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
- -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
- });
+ -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f };
- auto recurrentToOutputWeights =
- MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
- -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
+ std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
+ -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
-0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f,
0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f,
-0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f,
@@ -879,101 +841,90 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
-0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f,
-0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f,
0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f,
- 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
- });
-
- auto cellToInputWeights =
- MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
- -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
- -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
- 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
- });
-
-
- auto cellToForgetWeights =
- MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f,
- -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
- -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f,
- 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
- });
-
- auto cellToOutputWeights =
- MakeTensor<float, 1>(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
- -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
- -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
- 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
- });
-
- auto projectionWeights =
- MakeTensor<float, 2>(tensorInfo16x20,
- {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
- 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
- -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
- -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
- 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
- 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
- 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
- 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
- -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
- -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
- -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
- 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
- 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
- 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
- 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
- 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
- -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
- 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
- -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
- 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
- -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
- -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
- 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
- -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
- 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
- -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
- -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
- 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
- -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
- -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
- -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
- 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
- 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
- -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
- 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
- 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
- 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
- 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
- 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
- -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
- -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
- 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
- -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
- -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
- 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
- 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
- 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
- -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
- -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
- -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
- 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
- -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
- 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
- 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
- -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
- -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
- -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
- 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
- -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
- -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
- -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
- 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
- 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
- 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
- });
+                                                    0.01977615f, -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f };
+
+ std::vector<float> cellToInputWeights = {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
+ -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
+                                             -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
+ 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f};
+
+    std::vector<float> cellToForgetWeights = {-0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
+                                              -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
+                                              -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
+                                              0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f};
+
+ std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
+ -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
+ -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
+ 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f};
+
+    std::vector<float> projectionWeights = {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
+ 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
+ -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
+ -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
+ 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
+ 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
+ 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
+ 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
+ -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
+ -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
+ -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
+ 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
+ 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
+ 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
+ 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
+ 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
+ -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
+ 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
+ -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
+ 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
+ -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
+ -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
+ 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
+ -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
+ 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
+ -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
+ -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
+ 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
+ -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
+ -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
+ -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
+ 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
+ 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
+ -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
+ 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
+ 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
+ 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
+ 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
+ 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
+ -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
+ -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
+ 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
+ -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
+ -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
+ 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
+ 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
+ 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
+ -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
+ -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
+ -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
+ 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
+ -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
+ 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
+ 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
+ -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
+ -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
+ -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
+ 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
+ -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
+ -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
+ -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
+ 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
+ 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
+ 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f};
std::vector<float> projectionBiasVector(outputSize, 0.f);
- auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
@@ -993,23 +944,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20);
armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16);
- AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
- AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
- AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
+ AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
+ AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
data.m_InputToInputWeights = &inputToInputWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1035,7 +986,6 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
data.m_Parameters.m_PeepholeEnabled = true;
data.m_Parameters.m_ProjectionEnabled = true;
-
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
@@ -1046,16 +996,18 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
-
- return ret;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ return LayerTestResult<T, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
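
The hunks above all apply the same migration pattern: a boost::multi_array<T, 2> indexed as m[i][j] becomes a flat std::vector<T> in row-major order, with the shape tracked separately by the TensorInfo. A minimal standalone sketch of that index mapping (plain C++; the names here are illustrative, not taken from the ArmNN sources):

    #include <cassert>
    #include <vector>

    int main()
    {
        const unsigned int rows = 2u;
        const unsigned int cols = 3u;

        // Row-major flat storage, as used by the reworked layer tests.
        std::vector<float> flat = { 0.f, 1.f, 2.f,
                                    3.f, 4.f, 5.f };

        // Element (i, j) of the old multi_array is flat[i * cols + j].
        for (unsigned int i = 0u; i < rows; ++i)
        {
            for (unsigned int j = 0u; j < cols; ++j)
            {
                assert(flat[i * cols + j] == static_cast<float>(i * cols + j));
            }
        }
        return 0;
    }
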
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1063,8 +1015,10 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 2>& input,
- const boost::multi_array<T, 2>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputExpectedShape,
float qScale = 0.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
@@ -1074,10 +1028,10 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
bool peepholeEnabled = true;
bool projectionEnabled = false;
    // These dimensions are not the actual LSTM input and output shapes yet
- unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
- unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
const unsigned int cellSize = outputSize;
@@ -1095,14 +1049,10 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
// List of inputs
std::vector<float> inputData;
inputData.assign(input.data(), input.data() + batchSize*inputSize);
- auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);
std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
- auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);
std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
- auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);
-
// Prepare all the weights in the descriptor for LSTM
armnn::LstmQueueDescriptor data;
@@ -1110,41 +1060,51 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset);
armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset);
- auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
- {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
- 0.04717243f, 0.48944736f, -0.38535351f,
- -0.17212132f});
- auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
- {-0.55291498f, -0.42866567f, 0.13056988f,
- -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
- 0.33826375f});
- auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
- {0.10725588f, -0.02335852f, -0.55932593f,
- -0.09426838f, -0.44257352f, 0.54939759f,
- 0.01533556f, 0.42751634f});
- auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
- auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
- auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
-
- auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
- {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
- 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
- 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
- 0.21193194f});
- auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
- {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
- 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
- -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});
-
- auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
- {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
- -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
- 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});
-
- auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
- {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
- auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
- {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
+ std::vector<float> inputToCellWeights =
+ {
+ -0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
+ 0.04717243f, 0.48944736f, -0.38535351f,
+ -0.17212132f
+ };
+ std::vector<float> inputToForgetWeights =
+ {
+ -0.55291498f, -0.42866567f, 0.13056988f,
+ -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
+ 0.33826375f
+ };
+ std::vector<float> inputToOutputWeights =
+ {
+ 0.10725588f, -0.02335852f, -0.55932593f,
+ -0.09426838f, -0.44257352f, 0.54939759f,
+ 0.01533556f, 0.42751634f
+ };
+ std::vector<float> cellBias = {0.f, 0.f, 0.f, 0.f};
+ std::vector<float> forgetGateBias = {1.f, 1.f, 1.f, 1.f};
+ std::vector<float> outputGateBias = {0.f, 0.f, 0.f, 0.f};
+
+ std::vector<float> recurrentToCellWeights =
+ {
+ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
+ 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
+ 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
+ 0.21193194f
+ };
+ std::vector<float> recurrentToForgetWeights =
+ {
+ -0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
+ 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
+ -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f
+ };
+
+ std::vector<float> recurrentToOutputWeights =
+ {
+ 0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
+ -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
+ 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f
+ };
+
+ std::vector<float> cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
+ std::vector<float> cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput);
armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
@@ -1158,25 +1118,23 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
-
armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
-
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
- AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
data.m_InputToCellWeights = &inputToCellWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1202,29 +1160,28 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
data.m_Parameters.m_ClippingThresProj = 0.0;
data.m_Parameters.m_ClippingThresCell = 0.0;
-
// List of outputs
std::vector<T> scratchBufferVector(batchSize * scratchBufferSize, T());
- auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);
LayerTestResult<T, 2> ret0(scratchBufferTensorInfo);
// Output state for a certain time step
std::vector<T> outputStateOutVector(batchSize * outputSize, T());
- auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);
LayerTestResult<T, 2> ret1(outputStateOutTensorInfo);
// Cell state for a certain time step
std::vector<T> cellStateOutVector(batchSize * cellSize, T());
- auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);
LayerTestResult<T, 2> ret2(cellStateOutTensorInfo);
// Output for a certain time step
- std::vector<T> outputVector(batchSize * outputSize, T());
- auto outputTensor = MakeTensor<T, 2>(outputTensorInfo, outputVector);
std::vector<T> outputData;
outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
LayerTestResult<T, 2> ret3(outputTensorInfo);
- ret3.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputData);
+ ret3.m_ExpectedData = outputData;
+
+ std::vector<T> actualScratchBufferOutput(scratchBufferTensorInfo.GetNumElements());
+ std::vector<T> actualOutputStateOutput(outputStateOutTensorInfo.GetNumElements());
+ std::vector<T> actualCellStateOutput(cellStateOutTensorInfo.GetNumElements());
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
// Prepare the inputs and outputs for the workload
std::unique_ptr<armnn::ITensorHandle> inputHandle =
@@ -1255,7 +1212,6 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
-
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1265,21 +1221,25 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
-
- CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
- CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
- CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);
+ CopyDataToITensorHandle(scratchBufferHandle.get(), scratchBufferVector.data());
+ CopyDataToITensorHandle(outputStateOutHandle.get(), outputStateOutVector.data());
+ CopyDataToITensorHandle(cellStateOutHandle.get(), cellStateOutVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
- CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
- CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
- CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualScratchBufferOutput.data(), scratchBufferHandle.get());
+ CopyDataFromITensorHandle(actualOutputStateOutput.data(), outputStateOutHandle.get());
+ CopyDataFromITensorHandle(actualCellStateOutput.data(), cellStateOutHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ ret0.m_ActualData = actualScratchBufferOutput;
+ ret1.m_ActualData = actualOutputStateOutput;
+ ret2.m_ActualData = actualCellStateOutput;
+ ret3.m_ActualData = actualOutput;
return ret3;
}
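
This function keeps a two-step style: each LayerTestResult (ret0 to ret3 above) is constructed from its TensorInfo first, and the measured and expected values are assigned to m_ActualData/m_ExpectedData afterwards, whereas the other tests in this file hand everything to the new four-argument constructor. A stripped-down stand-in, not the real armnn struct, sketching both styles:

    #include <utility>
    #include <vector>

    // Hypothetical miniature of LayerTestResult, for illustration only.
    template <typename T>
    struct MiniTestResult
    {
        MiniTestResult() = default;
        MiniTestResult(std::vector<T> actual, std::vector<T> expected)
            : m_ActualData(std::move(actual))
            , m_ExpectedData(std::move(expected))
        {}

        std::vector<T> m_ActualData;
        std::vector<T> m_ExpectedData;
    };

    int main()
    {
        const std::vector<float> expected = { 1.f, 2.f };
        const std::vector<float> actual   = { 1.f, 2.f };

        // Style used above: default-construct, then fill the members.
        MiniTestResult<float> ret3;
        ret3.m_ExpectedData = expected;
        ret3.m_ActualData   = actual;

        // Style used by the other tests: pass everything to the constructor.
        MiniTestResult<float> ret(actual, expected);
        return (ret.m_ActualData == ret3.m_ActualData) ? 0 : 1;
    }
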
@@ -1289,8 +1249,8 @@ LayerTestResult<T, 2>
LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<T, 2>& input,
- const boost::multi_array<T, 2>& outputExpected,
+ const std::vector<T>& input,
+ const std::vector<T>& outputExpected,
float qScale = 0.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
@@ -1311,30 +1271,19 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
- LayerTestResult<T, 2> ret(outputTensorInfo);
-
std::vector<float> inputVector;
inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
- auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
- auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
-
std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
- auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
-
std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
- auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
-
std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
- auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
-
std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
- auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::vector<float> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
- ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
@@ -1368,95 +1317,73 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset);
armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset);
- auto inputToInputWeights =
- MakeTensor<float, 2>(tensorInfo4x5, { 0.5f, 0.6f, 0.7f, -0.8f, -0.9f,
- 0.1f, 0.2f, 0.3f, -0.4f, 0.5f,
- -0.8f, 0.7f, -0.6f, 0.5f, -0.4f,
- -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize}
+ std::vector<float> inputToInputWeights = {0.5f, 0.6f, 0.7f, -0.8f, -0.9f,
+ 0.1f, 0.2f, 0.3f, -0.4f, 0.5f,
+ -0.8f, 0.7f, -0.6f, 0.5f, -0.4f,
+ -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}; //{numUnits, inputSize}
- auto inputToForgetWeights =
- MakeTensor<float, 2>(tensorInfo4x5, {-0.6f, -0.1f, 0.3f, 0.2f, 0.9f,
- -0.5f, -0.2f, -0.4f, 0.3f, -0.8f,
- -0.4f, 0.3f, -0.5f, -0.4f, -0.6f,
- 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize}
+ std::vector<float> inputToForgetWeights = { -0.6f, -0.1f, 0.3f, 0.2f, 0.9f,
+ -0.5f, -0.2f, -0.4f, 0.3f, -0.8f,
+ -0.4f, 0.3f, -0.5f, -0.4f, -0.6f,
+ 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}; //{numUnits, inputSize}
- auto inputToCellWeights =
- MakeTensor<float, 2>(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
- 0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
- 0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
- 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}); //{numUnits, inputSize}
+ std::vector<float> inputToCellWeights = {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
+ 0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
+ 0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
+ 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}; //{numUnits, inputSize}
- auto inputToOutputWeights =
- MakeTensor<float, 2>(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
- -0.7f, 0.3f, -0.3f, -0.8f, -0.2f,
- 0.6f, -0.2f, 0.4f, -0.7f, -0.3f,
- -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}); //{numUnits, inputSize}
+ std::vector<float> inputToOutputWeights = {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
+ -0.7f, 0.3f, -0.3f, -0.8f, -0.2f,
+ 0.6f, -0.2f, 0.4f, -0.7f, -0.3f,
+ -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}; //{numUnits, inputSize}
- auto inputGateBias =
- MakeTensor<float, 1>(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits}
+ std::vector<float> inputGateBias = {0.03f, 0.15f, 0.22f, 0.38f}; //{numUnits}
- auto forgetGateBias =
- MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits}
+ std::vector<float> forgetGateBias = {0.1f, -0.3f, -0.2f, 0.1f}; //{numUnits}
- auto cellBias =
- MakeTensor<float, 1>(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits}
+ std::vector<float> cellBias = {-0.05f, 0.72f, 0.25f, 0.08f}; //{numUnits}
- auto outputGateBias =
- MakeTensor<float, 1>(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits}
+ std::vector<float> outputGateBias = {0.05f, -0.01f, 0.2f, 0.1f}; //{numUnits}
- auto recurrentToInputWeights =
- MakeTensor<float, 2>(tensorInfo4x3, {-0.2f, -0.3f, 0.4f,
+    std::vector<float> recurrentToInputWeights = {-0.2f, -0.3f, 0.4f,
0.1f, -0.5f, 0.9f,
-0.2f, -0.3f, -0.7f,
- 0.05f, -0.2f, -0.6f}); //{numUnits, outputSize}
+ 0.05f, -0.2f, -0.6f}; //{numUnits, outputSize}
- auto recurrentToCellWeights =
- MakeTensor<float, 2>(tensorInfo4x3, {-0.3f, 0.2f, 0.1f,
+ std::vector<float> recurrentToCellWeights = {-0.3f, 0.2f, 0.1f,
-0.3f, 0.8f, -0.08f,
-0.2f, 0.3f, 0.8f,
- -0.6f, -0.1f, 0.2f}); //{numUnits, outputSize}
+ -0.6f, -0.1f, 0.2f}; //{numUnits, outputSize}
- auto recurrentToForgetWeights =
- MakeTensor<float, 2>(tensorInfo4x3, {-0.5f, -0.3f, -0.5f,
- -0.2f, 0.6f, 0.4f,
- 0.9f, 0.3f, -0.1f,
- 0.2f, 0.5f, 0.2f}); //{numUnits, outputSize}
+ std::vector<float> recurrentToForgetWeights = { -0.5f, -0.3f, -0.5f,
+ -0.2f, 0.6f, 0.4f,
+ 0.9f, 0.3f, -0.1f,
+ 0.2f, 0.5f, 0.2f}; //{numUnits, outputSize}
- auto recurrentToOutputWeights =
- MakeTensor<float, 2>(tensorInfo4x3, { 0.3f, -0.1f, 0.1f,
- -0.2f, -0.5f, -0.7f,
- -0.2f, -0.6f, -0.1f,
- -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize}
+ std::vector<float> recurrentToOutputWeights = { 0.3f, -0.1f, 0.1f,
+ -0.2f, -0.5f, -0.7f,
+ -0.2f, -0.6f, -0.1f,
+ -0.4f, -0.7f, -0.2f}; //{numUnits, outputSize}
- auto cellToInputWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits}
+ std::vector<float> cellToInputWeights = {0.05f, 0.1f, 0.25f, 0.15f}; //{numUnits}
- auto cellToForgetWeights =
- MakeTensor<float, 1>(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits}
+ std::vector<float> cellToForgetWeights = {-0.02f, -0.15f, -0.25f, -0.03f}; //{numUnits}
- auto cellToOutputWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits}
+ std::vector<float> cellToOutputWeights = {0.1f, -0.1f, -0.5f, 0.05f}; //{numUnits}
- auto projectionWeights =
- MakeTensor<float, 2>(tensorInfo3x4,
- {-0.1f, 0.2f, 0.01f, -0.2f,
- 0.1f, 0.5f, 0.3f, 0.08f,
- 0.07f, 0.2f, -0.4f, 0.2f}); //{outputSize, numUnits}
+ std::vector<float> projectionWeights = {-0.1f, 0.2f, 0.01f, -0.2f,
+ 0.1f, 0.5f, 0.3f, 0.08f,
+ 0.07f, 0.2f, -0.4f, 0.2f}; //{outputSize, numUnits}
- std::vector<float> projectionBiasVector(outputSize, 0.f);
- auto projectionBias = MakeTensor<float,1>(tensorInfo3, projectionBiasVector); //{outputSize}
+ std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
- auto inputLayerNormWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits}
+ std::vector<float> inputLayerNormWeights = {0.1f, 0.2f, 0.3f, 0.5f}; //{numUnits}
- auto forgetLayerNormWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits}
+ std::vector<float> forgetLayerNormWeights = {0.2f, 0.2f, 0.4f, 0.3f}; //{numUnits}
- auto cellLayerNormWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits}
+ std::vector<float> cellLayerNormWeights = {0.7f, 0.2f, 0.3f, 0.8f}; //{numUnits}
- auto outputLayerNormWeights =
- MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
+ std::vector<float> outputLayerNormWeights = {0.6f, 0.2f, 0.2f, 0.5f}; //{numUnits}
armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
@@ -1482,28 +1409,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
- AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
- AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
- AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
-
- AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+ AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
+ AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
+
+ AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
data.m_InputToInputWeights = &inputToInputWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1546,28 +1473,33 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
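
Because a flat std::vector carries no extents of its own, the reworked overloads in this file receive the tensor shape explicitly, and sizes that used to come from input.shape()[0]/input.shape()[1] on the multi_array are read from the shape argument instead (see the inputShape/outputExpectedShape parameters elsewhere in this file). A minimal sketch with a plain array standing in for armnn::TensorShape (assumed values, illustration only):

    #include <array>
    #include <vector>

    int main()
    {
        // Stand-in for armnn::TensorShape({batchSize, inputSize}).
        const std::array<unsigned int, 2> inputShape = { 2u, 5u };

        const unsigned int batchSize = inputShape[0]; // was input.shape()[0]
        const unsigned int inputSize = inputShape[1]; // was input.shape()[1]

        const std::vector<float> input(batchSize * inputSize, 0.f);
        return (input.size() == batchSize * inputSize) ? 0 : 1;
    }
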
LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<uint8_t, 2>& input,
- const boost::multi_array<uint8_t, 2>& outputExpected)
+ const std::vector<uint8_t>& input,
+ const std::vector<uint8_t>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputExpectedShape)
{
IgnoreUnused(memoryManager);
- auto numBatches = armnn::numeric_cast<unsigned int>(input.shape()[0]);
- auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
- auto outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ auto numBatches = armnn::numeric_cast<unsigned int>(inputShape[0]);
+ auto inputSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
+ auto outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
// Scale/Offset for input/output, cellState In/Out, weights, bias
float inputOutputScale = 0.0078125f;
@@ -1598,29 +1530,23 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
inputOutputScale,
inputOutputOffset);
- LayerTestResult<uint8_t, 2> ret(outputStateInfo);
-
// Input0
std::vector<uint8_t> inputVector;
inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
- auto inputTensor = MakeTensor<uint8_t, 2>(inputInfo, inputVector);
// Input1
std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13
- auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
-
// Input2
std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14
- auto outputStateInTensor = MakeTensor<uint8_t, 2>(outputStateInfo, outputStateInVector);
// Output0
std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0
- auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
// Output1
std::vector<uint8_t> outputVector; // 1
outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
- ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);
+
+ std::vector<uint8_t> actualOutput(outputStateInfo.GetNumElements());
// Create tensor handles
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
@@ -1658,24 +1584,24 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
// Weights and bias tensor data
- auto inputToInputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108});
- auto inputToForgetWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169});
- auto inputToCellWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183});
- auto inputToOutputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48});
-
- auto recurrentToInputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
- {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26});
- auto recurrentToForgetWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
- {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253});
- auto recurrentToCellWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
- {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216});
- auto recurrentToOutputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
- {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98});
-
- auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-7876, 13488, -726, 32839});
- auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {9206, -46884, -11693, -38724});
- auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
- auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
+ std::vector<uint8_t> inputToInputWeights = {146, 250, 235, 171, 10, 218, 171, 108};
+ std::vector<uint8_t> inputToForgetWeights = {24, 50, 132, 179, 158, 110, 3, 169};
+ std::vector<uint8_t> inputToCellWeights = {133, 34, 29, 49, 206, 109, 54, 183};
+ std::vector<uint8_t> inputToOutputWeights = {195, 187, 11, 99, 109, 10, 218, 48};
+
+ std::vector<uint8_t> recurrentToInputWeights =
+ {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
+ std::vector<uint8_t> recurrentToForgetWeights =
+ {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
+ std::vector<uint8_t> recurrentToCellWeights =
+ {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
+ std::vector<uint8_t> recurrentToOutputWeights =
+ {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
+
+ std::vector<int32_t> inputGateBias = {-7876, 13488, -726, 32839};
+ std::vector<int32_t> forgetGateBias = {9206, -46884, -11693, -38724};
+ std::vector<int32_t> cellBias = {39481, 48624, 48976, -21419};
+ std::vector<int32_t> outputGateBias = {-58999, -17050, -41852, -40538};
// ScopedTensorHandles
armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
@@ -1694,20 +1620,20 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
// Allocate and copy data
- AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
// Setup queue descriptor
data.m_InputToInputWeights = &inputToInputWeightsTensor;
@@ -1734,15 +1660,18 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputStateInfo.GetShape());
}
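
QuantizedLstmTestImpl above works on quantized uint8_t inputs and int16_t cell state, so every real value passes through the usual affine mapping real = scale * (quantized - offset). A small round-trip sketch using the input/output scale visible in this test; the offset value below is an assumption for illustration, not read from the diff:

    #include <cmath>
    #include <cstdint>

    // quantized = round(real / scale) + offset; real = scale * (quantized - offset)
    std::uint8_t Quantize(float value, float scale, std::int32_t offset)
    {
        return static_cast<std::uint8_t>(std::lround(value / scale) + offset);
    }

    float Dequantize(std::uint8_t value, float scale, std::int32_t offset)
    {
        return scale * (static_cast<std::int32_t>(value) - offset);
    }

    int main()
    {
        const float scale = 0.0078125f;   // inputOutputScale used above (1/128)
        const std::int32_t offset = 128;  // assumed zero point, for illustration
        const std::uint8_t q = Quantize(0.5f, scale, offset);
        const float r = Dequantize(q, scale, offset);
        return (r > 0.49f && r < 0.51f) ? 0 : 1;
    }
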
// QLSTM: CIFG, LayerNorm
@@ -1750,8 +1679,8 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<int8_t, 2>& input,
- const boost::multi_array<int8_t, 2>& outputExpected)
+ const std::vector<int8_t>& input,
+ const std::vector<int8_t>& outputExpected)
{
IgnoreUnused(memoryManager);
unsigned int numBatches = 2;
@@ -1816,21 +1745,18 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
// Input tensors
std::vector<int8_t> inputVector;
inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
- auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);
std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
- auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
- auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);
// Output tensors
- std::vector<int16_t> cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149};
- auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
+ std::vector<int16_t> cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149};
std::vector<int8_t> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
- ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
+
+ std::vector<int8_t> actualOutput(outputStateInfo.GetNumElements());
// Create tensor handles
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
@@ -1873,27 +1799,27 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
// Weights and bias tensor data
- auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
- auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
- auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});
-
- auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51});
- auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64});
- auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38});
-
- auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
- auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
- auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});
-
- auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
- auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
- auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
+ std::vector<int8_t> inputToForgetWeights =
+ {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64};
+ std::vector<int8_t> inputToCellWeights =
+ {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77};
+ std::vector<int8_t> inputToOutputWeights =
+ {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51};
+
+ std::vector<int8_t> recurrentToForgetWeights =
+ {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51};
+ std::vector<int8_t> recurrentToCellWeights =
+ {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64};
+ std::vector<int8_t> recurrentToOutputWeights =
+ {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38};
+
+ std::vector<int32_t> forgetGateBias = {2147484, -6442451, -4294968, 2147484};
+ std::vector<int32_t> cellBias = {-1073742, 15461883, 5368709, 1717987};
+ std::vector<int32_t> outputGateBias = {1073742, -214748, 4294968, 2147484};
+
+ std::vector<int16_t> forgetLayerNormWeights = {6553, 6553, 13107, 9830};
+ std::vector<int16_t> cellLayerNormWeights = {22937, 6553, 9830, 26214};
+ std::vector<int16_t> outputLayerNormWeights = {19660, 6553, 6553, 16384};
// ScopedTensorHandles
armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
@@ -1913,21 +1839,21 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
// Allocate and copy data
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
- AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+ AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
// Setup queue descriptor
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -1972,15 +1898,18 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<int8_t, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputStateInfo.GetShape());
}
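For orientation: every hunk above swaps multi-dimensional indexing (&tensor[0][0]) for flat .data() pointers and returns a LayerTestResult built from the actual output, the expected output, and both shapes. Below is a minimal sketch of the structure those four-argument constructions assume (member names inferred from the m_ActualData / m_ExpectedData / m_Supported accesses later in this patch; the real armnn definition may differ):

```cpp
// Hypothetical sketch of the flat-vector LayerTestResult these calls assume.
#include <cstddef>
#include <vector>
#include <armnn/Tensor.hpp>   // armnn::TensorShape

template <typename T, std::size_t NumDims>  // NumDims kept only for interface parity
struct LayerTestResult
{
    LayerTestResult(const std::vector<T>& actual,
                    const std::vector<T>& expected,
                    const armnn::TensorShape& actualShape,
                    const armnn::TensorShape& expectedShape)
        : m_ActualData(actual)
        , m_ExpectedData(expected)
        , m_ActualShape(actualShape)
        , m_ExpectedShape(expectedShape)
        , m_Supported(true)
    {}

    std::vector<T>     m_ActualData;    // values copied out of the output handle
    std::vector<T>     m_ExpectedData;  // reference values to compare against
    armnn::TensorShape m_ActualShape;
    armnn::TensorShape m_ExpectedShape;
    bool               m_Supported;     // used by the Compare* tests below
};
```

Carrying the shapes next to the flat data preserves the dimensional check that indexing a boost::multi_array used to provide implicitly.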
// QLSTM: Projection, LayerNorm
@@ -1988,8 +1917,8 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<int8_t, 2>& input,
- const boost::multi_array<int8_t, 2>& outputExpected)
+ const std::vector<int8_t>& input,
+ const std::vector<int8_t>& outputExpected)
{
IgnoreUnused(memoryManager);
unsigned int numBatches = 2;
@@ -2051,26 +1980,21 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
outputScale,
outputOffset);
- LayerTestResult<int8_t, 2> ret(outputStateInfo);
-
// Input tensors
std::vector<int8_t> inputVector;
inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
- auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);
std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
- auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0};
- auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);
// Output tensors
std::vector<int16_t> cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939};
- auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
std::vector<int8_t> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
- ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
+
+ std::vector<int8_t> actualOutput(outputStateInfo.GetNumElements());
// Create tensor handles
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
@@ -2118,36 +2042,31 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
0);
// Weights and bias tensor data
- auto inputToInputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13});
- auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
- auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
- auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});
-
- auto recurrentToInputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77});
- auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25});
- auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25});
- auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25});
-
- auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {644245, 3221226, 4724464, 8160438});
- auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
- auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
- auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});
-
- auto inputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {3277, 6553, 9830, 16384});
- auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
- auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
- auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
-
- auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
- {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
+ std::vector<int8_t> inputToInputWeights =
+ {64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13};
+ std::vector<int8_t> inputToForgetWeights =
+ {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64};
+ std::vector<int8_t> inputToCellWeights =
+ {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77};
+ std::vector<int8_t> inputToOutputWeights =
+ {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51};
+
+ std::vector<int8_t> recurrentToInputWeights = {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77};
+ std::vector<int8_t> recurrentToForgetWeights = {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25};
+ std::vector<int8_t> recurrentToCellWeights = {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25};
+ std::vector<int8_t> recurrentToOutputWeights = {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25};
+
+ std::vector<int32_t> inputGateBias = {644245, 3221226, 4724464, 8160438};
+ std::vector<int32_t> forgetGateBias = {2147484, -6442451, -4294968, 2147484};
+ std::vector<int32_t> cellBias = {-1073742, 15461883, 5368709, 1717987};
+ std::vector<int32_t> outputGateBias = {1073742, -214748, 4294968, 2147484};
+
+ std::vector<int16_t> inputLayerNormWeights = {3277, 6553, 9830, 16384};
+ std::vector<int16_t> forgetLayerNormWeights = {6553, 6553, 13107, 9830};
+ std::vector<int16_t> cellLayerNormWeights = {22937, 6553, 9830, 26214};
+ std::vector<int16_t> outputLayerNormWeights = {19660, 6553, 6553, 16384};
+
+ std::vector<int8_t> projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51};
// ScopedTensorHandles
armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
@@ -2173,27 +2092,27 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
- AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
- AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+ AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
- AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
// Setup queue descriptor
data.m_InputToInputWeights = &inputToInputWeightsTensor;
@@ -2244,15 +2163,18 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<int8_t, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputStateInfo.GetShape());
}
// QLSTM: Projection, CIFG, LayerNorm
@@ -2260,8 +2182,8 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- const boost::multi_array<int8_t, 2>& input,
- const boost::multi_array<int8_t, 2>& outputExpected)
+ const std::vector<int8_t>& input,
+ const std::vector<int8_t>& outputExpected)
{
IgnoreUnused(memoryManager);
unsigned int numBatches = 2;
@@ -2323,26 +2245,21 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
outputScale,
outputOffset);
- LayerTestResult<int8_t, 2> ret(outputStateInfo);
-
// Input tensors
std::vector<int8_t> inputVector;
inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
- auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);
std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
- auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0};
- auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);
// Output tensors
- std::vector<int16_t> cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939};
- auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
+ std::vector<int16_t> cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939};
std::vector<int8_t> outputVector;
outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
- ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
+
+ std::vector<int8_t> actualOutput(outputStateInfo.GetNumElements());
// Create tensor handles
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
@@ -2390,30 +2307,29 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
0);
// Weights and bias tensor data
- auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
- auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
- auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
- {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});
-
- auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25});
- auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25});
- auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
- {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25});
-
- auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
- auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
- auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});
-
- auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
- auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
- auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
-
- auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
- {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
+ std::vector<int8_t> inputToForgetWeights =
+ {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64};
+ std::vector<int8_t> inputToCellWeights =
+ {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77};
+ std::vector<int8_t> inputToOutputWeights =
+ {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51};
+
+ std::vector<int8_t> recurrentToForgetWeights =
+ {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25};
+ std::vector<int8_t> recurrentToCellWeights =
+ {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25};
+ std::vector<int8_t> recurrentToOutputWeights =
+ {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25};
+
+ std::vector<int32_t> forgetGateBias = {2147484, -6442451, -4294968, 2147484};
+ std::vector<int32_t> cellBias = {-1073742, 15461883, 5368709, 1717987};
+ std::vector<int32_t> outputGateBias = {1073742, -214748, 4294968, 2147484};
+
+ std::vector<int16_t> forgetLayerNormWeights = {6553, 6553, 13107, 9830};
+ std::vector<int16_t> cellLayerNormWeights = {22937, 6553, 9830, 26214};
+ std::vector<int16_t> outputLayerNormWeights = {19660, 6553, 6553, 16384};
+
+ std::vector<int8_t> projectionWeights = {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51};
// ScopedTensorHandles
armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
@@ -2435,23 +2351,23 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
- AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
- AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
+ AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
- AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
- AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
- AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+ AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
+ AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
+ AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
- AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
- AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
+ AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
+ AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
- AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
+ AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
// Setup queue descriptor
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -2498,15 +2414,18 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
cellStateOutHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
- CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
- CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
+ CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
+ CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<int8_t, 2>(actualOutput,
+ outputVector,
+ outputHandle->GetShape(),
+ outputStateInfo.GetShape());
}
@@ -2519,13 +2438,10 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
void LstmUtilsZeroVectorTest()
{
armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
- boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
- {2., 3., 3., 4.}));
-
- boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
- {0., 0., 0., 0.}));
+ std::vector<float> input = {2., 3., 3., 4.};
+ std::vector<float> expectedOutput = {0., 0., 0., 0.};
- return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
+ return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput, inputDesc.GetShape());
}
void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
@@ -2533,16 +2449,16 @@ void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
uint32_t batchSize = 2;
uint32_t vecSize = 4;
armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
- 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
+ std::vector<float> input =
+ { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
+ 0.9f, 1.0f, 1.1f, 1.2f }; //batch 1
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
- -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
+ std::vector<float> expectedOutput =
+ { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
+ -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f }; //batch 1
return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
- vecSize, batchSize, expectedOutput);
+ vecSize, batchSize, expectedOutput, inputDesc.GetShape());
}
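The expected values in this test are each batch row normalised to zero mean and unit standard deviation; the ±1.34164 / ±0.44721 figures imply the population (divide-by-N) variance. A standalone sketch reproducing batch 0 under that assumption:

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Zero-mean / unit-stddev normalisation of one row (population variance assumed).
void MeanStddevNormalize(std::vector<float>& row)
{
    float mean = 0.0f;
    for (float v : row) { mean += v; }
    mean /= static_cast<float>(row.size());

    float variance = 0.0f;
    for (float v : row) { variance += (v - mean) * (v - mean); }
    variance /= static_cast<float>(row.size());

    // The all-zero test below implies the real utility guards variance == 0.
    const float invStddev = variance > 0.0f ? 1.0f / std::sqrt(variance) : 0.0f;
    for (float& v : row) { v = (v - mean) * invStddev; }
}

int main()
{
    std::vector<float> batch0 = { 0.1f, 0.2f, 0.3f, 0.4f };
    MeanStddevNormalize(batch0);
    for (float v : batch0) { std::printf("%f ", v); }  // ~ -1.34164 -0.44721 0.44721 1.34164
}
```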
void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
@@ -2550,16 +2466,16 @@ void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
uint32_t batchSize = 2;
uint32_t vecSize = 4;
armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+ std::vector<float> input =
{ 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
- 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
+ 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+ std::vector<float> expectedOutput =
{ 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
- 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
+ 0.0f, 0.0f, 0.0f, 0.0f }; //batch 1
return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
- vecSize, batchSize, expectedOutput);
+ vecSize, batchSize, expectedOutput, inputDesc.GetShape());
}
void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
@@ -2567,16 +2483,16 @@ void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
uint32_t batchSize = 2;
uint32_t vecSize = 4;
armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
- 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
+ std::vector<float> input =
+ { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
+ 0.1f, 0.2f, 0.3f, 0.4f }; //batch 1
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
- -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
+ std::vector<float> expectedOutput =
+ { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
+ -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }; //batch 1
return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
- vecSize, batchSize, expectedOutput);
+ vecSize, batchSize, expectedOutput, inputDesc.GetShape());
}
void LstmUtilsVectorBatchVectorCwiseProductTest()
@@ -2584,13 +2500,13 @@ void LstmUtilsVectorBatchVectorCwiseProductTest()
uint32_t batchSize = 4;
uint32_t vecSize = 29;
armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
+ std::vector<float> vector =
{ 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
- 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));
+ 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f};
armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
+ std::vector<float> batchVector =
{ /* batch 0 */
1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
@@ -2606,10 +2522,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest()
/* batch 3 */
-1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
-11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
- -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));
+ -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f};
// Expect output = vector * batchVector (element-wise product per batch row).
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
+ std::vector<float> expectedOutput =
{ /* batch 0 */
1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
@@ -2633,10 +2549,10 @@ void LstmUtilsVectorBatchVectorCwiseProductTest()
-59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
-172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
-368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
- -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));
+ -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f};
return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
- vecSize, batchSize, expectedOutput);
+ vecSize, batchSize, expectedOutput, vecDesc.GetShape());
}
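The expected block is the element-wise product of `vector` with each batch row in flat row-major storage (batch 0 starts 1.1 × 1.1 = 1.21, and the sign flips follow the batch data). A minimal sketch of that broadcast multiply, with hypothetical names:

```cpp
#include <cstddef>
#include <vector>

// out[b * vecSize + i] = vec[i] * batch[b * vecSize + i], flat row-major storage.
std::vector<float> VectorBatchVectorCwiseProduct(const std::vector<float>& vec,
                                                 const std::vector<float>& batch,
                                                 std::size_t vecSize,
                                                 std::size_t batchSize)
{
    std::vector<float> out(vecSize * batchSize);
    for (std::size_t b = 0; b < batchSize; ++b)
    {
        for (std::size_t i = 0; i < vecSize; ++i)
        {
            out[b * vecSize + i] = vec[i] * batch[b * vecSize + i];
        }
    }
    return out;  // batch 0, element 0: 1.1f * 1.1f == 1.21f, as expected above
}
```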
void LstmUtilsVectorBatchVectorAddTest()
@@ -2644,20 +2560,23 @@ void LstmUtilsVectorBatchVectorAddTest()
uint32_t batchSize = 2;
uint32_t vecSize = 3;
armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
- { 0.0f, -0.5f, 1.0f}));
+    std::vector<float> vector = {0.0f, -0.5f, 1.0f};
armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
- boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
- { 1.0f, 2.0f, 3.0f, //batch 0
- 4.0f, 5.0f, 6.0f})); //batch 1
-
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
- { 1.0f, 1.5f, 4.0f,
- 4.0f, 4.5f, 7.0f}));
+ std::vector<float> batchVector =
+ {
+ 1.0f, 2.0f, 3.0f, //batch 0
+ 4.0f, 5.0f, 6.0f //batch 1
+ };
+
+ std::vector<float> expectedOutput =
+ {
+ 1.0f, 1.5f, 4.0f,
+ 4.0f, 4.5f, 7.0f
+ };
return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
- vecSize, batchSize, expectedOutput);
+ vecSize, batchSize, expectedOutput, batchVecDesc.GetShape());
}
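Likewise, the add test broadcasts the bias vector onto every batch row: batch 0 becomes {1.0 + 0.0, 2.0 - 0.5, 3.0 + 1.0} = {1.0, 1.5, 4.0}. A sketch under the same flat layout (updating in place is an assumption; the real utility may write a separate result buffer):

```cpp
#include <cstddef>
#include <vector>

// batch[b * vecSize + i] += vec[i]  -- bias vector broadcast onto every row.
void VectorBatchVectorAdd(const std::vector<float>& vec,
                          std::vector<float>& batch,
                          std::size_t vecSize,
                          std::size_t batchSize)
{
    for (std::size_t b = 0; b < batchSize; ++b)
    {
        for (std::size_t i = 0; i < vecSize; ++i)
        {
            batch[b * vecSize + i] += vec[i];
        }
    }
}
// Batch 0 above: {1 + 0, 2 - 0.5, 3 + 1} -> {1.0f, 1.5f, 4.0f}.
```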
#endif
@@ -2668,15 +2587,15 @@ LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- { 2., 3., 3., 4. }));
+ std::vector<float> input = { 2., 3., 3., 4. };
armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
+ std::vector<float> expectedOutput =
{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
- -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
+ -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
+ workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
}
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
@@ -2685,19 +2604,18 @@ LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+ std::vector<float> input =
{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
- 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
+ 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
- {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
- -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
- -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
- 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
- -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
- 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
- 0.02168f}));
+ std::vector<float> expectedOutput =
+ {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
+ -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
+ -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
+ 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
+ -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
+ 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f};
return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
}
@@ -2708,16 +2626,16 @@ LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
- {2., 3., 3., 4.}));
+ std::vector<float> input = {2., 3., 3., 4.};
armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
- {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
- -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
+ std::vector<float> expectedOutput =
+ {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
+ -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f};
return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
+ workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
}
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
@@ -2726,14 +2644,14 @@ LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLa
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+ std::vector<float> input =
{0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
- 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
+ 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; //batch 1
armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
- boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
+ std::vector<float> expectedOutput =
{ 0.0244077f, 0.128027f, -0.00170918f, //batch 0
- -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
+ -0.00692428f, 0.0848741f, 0.063445f}; //batch 1
return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
}
@@ -2750,22 +2668,20 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({2, 2}, datatype);
- boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
- inputDesc,
- armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
+ std::vector<int16_t> input = armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset);
armnn::TensorInfo outputDesc({2, 4}, datatype);
- boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
- outputDesc,
- armnnUtils::QuantizedVector<int16_t>(
- {
- -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
- -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
- },
- qScale, qOffset));
+ std::vector<int16_t> expectedOutput = armnnUtils::QuantizedVector<int16_t>(
+ {
+ -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+ -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+ },
+ qScale, qOffset);
return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
- workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
+ workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(),
+ qScale, qOffset, constantDatatype);
}
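The Int16 variants build their data through armnnUtils::QuantizedVector. A rough sketch of the affine quantisation it is assumed to perform: round-to-nearest, then clamp to the target range. The real helper's exact rounding mode is not shown in this patch.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

// q = round(value / scale) + offset, clamped to the target type's range.
template <typename QuantizedType>
std::vector<QuantizedType> QuantizedVectorSketch(const std::vector<float>& values,
                                                 float scale, int32_t offset)
{
    std::vector<QuantizedType> result;
    result.reserve(values.size());
    for (float v : values)
    {
        float q = std::round(v / scale) + static_cast<float>(offset);
        q = std::clamp(q,
                       static_cast<float>(std::numeric_limits<QuantizedType>::min()),
                       static_cast<float>(std::numeric_limits<QuantizedType>::max()));
        result.push_back(static_cast<QuantizedType>(q));
    }
    return result;
}
```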
@@ -2781,24 +2697,20 @@ LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
- boost::multi_array<int16_t, 2> input =
- MakeTensor<int16_t, 2>(
- inputDesc,
- armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
+ std::vector<int16_t> input = armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset);
armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
- boost::multi_array<int16_t, 2> expectedOutput =
- MakeTensor<int16_t, 2>(
- outputDesc,
- armnnUtils::QuantizedVector<int16_t>(
- {
- -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
- -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
- },
- qScale, qOffset));
+ std::vector<int16_t> expectedOutput = armnnUtils::QuantizedVector<int16_t>(
+ {
+ -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
+ -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
+ },
+ qScale, qOffset);
return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
- workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
+ workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(),
+ qScale, qOffset, constantDatatype);
}
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
@@ -2813,32 +2725,26 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
- boost::multi_array<int16_t, 2> input =
- MakeTensor<int16_t, 2>(
- inputDesc,
- armnnUtils::QuantizedVector<int16_t>(
- {
- 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
- 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
- },
- qScale, qOffset));
+ std::vector<int16_t> input = armnnUtils::QuantizedVector<int16_t>(
+ {
+ 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
+ 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
+ },
+ qScale, qOffset);
armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
- boost::multi_array<int16_t, 2> expectedOutput =
- MakeTensor<int16_t, 2>(
- outputDesc,
- armnnUtils::QuantizedVector<int16_t>(
- {
- -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f,
- -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f,
- 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
- 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f,
- -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f,
- -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f,
- 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f,
- 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f
- },
- qScale, qOffset));
+ std::vector<int16_t> expectedOutput = armnnUtils::QuantizedVector<int16_t>(
+ {
+ -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f,
+ -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f,
+ 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
+ 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f,
+ -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f,
+ -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f,
+ 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f,
+ 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f
+ },
+ qScale, qOffset);
return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -2855,23 +2761,20 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const
const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymm16
armnn::TensorInfo inputDesc({2, 2}, datatype);
- boost::multi_array<int16_t , 2> input =
- MakeTensor<int16_t , 2>(inputDesc,
- armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
+ std::vector<int16_t> input = armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset);
armnn::TensorInfo outputDesc({2, 4}, datatype);
- boost::multi_array<int16_t, 2> expectedOutput =
- MakeTensor<int16_t, 2>(
- outputDesc,
- armnnUtils::QuantizedVector<int16_t>(
- {
- -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
- -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
- },
- qScale, qOffset));
+ std::vector<int16_t> expectedOutput = armnnUtils::QuantizedVector<int16_t>(
+ {
+ -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+ -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+ },
+ qScale, qOffset);
return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
- workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, datatype);
+ workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape(),
+ qScale, qOffset, datatype);
}
//
@@ -2884,14 +2787,13 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTest(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
- {166, 179, 50, 150}));
+ std::vector<uint8_t> input = {166, 179, 50, 150};
armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
- boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
- {140, 151, 146, 112, 136, 156, 142, 112 }));
+    std::vector<uint8_t> expectedOutput = {140, 151, 146, 112, 136, 156, 142, 112};
- return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
+ return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory,
+ input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
}
// QLSTM
@@ -2901,12 +2803,10 @@ LayerTestResult<int8_t, 2> QLstmTest(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
- {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+ std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
- {-15, 21, 14, 20, -15, 15, 5, 27}));
+ std::vector<int8_t> expectedOutput = {-15, 21, 14, 20, -15, 15, 5, 27};
return QLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
}
@@ -2917,12 +2817,10 @@ LayerTestResult<int8_t, 2> QLstmTest1(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
- {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+ std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
- {127, 127, -108, -67, 127, 127}));
+ std::vector<int8_t> expectedOutput = {127, 127, -108, -67, 127, 127};
return QLstmTestImpl1(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
}
@@ -2933,12 +2831,10 @@ LayerTestResult<int8_t, 2> QLstmTest2(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
- {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
+ std::vector<int8_t> input = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
- boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
- {127, 127, 127, -128, 127, 127}));
+ std::vector<int8_t> expectedOutput = {127, 127, 127, -128, 127, 127};
return QLstmTestImpl2(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index ba827b1860..0f045d1198 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -40,11 +40,10 @@ LayerTestResult<T, OutputDim> MeanTestHelper(
outputTensorInfo.SetQuantizationScale(scale);
outputTensorInfo.SetQuantizationOffset(offset);
- auto input = MakeTensor<T, InputDim>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+ auto input = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
- LayerTestResult<T, OutputDim> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, OutputDim>(
- outputTensorInfo, ConvertToDataType<ArmnnType>(outputData, outputTensorInfo));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = ConvertToDataType<ArmnnType>(outputData, outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -61,14 +60,17 @@ LayerTestResult<T, OutputDim> MeanTestHelper(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, OutputDim>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index b8dc5f5667..c7b082183c 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -545,10 +545,11 @@ LayerTestResult<float,4> CompareMultiplicationTest(
inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- LayerTestResult<float,4> comparisonResult(outputTensorInfo);
+ auto input0 = MakeRandomTensor<float>(inputTensorInfo0, 803506992);
+ auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 54902257);
- auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
- auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
@@ -580,17 +581,20 @@ LayerTestResult<float,4> CompareMultiplicationTest(
inputHandle1Ref->Allocate();
outputHandleRef->Allocate();
- CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
- CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
- CopyDataToITensorHandle(inputHandle0Ref.get(), input0.origin());
- CopyDataToITensorHandle(inputHandle1Ref.get(), input1.origin());
+ CopyDataToITensorHandle(inputHandle0.get(), input0.data());
+ CopyDataToITensorHandle(inputHandle1.get(), input1.data());
+ CopyDataToITensorHandle(inputHandle0Ref.get(), input0.data());
+ CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
workload->PostAllocationConfigure();
workload->Execute();
workloadRef->PostAllocationConfigure();
workloadRef->Execute();
- CopyDataFromITensorHandle(comparisonResult.output.origin(), outputHandle.get());
- CopyDataFromITensorHandle(comparisonResult.outputExpected.origin(), outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return comparisonResult;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
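CompareMultiplicationTest now takes its inputs from a MakeRandomTensor<float> that returns a flat, seed-deterministic vector (note the fixed seeds 803506992 and 54902257). A plausible stand-in built on <random>; the distribution and bounds are assumptions, since only the call shape is visible in this patch:

```cpp
#include <random>
#include <vector>
#include <armnn/Tensor.hpp>   // armnn::TensorInfo

// Deterministic per-seed random data, sized from the tensor info.
std::vector<float> MakeRandomTensorSketch(const armnn::TensorInfo& info,
                                          unsigned int seed,
                                          float min = -10.0f, float max = 10.0f)
{
    std::mt19937 generator(seed);
    std::uniform_real_distribution<float> distribution(min, max);
    std::vector<float> data(info.GetNumElements());
    for (float& v : data) { v = distribution(generator); }
    return data;
}
```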
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index b52dcd5303..153afd9cd7 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -44,16 +44,18 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- LayerTestResult<float,4> ret(outputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
// Batch #0
1.0f, 2.0f,
3.0f, 4.0f,
// Batch #1
5.0f, 6.0f,
7.0f, 8.0f
- }));
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
float alpha = 1.f;
float beta = 1.f;
@@ -75,7 +77,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
- armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -85,11 +87,11 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
switch (normMethod)
{
@@ -104,23 +106,34 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// pow((kappa + (accumulatedScale * alpha)), beta)
// ...where accumulatedScale is the sum of every element squared.
float divisor[inputNum];
- for(int i = 0; i < armnn::numeric_cast<int>(inputNum); i++)
+
+ float accumulatedScale1 = 0.0f;
+ for (size_t i = 0; i < input.size()/2; ++i)
+ {
+ accumulatedScale1 += input[i]*input[i];
+ }
+
+ float accumulatedScale2 = 0.0f;
+ for (size_t i = input.size()/2; i < input.size(); ++i)
{
- float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
- input[i][0][0][1]*input[i][0][0][1] +
- input[i][0][1][0]*input[i][0][1][0] +
- input[i][0][1][1]*input[i][0][1][1];
- divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
+ accumulatedScale2 += input[i]*input[i];
}
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
- std::vector<float>({input[0][0][0][0]/divisor[0],
- input[0][0][0][1]/divisor[0],
- input[0][0][1][0]/divisor[0],
- input[0][0][1][1]/divisor[0],
- input[1][0][0][0]/divisor[1],
- input[1][0][0][1]/divisor[1],
- input[1][0][1][0]/divisor[1],
- input[1][0][1][1]/divisor[1]}));
+
+ divisor[0] = powf((kappa + accumulatedScale1 * alpha), beta);
+ divisor[1] = powf((kappa + accumulatedScale2 * alpha), beta);
+
+ std::vector<float> output;
+ unsigned int divisorIndex = 0;
+ for (size_t i = 0; i < input.size(); ++i)
+ {
+ if (i == input.size()/2)
+ {
+ divisorIndex++;
+ }
+ output.emplace_back(input[i]/divisor[divisorIndex]);
+ }
+
+ expectedOutput = output;
break;
}
case armnn::NormalizationAlgorithmChannel::Across:
@@ -131,19 +144,14 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
// ...where adjacent channels means within half the normSize for the channel
// The test data has only one channel, so this is simplified below.
std::vector<float> outputVector;
- for (int n = 0; n < armnn::numeric_cast<int>(inputNum); ++n)
+
+ for (unsigned int i = 0; i < input.size(); ++i)
{
- for (int h = 0; h < armnn::numeric_cast<int>(inputHeight); ++h)
- {
- for (int w = 0; w < armnn::numeric_cast<int>(inputWidth); ++w)
- {
- float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
- float scale = powf((kappa + accumulatedScale * alpha), -beta);
- outputVector.push_back(input[n][0][h][w] * scale);
- }
- }
+ float accumulatedScale = input[i]*input[i];
+ float scale = powf((kappa + accumulatedScale * alpha), -beta);
+ outputVector.push_back(input[i] * scale);
}
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
+ expectedOutput = outputVector;
break;
}
default:
@@ -162,7 +170,10 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
}
}
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
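With alpha = beta = 1 (and kappa = 1, which the hard-coded Across expectations below imply), the Within path reduces to x / (1 + sum of squares over the batch). A standalone check of batch 0 (sum of squares 30, divisor 31):

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const float batch0[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    const float alpha = 1.0f, beta = 1.0f, kappa = 1.0f;

    float accumulatedScale = 0.0f;                        // 1 + 4 + 9 + 16 = 30
    for (float v : batch0) { accumulatedScale += v * v; }

    const float divisor = std::pow(kappa + accumulatedScale * alpha, beta);  // 31
    for (float v : batch0) { std::printf("%f ", v / divisor); }  // x / 31 per element
}
```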
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
@@ -188,16 +199,18 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- LayerTestResult<float,4> ret(outputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+ std::vector<float> input =
+ {
// Batch #0
1.0f, 2.0f,
3.0f, 4.0f,
// Batch #1
5.0f, 6.0f,
7.0f, 8.0f
- }));
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
float alpha = 1.f;
float beta = 1.f;
@@ -219,7 +232,7 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -229,11 +242,11 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
switch (normMethod)
{
@@ -243,9 +256,8 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
{
case armnn::NormalizationAlgorithmChannel::Across:
{
- std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
- 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
+ expectedOutput = { 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+ 0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
break;
}
default:
@@ -264,7 +276,10 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
}
}
- return ret;
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
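The hard-coded Across expectations follow from the same parameters: with a single channel, scale = (kappa + alpha * x^2)^(-beta), so x = 1 gives 0.5 and x = 2 gives 0.4, exactly the list above (kappa = 1 is inferred from those values). A quick check:

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const float alpha = 1.0f, beta = 1.0f, kappa = 1.0f;
    for (float x : { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f })
    {
        const float scale = std::pow(kappa + alpha * x * x, -beta);
        std::printf("%f ", x * scale);  // 0.5 0.4 0.3 0.235294 ... as hard-coded above
    }
}
```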
LayerTestResult<float,4> CompareNormalizationTestImpl(
@@ -297,7 +312,10 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
LayerTestResult<float,4> ret(outputTensorInfo);
- auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
+ auto input = MakeRandomTensor<float>(inputTensorInfo, 111234);
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
constexpr float alpha = 1.f;
constexpr float beta = 1.f;
@@ -330,9 +348,9 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
- if (!ret.supported)
+ ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
+ reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+ if (!ret.m_Supported)
{
return ret;
}
@@ -346,19 +364,125 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
+ ret.m_ActualData = actualOutput;
+ ret.m_ExpectedData = expectedOutput;
return ret;
}
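
Pre-sizing actualOutput and expectedOutput to GetNumElements() is load-bearing throughout this patch: CopyDataFromITensorHandle writes through the raw destination pointer, so the vector must already own the whole buffer before the copy runs. Assuming the copy is effectively a memcpy underneath:

    std::vector<float> wrong;                                     // size() == 0
    // CopyDataFromITensorHandle(wrong.data(), handle.get());     // writes into no storage
    std::vector<float> right(outputTensorInfo.GetNumElements());  // owns all N elements
    CopyDataFromITensorHandle(right.data(), handle.get());        // fills in place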
+LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::NormalizationAlgorithmChannel normChannel,
+ armnn::NormalizationAlgorithmMethod normMethod)
+{
+ const unsigned int inputHeight = 1;
+ const unsigned int inputWidth = 2;
+ const unsigned int inputChannels = 3;
+ const unsigned int inputNum = 2;
+
+ unsigned int outputHeight = inputHeight;
+ unsigned int outputWidth = inputWidth;
+ unsigned int outputChannels = inputChannels;
+ unsigned int outputNum = inputNum;
+
+ unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
+ unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
+
+ auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<float> input =
+ {
+ // Batch #0
+ -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+ // Batch #1
+ -2.1f, 2.6f, 1.7f, 1.2f, -1.0f, 0.7f,
+ };
+
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
+
+ float alpha = 4.f;
+ float beta = 0.5f;
+ float kappa = 9.f;
+ uint32_t normSize = 5;
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::NormalizationQueueDescriptor data;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+ data.m_Parameters.m_NormChannelType = normChannel;
+ data.m_Parameters.m_NormMethodType = normMethod;
+ data.m_Parameters.m_NormSize = normSize;
+ data.m_Parameters.m_Alpha = alpha;
+ data.m_Parameters.m_Beta = beta;
+ data.m_Parameters.m_K = kappa;
+ data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, expectedOutput.data());
+ armnn::NormalizationQueueDescriptor refData = data;
+ armnn::WorkloadInfo refInfo = info;
+ SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ switch (normMethod)
+ {
+ case armnn::NormalizationAlgorithmMethod::LocalBrightness:
+ {
+ switch (normChannel)
+ {
+ case armnn::NormalizationAlgorithmChannel::Across:
+ {
+ expectedOutput = { -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f,
+ -0.259993f, 0.321897f, 0.210471f, 0.263625f, -0.219687f, 0.153781f, };
+ break;
+ }
+ default:
+ {
+ throw armnn::UnimplementedException("Unsupported normalisation channel type, "
+ "only Across is supported by this test");
+ }
+ }
+ break;
+ }
+ case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
+ default:
+ {
+ throw armnn::UnimplementedException("Unsupported normalisation method type, "
+ "only LocalBrightness is supported");
+ }
+ }
+
+ return LayerTestResult<float, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
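
The expected values hard-coded above can be reproduced by hand. With normSize 5 covering all 3 channels of each NHWC pixel, the local brightness denominator works out to (kappa + alpha * sum(x^2))^beta for these constants; the following standalone check (an independent re-derivation, not code from this patch) prints the six values per batch:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float alpha = 4.f, beta = 0.5f, kappa = 9.f;
        const float pixels[2][3] = { { -2.1f,  2.6f, 1.7f },
                                     {  1.2f, -1.0f, 0.7f } };
        for (const auto& pixel : pixels)
        {
            float sumSq = 0.f;
            for (float v : pixel) { sumSq += v * v; }
            const float denom = std::pow(kappa + alpha * sumSq, beta);
            for (float v : pixel) { std::printf("%f ", v / denom); }
            std::printf("\n");
        }
        // Prints -0.259993 0.321897 0.210471 and 0.263625 -0.219687 0.153781,
        // matching the Across/LocalBrightness expectations above.
        return 0;
    }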
+
} // anonymous namespace
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
@@ -405,3 +529,17 @@ LayerTestResult<float,4> CompareNormalizationTest(
workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
normChannel, normMethod);
}
+
+LayerTestResult<float,4> AcrossChannelNormalizationTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+ auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
+ return AcrossChannelNormalizationTestImpl(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ normChannel,
+ normMethod);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp
index 3a276e8c4c..bbbbc4fe02 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.hpp
@@ -35,3 +35,8 @@ LayerTestResult<float, 4> CompareNormalizationTest(
const armnn::ITensorHandleFactory& refTensorHandleFactory,
armnn::NormalizationAlgorithmChannel normChannel,
armnn::NormalizationAlgorithmMethod normMethod);
+
+LayerTestResult<float, 4> AcrossChannelNormalizationTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 086f8757dd..a09e387b0e 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -54,15 +54,11 @@ LayerTestResult<T, 2> Pad2dTestCommon(
},
qScale, qOffset);
- auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
-
- LayerTestResult<T, 2> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
armnn::PadQueueDescriptor descriptor;
std::vector<std::pair<unsigned int, unsigned int>> padList;
@@ -81,14 +77,17 @@ LayerTestResult<T, 2> Pad2dTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 2>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T>
@@ -140,15 +139,11 @@ LayerTestResult<T, 3> Pad3dTestCommon(
},
qScale, qOffset);
- auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
-
- LayerTestResult<T, 3> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
armnn::PadQueueDescriptor descriptor;
std::vector<std::pair<unsigned int, unsigned int>> PadList;
@@ -167,14 +162,17 @@ LayerTestResult<T, 3> Pad3dTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 3>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T>
@@ -380,10 +378,7 @@ LayerTestResult<T, 4> Pad4dTestCommon(
},
qScale, qOffset);
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -407,14 +402,17 @@ LayerTestResult<T, 4> Pad4dTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T>
@@ -453,10 +451,7 @@ LayerTestResult<T, 2> PadQAsymmTestCommon(
p, p, p, p, p, p, p
};
- auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
-
- LayerTestResult<T, 2> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -480,14 +475,17 @@ LayerTestResult<T, 2> PadQAsymmTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 2>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
//
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index 74d29f0250..91add545ec 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -27,10 +27,8 @@ LayerTestResult<T, 4> SimplePermuteTestImpl(
const std::vector<T>& outputExpectedData)
{
IgnoreUnused(memoryManager);
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -46,13 +44,16 @@ LayerTestResult<T, 4> SimplePermuteTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 2275b9f07a..1eaf1f9d66 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -37,8 +37,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
armnn::Pooling2dDescriptor descriptor,
float qScale,
int32_t qOffset,
- const boost::multi_array<T, 4>& input,
- const boost::multi_array<T, 4>& outputExpected)
+ const std::vector<T>& input,
+ const std::vector<T>& outputExpected,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputShape)
{
IgnoreUnused(memoryManager);
const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
@@ -47,15 +49,15 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
auto widthIndex = dimensionIndices.GetWidthIndex();
auto channelsIndex = dimensionIndices.GetChannelsIndex();
- unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
- unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
- unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
- unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[heightIndex]);
+ unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[widthIndex]);
+ unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[channelsIndex]);
+ unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
- unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
- unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
- unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
- unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+ unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputShape[heightIndex]);
+ unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputShape[widthIndex]);
+ unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputShape[channelsIndex]);
+ unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputShape[0]);
armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
@@ -73,6 +75,7 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
}
LayerTestResult<T, 4> result(outputTensorInfo);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -89,10 +92,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
- queueDescriptor.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
- if (!result.supported)
+ result.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
+ queueDescriptor.m_Parameters,
+ reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+ if (!result.m_Supported)
{
return result;
}
@@ -102,13 +105,14 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- result.outputExpected = outputExpected;
+ result.m_ActualData = actualOutput;
+ result.m_ExpectedData = outputExpected;
return result;
}
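
With flat vectors, SimplePooling2dTestImpl can no longer read dimensions off input.shape(), so every caller now forwards the two TensorShapes explicitly. The call-site pattern, using the names the callers below already have in scope:

    auto result = SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor,
        qScale, qOffset,
        input,                          // std::vector<T>, row-major
        outputExpected,                 // std::vector<T>, row-major
        inputTensorInfo.GetShape(),     // previously input.shape()
        outputTensorInfo.GetShape());   // previously outputExpected.shape()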
@@ -194,15 +198,14 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
- auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
+ auto input = QuantizedVector<T>(inputData, qScale, qOffset);
// These were calculated manually.
- auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
- boost::multi_array<T, 4> outputExpected(shape);
+ std::vector<T> outputExpected;
if (forceNoPadding)
{
- outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ outputExpected = QuantizedVector<T>(
+ {
8.0f, 8.0f, 8.0f,
9.0f, 7.0f, 9.0f,
9.0f, 9.0f, 9.0f,
@@ -219,12 +222,12 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
-1.0f, 0.0f, 0.0f,
-1.0f, -1.0f, -1.0f
},
- qScale, qOffset));
+ qScale, qOffset);
}
else
{
- outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ outputExpected = QuantizedVector<T>(
+ {
0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
@@ -241,11 +244,12 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
},
- qScale, qOffset));
+ qScale, qOffset);
}
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -312,12 +316,9 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
outputData = tmp1;
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
-
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -384,12 +385,9 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
outputData = tmp1;
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
-
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -422,26 +420,23 @@ LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> inputVec;
+ std::vector<T> input;
for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
{
- inputVec.push_back(1);
+ input.push_back(1);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
-
- std::vector<T> outputVec;
+ std::vector<T> outputExpected;
for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
{
- outputVec.push_back(1);
+ outputExpected.push_back(1);
}
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
-
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -499,12 +494,9 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
outputData = tmp1;
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
-
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -522,25 +514,26 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
2.0f, 1.0f, 5.0f, 2.0f,
1.0f, 2.0f, 2.0f, 1.0f,
5.0f, 4.0f, 1.0f, 5.0f,
2.0f, 1.0f, 5.0f, 2.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f, 3.0f,
3.0f, 3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
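
The uniform 3.0f expectations here, and in the stride-3 and stride-4 variants below, are easy to verify: L2 pooling is the square root of the mean of squares, and every 3x3 window of the repeating { 2, 1, 5 / 1, 2, 2 / 5, 4, 1 } pattern has a squared sum of 81:

    const float window[9] = { 2.f, 1.f, 5.f, 1.f, 2.f, 2.f, 5.f, 4.f, 1.f };
    float sumSq = 0.f;
    for (float v : window) { sumSq += v * v; }   // 4+1+25+1+4+4+25+16+1 == 81
    const float l2 = std::sqrt(sumSq / 9.f);     // == 3.0f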
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -558,8 +551,8 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -570,19 +563,20 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f, 3.0f, 3.0f,
3.0f, 3.0f, 3.0f,
3.0f, 3.0f, 3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -600,8 +594,8 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
@@ -610,18 +604,19 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f, 3.0f,
3.0f, 3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -639,8 +634,8 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
@@ -649,17 +644,18 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -677,8 +673,8 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -689,17 +685,18 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -727,21 +724,22 @@ LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Construct input data.
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
1.0f, 3.0f, 4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
// These were calculated manually.
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
0.0f, 3.0f, 0.0f, 3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -788,7 +786,9 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
+ std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 81715);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
@@ -817,10 +817,10 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
- data.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
- if (!comparisonResult.supported)
+ comparisonResult.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
+ data.m_Parameters,
+ reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+ if (!comparisonResult.m_Supported)
{
return comparisonResult;
}
@@ -838,14 +838,17 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
workload->Execute();
workloadRef->Execute();
- CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
+
+ comparisonResult.m_ActualData = actualOutput;
+ comparisonResult.m_ExpectedData = expectedOutput;
return comparisonResult;
}
@@ -924,14 +927,15 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
+ auto input = QuantizedVector<T>(inputData, qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ auto outputExpected =
forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
- QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
+ QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
//
@@ -1003,14 +1007,15 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
+ auto input = QuantizedVector<T>(inputData, qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ auto outputExpected =
forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
- QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
+ QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
@@ -1044,25 +1049,26 @@ LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
-1.0f, -2.0f, 3.0f, 4.0f,
-1.0f, -2.0f, 3.0f, 4.0f,
1.0f, 2.0f, -3.0f, -4.0f,
1.0f, 2.0f, -3.0f, -4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
-1.0f, 3.0f, 4.0f,
1.0f, 3.0f, 4.0f,
1.0f, 2.0f, -4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1095,26 +1101,27 @@ LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
-1.0f, -2.0f, 3.0f, 4.0f,
-1.0f, -2.0f, 3.0f, 4.0f,
1.0f, 2.0f, -3.0f, -4.0f,
1.0f, 2.0f, -3.0f, -4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
-1.0f, 3.0f, 4.0f, 4.0f,
2.0f, 3.0f, 4.0f, 4.0f,
2.0f, 3.0f, 4.0f, 4.0f,
2.0f, 2.0f, 2.0f, -3.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1147,25 +1154,26 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
12.0f, 20.0f, 32.0f, 40.0f,
12.0f, 20.0f, 32.0f, 40.0f,
12.0f, 20.0f, 32.0f, 40.0f,
12.0f, 20.0f, 32.0f, 40.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
3.0f, 13.0f, 10.0f,
6.0f, 26.0f, 20.0f,
3.0f, 13.0f, 10.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1199,24 +1207,25 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
2.0f, 3.5f,
2.0f, 3.5f
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1249,26 +1258,27 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
9.0f, 27.0f, 18.0f, 36.0f,
18.0f, 9.0f, 18.0f, 9.0f,
27.0f, 18.0f, 9.0f, 27.0f,
9.0f, 27.0f, 9.0f, 18.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
7.0f, 11.0f, 13.0f, 9.0f,
12.0f, 17.0f, 19.0f, 13.0f,
12.0f, 16.0f, 16.0f, 10.0f,
9.0f, 11.0f, 12.0f, 7.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1301,25 +1311,26 @@ LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
2.0f, 4.0f, 8.0f, 16.0f,
4.0f, 2.0f, 2.0f, 4.0f,
8.0f, 2.0f, 4.0f, 2.0f,
16.0f, 2.0f, 2.0f, 8.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
1.0f, 4.4721f, 8.0f,
4.4721f, 2.6457f, 2.236f,
8.0f, 1.4142f, 4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1352,26 +1363,27 @@ LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- QuantizedVector<T>({
+ auto input = QuantizedVector<T>(
+ {
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
1.0f, 2.0f, 3.0f, 4.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- QuantizedVector<T>({
+ auto outputExpected = QuantizedVector<T>(
+ {
1.0540f, 1.7638f, 2.5385f, 2.3570f,
1.2909f, 2.1602f, 3.1091f, 2.8867f,
1.2909f, 2.1602f, 3.1091f, 2.8867f,
1.0540f, 1.7638f, 2.5385f, 2.3570f,
},
- qScale, qOffset));
+ qScale, qOffset);
return SimplePooling2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
+ workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
+ input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index a5c53d0e58..3cf85817c8 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -61,22 +61,18 @@ LayerTestResult<T, 4> PreluTest(
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
};
- auto input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData,
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset()));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset());
- auto alpha = MakeTensor<T, 4>(alphaTensorInfo,
- armnnUtils::QuantizedVector<T>(alphaData,
+ std::vector<T> alpha = armnnUtils::QuantizedVector<T>(alphaData,
alphaTensorInfo.GetQuantizationScale(),
- alphaTensorInfo.GetQuantizationOffset()));
+ alphaTensorInfo.GetQuantizationOffset());
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected =
- MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputExpectedData,
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset());
std::unique_ptr <armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr <armnn::ITensorHandle> alphaHandle = tensorHandleFactory.CreateTensorHandle(alphaTensorInfo);
@@ -94,12 +90,15 @@ LayerTestResult<T, 4> PreluTest(
alphaHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
- CopyDataToITensorHandle(alphaHandle.get(), &alpha[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(alphaHandle.get(), alpha.data());
workload->Execute();
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
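
The expected data in PreluTest encodes the usual PReLU definition, with alpha broadcast against the input. A scalar reference for the standard formula (not code from this patch):

    // f(x) = x         for x >= 0
    //      = alpha*x   otherwise
    inline float PreluRef(float x, float alpha)
    {
        return x >= 0.f ? x : alpha * x;
    }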
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index 5a36856e54..029d50e718 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -31,10 +31,7 @@ LayerTestResult<T, Dim> QuantizeTestImpl(
armnn::QuantizeQueueDescriptor descriptor)
{
IgnoreUnused(memoryManager);
- boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
-
- LayerTestResult<T, Dim> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -48,13 +45,16 @@ LayerTestResult<T, Dim> QuantizeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, Dim>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
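
For reference, the per-element affine quantization the Quantize workload is being tested against, assuming round-to-nearest and saturation at the target type's limits; ArmNN's own quantize routine may differ in rounding-mode details:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    template <typename T>
    T QuantizeRef(float value, float scale, int32_t offset)
    {
        const int32_t q  = static_cast<int32_t>(std::round(value / scale)) + offset;
        const int32_t lo = std::numeric_limits<T>::min();
        const int32_t hi = std::numeric_limits<T>::max();
        return static_cast<T>(std::min(std::max(q, lo), hi));
    }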
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
index aeed272446..c483d2cdc6 100644
--- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
@@ -14,7 +14,7 @@
template<typename T, std::size_t n>
LayerTestResult<int32_t, 1> RankTest(
armnn::TensorInfo inputTensorInfo,
- boost::multi_array<T, n> input,
+ std::vector<T> input,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory)
@@ -24,8 +24,8 @@ LayerTestResult<int32_t, 1> RankTest(
const armnn::TensorShape outputShape{armnn::Dimensionality::Scalar};
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
- LayerTestResult<int32_t , 1> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<uint32_t, 1>(outputTensorInfo, { n });
+ std::vector<int32_t> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<int32_t> expectedOutput = { n };
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -40,13 +40,16 @@ LayerTestResult<int32_t, 1> RankTest(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<int32_t, 1>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
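
One subtlety in the rewrite above: `expectedOutput = { n }` list-initialises an int32_t element from a std::size_t. That compiles only because n is a template parameter, i.e. a constant expression whose value (at most 4 here) fits in int32_t; with a runtime size_t the same line would be a narrowing error and would need an explicit conversion, for example:

    std::vector<int32_t> expectedOutput = { armnn::numeric_cast<int32_t>(n) };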
template<armnn::DataType ArmnnType, typename T>
@@ -56,9 +59,7 @@ LayerTestResult<int32_t, 1> RankDimSize1Test(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({6}, ArmnnType, 1.0f, 0);
- auto input = MakeTensor<T, 1>(inputTensorInfo, ConvertToDataType<ArmnnType>(
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f },
- inputTensorInfo));
+ auto input = ConvertToDataType<ArmnnType>({ -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f }, inputTensorInfo);
return RankTest<T, 1>(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory);
}
@@ -70,9 +71,7 @@ LayerTestResult<int32_t, 1> RankDimSize2Test(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({1, 3}, ArmnnType, 1.0f, 0);
- auto input = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(
- { -37.5f, -15.2f, -8.76f },
- inputTensorInfo));
+ auto input = ConvertToDataType<ArmnnType>({ -37.5f, -15.2f, -8.76f }, inputTensorInfo);
return RankTest<T, 2>(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory);
}
@@ -84,9 +83,7 @@ LayerTestResult<int32_t, 1> RankDimSize3Test(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({1, 3, 2}, ArmnnType, 1.0f, 0);
- auto input = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(
- { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f},
- inputTensorInfo));
+ auto input = ConvertToDataType<ArmnnType>({ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f}, inputTensorInfo);
return RankTest<T, 3>(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory);
}
@@ -98,10 +95,10 @@ LayerTestResult<int32_t, 1> RankDimSize4Test(
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType, 1.0f, 0);
- auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+ auto input = ConvertToDataType<ArmnnType>(
{ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
- inputTensorInfo));
+ inputTensorInfo);
return RankTest<T, 4>(inputTensorInfo, input, workloadFactory, memoryManager, tensorHandleFactory);
}
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp
index 2757eceb8a..0aacee1aa5 100644
--- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp
@@ -15,7 +15,7 @@
template<typename T , std::size_t n>
LayerTestResult<int32_t, 1> RankTest(
armnn::TensorInfo inputTensorInfo,
- boost::multi_array<T, n> input,
+ std::vector<T> input,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
index 18821b9549..9f5422bcbc 100644
--- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
@@ -28,10 +28,9 @@ LayerTestResult<float, 4> ReduceTestCommon(
bool keepDims = false)
{
IgnoreUnused(memoryManager);
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+ auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -65,13 +64,16 @@ LayerTestResult<float, 4> ReduceTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<float, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // namespace
diff --git a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
index 589cc03cbc..7ce03ad13a 100644
--- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
@@ -30,10 +30,9 @@ LayerTestResult<float, 4> ReductionTestCommon(
bool keepDims = false)
{
IgnoreUnused(memoryManager);
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+ auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+ std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -67,13 +66,16 @@ LayerTestResult<float, 4> ReductionTestCommon(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<float, 4>(actualOutput,
+ outputData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // namespace
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index fbedb943f4..c3aacad4b0 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -25,10 +25,7 @@ LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
const std::vector<T>& outputExpectedData)
{
IgnoreUnused(memoryManager);
- auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
-
- LayerTestResult<T, NumDims> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, outputExpectedData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -43,13 +40,16 @@ LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->Execute();
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, NumDims>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index a2a804d54e..7706bde60d 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -95,10 +95,10 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
std::vector<T> inputData =
armnnUtils::QuantizedVector<T>(params.m_InputData, params.m_InQuantScale, params.m_InQuantOffset);
- std::vector<T> expectedOutputData =
- armnnUtils::QuantizedVector<T>(params.m_ExpectedOutputData,
- params.m_OutQuantScale,
- params.m_OutQuantOffset);
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(params.m_ExpectedOutputData,
+ params.m_OutQuantScale,
+ params.m_OutQuantOffset);
if (params.m_DataLayout == armnn::DataLayout::NHWC)
{
@@ -106,11 +106,6 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
}
- auto input = MakeTensor<T, NumDims>(inputInfo, inputData);
-
- LayerTestResult<T, NumDims> result(outputInfo);
- result.outputExpected = MakeTensor<T, NumDims>(outputInfo, expectedOutputData);
-
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
@@ -132,13 +127,17 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->PostAllocationConfigure();
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
- return result;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, NumDims>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
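
ResizeTestImpl keeps a single canonical NCHW data set and permutes both the input and the expectations when the test runs in NHWC, rather than duplicating literals per layout. Assuming PermuteTensorNchwToNhwc is equivalent to the plain index remap dst[n][h][w][c] = src[n][c][h][w], a self-contained sketch:

    #include <vector>

    template <typename T>
    std::vector<T> NchwToNhwc(const std::vector<T>& src, unsigned int n,
                              unsigned int c, unsigned int h, unsigned int w)
    {
        std::vector<T> dst(src.size());
        for (unsigned int in = 0; in < n; ++in)
            for (unsigned int ic = 0; ic < c; ++ic)
                for (unsigned int ih = 0; ih < h; ++ih)
                    for (unsigned int iw = 0; iw < w; ++iw)
                        dst[((in * h + ih) * w + iw) * c + ic] =
                            src[((in * c + ic) * h + ih) * w + iw];
        return dst;
    }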
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index a2e6e2473f..f3e28363c2 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -39,12 +39,9 @@ LayerTestResult<T, NumDims> SliceTestImpl(
outputInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, NumDims> input =
- MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
-
- LayerTestResult<T, NumDims> result(outputInfo);
- result.outputExpected =
- MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+ std::vector<T> actualOutput(outputInfo.GetNumElements());
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
@@ -64,9 +61,12 @@ LayerTestResult<T, NumDims> SliceTestImpl(
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(result.output.data(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, NumDims>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 9688ce49f2..375bdaa130 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -82,10 +82,10 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
- LayerTestResult<T, n> ret(outputTensorInfo);
-
// Each row is independently softmax'd.
- auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -102,18 +102,18 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), input.origin());
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
ARMNN_ASSERT(workload);
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
- ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
-
- return ret;
+ return LayerTestResult<T, n>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -259,9 +259,9 @@ LayerTestResult<T, 2> CompareSoftmaxTestImpl(
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
-
- LayerTestResult<T, 2> ret(outputTensorInfo);
- auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
+ auto input = MakeRandomTensor<T>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+ std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -292,17 +292,20 @@ LayerTestResult<T, 2> CompareSoftmaxTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
- CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
+ CopyDataToITensorHandle(inputHandleRef.get(), input.data());
ExecuteWorkload(*workload, memoryManager);
workloadRef->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+ CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
- return ret;
+ return LayerTestResult<T, 2>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
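
CompareSoftmaxTestImpl above now calls MakeRandomTensor<T> without a rank parameter, returning a flat vector rather than a boost::multi_array. A plausible vector-based sketch, assuming a std::mt19937-based replacement for the removed boost::random usage; the real helper in TensorHelpers.hpp may additionally quantize via the TensorInfo:

#include <armnn/Tensor.hpp>
#include <random>
#include <vector>

template <typename T>
std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
                                unsigned int seed,
                                float min = -10.0f,
                                float max = 10.0f)
{
    std::mt19937 generator(seed);
    std::uniform_real_distribution<float> distribution(min, max);

    // One flat element per entry in the tensor, regardless of rank.
    std::vector<T> result(tensorInfo.GetNumElements());
    for (auto& value : result)
    {
        value = static_cast<T>(distribution(generator));
    }
    return result;
}

This matches the call site above, MakeRandomTensor<T>(inputTensorInfo, 0xF00D, 0.0f, 1.0f), where the seed keeps the "random" input reproducible across runs.
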
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 6dbf82090b..44a37f4fe8 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -58,12 +58,9 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
-
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -77,13 +74,16 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index 8ff9157ec0..9175aec8c6 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -59,12 +59,9 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
- armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
-
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
- armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -78,13 +75,16 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index e2040b5b20..e19a3216c3 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -47,7 +47,6 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
unsigned int outputHeight2 = inputHeight;
unsigned int outputChannels2 = 2;
-
// Define the tensor descriptors.
armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);
@@ -75,13 +74,8 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
outputTensorInfo4.SetQuantizationOffset(qOffset);
}
- LayerTestResult<T,3> ret1(outputTensorInfo1);
- LayerTestResult<T,3> ret2(outputTensorInfo2);
- LayerTestResult<T,3> ret3(outputTensorInfo3);
- LayerTestResult<T,3> ret4(outputTensorInfo4);
-
- auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
- armnnUtils::QuantizedVector<T>({
+ auto input = armnnUtils::QuantizedVector<T>(
+ {
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
@@ -103,12 +97,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
},
- qScale, qOffset)
- ));
+ qScale, qOffset);
// Channel 0 of the original input.
- ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
- armnnUtils::QuantizedVector<T>({
+ auto expectedData1 = armnnUtils::QuantizedVector<T>(
+ {
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
@@ -116,12 +109,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
},
- qScale, qOffset)
- ));
+ qScale, qOffset);
// Channel 1 & 2 of the original input.
- ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
- armnnUtils::QuantizedVector<T>({
+ auto expectedData2 = armnnUtils::QuantizedVector<T>(
+ {
31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
@@ -136,12 +128,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
},
- qScale, qOffset)
- ));
+ qScale, qOffset);
// Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
- ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
- armnnUtils::QuantizedVector<T>({
+ auto expectedData3 = armnnUtils::QuantizedVector<T>(
+ {
31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
@@ -149,12 +140,11 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
},
- qScale, qOffset)
- ));
+ qScale, qOffset);
// Channel 1 of return 2.
- ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
- armnnUtils::QuantizedVector<T>({
+ auto expectedData4 = armnnUtils::QuantizedVector<T>(
+ {
61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
@@ -162,8 +152,12 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
},
- qScale, qOffset)
- ));
+ qScale, qOffset);
+
+ std::vector<T> actualData1(outputTensorInfo1.GetNumElements());
+ std::vector<T> actualData2(outputTensorInfo2.GetNumElements());
+ std::vector<T> actualData3(outputTensorInfo3.GetNumElements());
+ std::vector<T> actualData4(outputTensorInfo4.GetNumElements());
// NOTE: as a corollary of the restriction that splitting along x and y is not allowed, the x and y values
// of the view origins have to be zero; the coordinates follow the tensor info above: channels, height/y, width/x
@@ -219,12 +213,12 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
outputHandle1->Allocate();
outputHandle2->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
- CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
+ CopyDataFromITensorHandle(actualData1.data(), outputHandle1.get());
+ CopyDataFromITensorHandle(actualData2.data(), outputHandle2.get());
// Do the second split.
armnn::SplitterQueueDescriptor data2;
@@ -243,8 +237,13 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
ExecuteWorkload(*workload2, memoryManager);
- CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
- CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
+ CopyDataFromITensorHandle(actualData3.data(), outputHandle3.get());
+ CopyDataFromITensorHandle(actualData4.data(), outputHandle4.get());
+
+ LayerTestResult<T,3> ret1(actualData1, expectedData1, outputHandle1->GetShape(), outputTensorInfo1.GetShape());
+ LayerTestResult<T,3> ret2(actualData2, expectedData2, outputHandle2->GetShape(), outputTensorInfo2.GetShape());
+ LayerTestResult<T,3> ret3(actualData3, expectedData3, outputHandle3->GetShape(), outputTensorInfo3.GetShape());
+ LayerTestResult<T,3> ret4(actualData4, expectedData4, outputHandle4->GetShape(), outputTensorInfo4.GetShape());
std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
@@ -259,10 +258,10 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
float qScale, int32_t qOffset)
{
IgnoreUnused(memoryManager);
+
const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
- auto input = MakeTensor<T, 3>(
- tensorInfo,
- armnnUtils::QuantizedVector<T>({
+ auto input = armnnUtils::QuantizedVector<T>(
+ {
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
@@ -284,7 +283,9 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
},
- qScale, qOffset));
+ qScale, qOffset);
+
+ std::vector<T> actualOutput(tensorInfo.GetNumElements());
std::vector<unsigned int> origin = { 0, 0, 0 };
armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
@@ -309,15 +310,16 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
workload->Execute();
- LayerTestResult<T, 3> ret(tensorInfo);
- CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
- ret.outputExpected = input;
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 3>(actualOutput,
+ input,
+ outputHandle->GetShape(),
+ tensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index 16e709d528..25989f90ed 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -33,14 +33,13 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
{
IgnoreUnused(memoryManager);
unsigned int numInputs = static_cast<unsigned int>(inputData.size());
- std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
+ std::vector<std::vector<T>> inputs;
for (unsigned int i = 0; i < numInputs; ++i)
{
- inputs.push_back(MakeTensor<T, outputDimLength-1>(inputTensorInfo, inputData[i]));
+ inputs.emplace_back(inputData[i]);
}
- LayerTestResult<T, outputDimLength> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, outputDimLength>(outputTensorInfo, outputExpectedData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
for (unsigned int i = 0; i < numInputs; ++i)
@@ -60,7 +59,7 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
std::unique_ptr<armnn::ITensorHandle>& inputHandle = inputHandles[i];
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
inputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), inputs[i].origin());
+ CopyDataToITensorHandle(inputHandle.get(), inputs[i].data());
}
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -70,9 +69,12 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
workload->Execute();
- CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return result;
+ return LayerTestResult<T, outputDimLength>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 66a3b14e3f..af4b089cde 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -40,12 +40,9 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- boost::multi_array<T, InDim> input =
- MakeTensor<T, InDim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
-
- LayerTestResult<T, OutDim> ret(outputTensorInfo);
- ret.outputExpected =
- MakeTensor<T, OutDim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle =
tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
@@ -66,9 +63,12 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
ExecuteWorkload(*workload, memoryManager);
- CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, OutDim>(actualOutput,
+ expectedOutput,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 85ce7e5e6f..cd775729cd 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -183,8 +183,8 @@ LayerTestResult<T, 4> TransposeConvolution2dTest(
// execute test
TransposeConvolution2dTestImpl(workloadFactory,
- memoryManager,
- tensorHandleFactory,
+ memoryManager,
+ tensorHandleFactory,
descriptor,
input,
output,
@@ -193,11 +193,10 @@ LayerTestResult<T, 4> TransposeConvolution2dTest(
// construct result object
LayerTestResult<T, 4> testResult(outputInfo);
- testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
- testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
- armnnUtils::QuantizedVector<T>(expectedOutputData,
- outputInfo.GetQuantizationScale(),
- outputInfo.GetQuantizationOffset()));
+ testResult.m_ActualData = output.second;
+ testResult.m_ExpectedData = armnnUtils::QuantizedVector<T>(expectedOutputData,
+ outputInfo.GetQuantizationScale(),
+ outputInfo.GetQuantizationOffset());
return testResult;
}
@@ -611,6 +610,8 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
std::vector<int32_t> biasData = { -12, -8 };
+ std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
+
std::vector<uint8_t> expectedOutputData =
{
9, 13, 21, 19, 27,
@@ -665,11 +666,12 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
ExecuteWorkload(*workload, memoryManager);
- LayerTestResult<uint8_t, 4> ret(outputInfo);
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
- ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<uint8_t, 4>(actualOutput,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
}
//
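
TransposeConvolution2dTest above still constructs its result from a TensorInfo and assigns m_ActualData and m_ExpectedData afterwards, so the struct presumably also keeps a TensorInfo constructor alongside the four-argument one. A sketch under that assumption:

// Inside LayerTestResult<T, NumDims> (see the sketch after the Reshape hunk):
explicit LayerTestResult(const armnn::TensorInfo& outputInfo)
    : m_ActualData(outputInfo.GetNumElements())    // sized and value-initialised, filled by the caller
    , m_ExpectedData(outputInfo.GetNumElements())
    , m_ActualShape(outputInfo.GetShape())
    , m_ExpectedShape(outputInfo.GetShape())
{}
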
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index d0f9e82197..6be8bcb5cb 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -28,10 +28,7 @@ LayerTestResult<T, 4> SimpleTransposeTestImpl(
const std::vector<T>& outputExpectedData)
{
IgnoreUnused(memoryManager);
- auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
- LayerTestResult<T, 4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -47,13 +44,16 @@ LayerTestResult<T, 4> SimpleTransposeTestImpl(
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
workload->Execute();
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
- return ret;
+ return LayerTestResult<T, 4>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index e1729fc7de..918ef039a3 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -241,6 +241,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
// Pooling
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index c26f7bdae8..1048e73c1b 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -19,7 +19,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -27,7 +28,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -35,7 +37,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -43,7 +46,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
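
The four-argument CompareTensors used throughout these MemCopy tests replaces the old two-argument boost::multi_array overload. A minimal sketch consistent with the call sites, assuming a PredicateResult type with the m_Result and m_Message members used above; the real implementation in TensorHelpers.hpp also compares values with a per-type tolerance rather than exact equality:

#include <armnn/Tensor.hpp>
#include <cstddef>
#include <vector>

template <typename T>
armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
                                      const std::vector<T>& expectedData,
                                      const armnn::TensorShape& actualShape,
                                      const armnn::TensorShape& expectedShape)
{
    // Shape mismatch fails before any element comparison.
    if (actualShape != expectedShape)
    {
        armnn::PredicateResult result(false);
        result.m_Message << "Actual and expected shapes do not match";
        return result;
    }

    for (std::size_t i = 0; i < actualData.size(); ++i)
    {
        if (actualData[i] != expectedData[i])    // the real helper applies a tolerance here
        {
            armnn::PredicateResult result(false);
            result.m_Message << "Mismatch at element " << i;
            return result;
        }
    }
    return armnn::PredicateResult(true);
}

Passing the shapes explicitly is what lets the flat-vector comparison still catch rank and dimension errors that the multi_array types used to rule out at compile time.
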
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1b86d2e304..7c8e27710b 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -55,22 +55,22 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
TensorInfo tensorInfo({channels}, DataType::Float32);
- auto input = MakeTensor<float, 4>(inputTensorInfo,
- {
- 1.f, 4.f,
- 4.f, 2.f,
- 1.f, 6.f,
+ std::vector<float> input =
+ {
+ 1.f, 4.f,
+ 4.f, 2.f,
+ 1.f, 6.f,
- 1.f, 1.f,
- 4.f, 1.f,
- -2.f, 4.f
- });
+ 1.f, 1.f,
+ 4.f, 1.f,
+ -2.f, 4.f
+ };
// these values are per-channel of the input
- auto mean = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
- auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f, 9.f });
- auto beta = MakeTensor<float, 1>(tensorInfo, { 3.f, 2.f });
- auto gamma = MakeTensor<float, 1>(tensorInfo, { 2.f, 1.f });
+ std::vector<float> mean = { 3.f, -2.f };
+ std::vector<float> variance = { 4.f, 9.f };
+ std::vector<float> beta = { 3.f, 2.f };
+ std::vector<float> gamma = { 2.f, 1.f };
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -84,10 +84,10 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
ScopedTensorHandle betaTensor(tensorInfo);
ScopedTensorHandle gammaTensor(tensorInfo);
- AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
- AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
- AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
- AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+ AllocateAndCopyDataToITensorHandle(&meanTensor, mean.data());
+ AllocateAndCopyDataToITensorHandle(&varianceTensor, variance.data());
+ AllocateAndCopyDataToITensorHandle(&betaTensor, beta.data());
+ AllocateAndCopyDataToITensorHandle(&gammaTensor, gamma.data());
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -105,7 +105,7 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), input.data());
OpenClTimer openClTimer;
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 6cd26dfdd2..d12817e159 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -874,6 +874,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
// Resize Bilinear - NCHW data layout
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear, SimpleResizeBilinearTest<DataType::Float32>, DataLayout::NCHW)
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index 6a3d05d000..2bb9e3d431 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -20,7 +20,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -28,7 +29,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -36,7 +38,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -44,7 +47,8 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- auto predResult = CompareTensors(result.output, result.outputExpected);
+ auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
+ result.m_ActualShape, result.m_ExpectedShape);
BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index 9acd0e41e2..df014d5a9b 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -63,10 +63,6 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
armnn::DataType::Float32);
- LayerTestResult<float, 4> result(inputTensorInfo);
-
- auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -87,7 +83,7 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
NeonTimer neonTimer;
// Start the timer.
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 228df0946f..df48877108 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -474,6 +474,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
// Softmax
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)