author     Keith Davis <keith.davis@arm.com>  2020-08-31 08:32:55 +0100
committer  KeithARM <keith.davis@arm.com>     2020-08-31 13:37:55 +0000
commit     f500d6c22c7799dcc6b057d560fb88947ac63e6a
tree       b81fcc7e1c35120916119a114a040cce3108ab45 /src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
parent     7faf9a88637afae76373a439d7eca3507b77c98c
download   armnn-f500d6c22c7799dcc6b057d560fb88947ac63e6a.tar.gz
IVGCVSW-5249 Use CreateTensorHandle from ITensorHandleFactory in the test for all layers between C-D
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I9583adf50e67e63e73833f400d1c50fbff57f60c
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp | 383
1 file changed, 235 insertions(+), 148 deletions(-)
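
The diff below applies one mechanical change throughout the file: tensor handles that the test helpers previously requested from the deprecated IWorkloadFactory::CreateTensorHandle / CreateSubTensorHandle calls (wrapped in ARMNN_NO_DEPRECATE_WARN_BEGIN/END) are now requested from an armnn::ITensorHandleFactory reference passed into every helper. The following is a minimal illustrative sketch of that pattern, not code from this commit; the function name CreateConcatHandlesSketch is hypothetical and the include paths reflect the approximate ArmNN source layout at the time and may differ between versions.

// Illustrative sketch only: how the updated tests obtain tensor handles.
#include <armnn/Tensor.hpp>
#include <armnn/backends/ITensorHandleFactory.hpp>   // header location may differ per ArmNN version
#include <backendsCommon/WorkloadFactory.hpp>

#include <memory>
#include <utility>

namespace
{

// Creates the output handle plus one input handle the way the updated tests do:
// both come from the ITensorHandleFactory, and the input falls back to a
// sub-tensor of the output when the backend's workload factory supports it.
std::pair<std::unique_ptr<armnn::ITensorHandle>, std::unique_ptr<armnn::ITensorHandle>>
CreateConcatHandlesSketch(armnn::IWorkloadFactory& workloadFactory,
                          const armnn::ITensorHandleFactory& tensorHandleFactory,
                          const armnn::TensorInfo& inputTensorInfo,
                          const armnn::TensorInfo& outputTensorInfo,
                          const unsigned int* subTensorOrigin)
{
    // Previously: workloadFactory.CreateTensorHandle(outputTensorInfo) inside
    // ARMNN_NO_DEPRECATE_WARN_BEGIN/END. Handle creation now goes through the factory.
    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Same fallback logic as in the tests: prefer a sub-tensor view into the
    // output handle when supported, otherwise allocate a standalone handle.
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
    std::unique_ptr<armnn::ITensorHandle> inputHandle =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle,
                                                      inputTensorInfo.GetShape(),
                                                      subTensorOrigin) :
            tensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    return { std::move(inputHandle), std::move(outputHandle) };
}

} // anonymous namespace
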
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 7f047cd323..d486bc0c19 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -121,6 +121,7 @@ void Generate3dPermuteVectorForConcat(
template<typename T> void PermuteTensorData(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const PermutationVector& mappings,
TensorInfo & inputTensorInfo,
const T * inputData,
@@ -137,10 +138,8 @@ template<typename T> void PermuteTensorData(
}
TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
PermuteQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
@@ -172,6 +171,7 @@ template<typename T> void PermuteTensorData(
template<typename T> void PermuteInputsForConcat(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
std::vector<TensorInfo> & inputTensorInfos,
std::vector<T *> & inputData,
std::vector<std::vector<T>> & inputDataStorage,
@@ -215,6 +215,7 @@ template<typename T> void PermuteInputsForConcat(
PermuteTensorData<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
permutations.first,
newTensorInfo,
inputData[nthInput],
@@ -240,6 +241,7 @@ template<typename T> void PermuteInputsForConcat(
template <typename T> void PermuteOutputForConcat(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo & tensorInfo,
const PermutationVector & permuteVector,
std::unique_ptr<ITensorHandle> && inputDataHandle,
@@ -262,6 +264,7 @@ template <typename T> void PermuteOutputForConcat(
PermuteTensorData<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
permuteVector,
resultTensorInfo,
&inputData[0],
@@ -273,6 +276,7 @@ template <typename T> void PermuteOutputForConcat(
template<typename T> void Concatenate(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
std::initializer_list<const TensorInfo> inputTensorInfosOrig,
std::initializer_list<T *> inputsOrig,
const TensorInfo& outputTensorInfoOrig,
@@ -311,6 +315,7 @@ template<typename T> void Concatenate(
//
PermuteInputsForConcat<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputTensorInfos,
inputs,
tmpInputDataStorage,
@@ -323,9 +328,9 @@ template<typename T> void Concatenate(
std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
inputHandles.reserve(inputCount);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
ConcatQueueDescriptor queueDescriptor;
OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
queueDescriptor.m_Parameters = viewsDescriptor;
@@ -338,21 +343,21 @@ template<typename T> void Concatenate(
queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
for (unsigned int i = 0; i < inputCount; ++i)
{
const TensorInfo& inputTensorInfo = inputTensorInfos[i];
- ARMNN_NO_DEPRECATE_WARN_BEGIN
+
std::unique_ptr<ITensorHandle> inputHandle =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle,
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle,
inputTensorInfo.GetShape(),
queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+
inputHandles.emplace_back(std::move(inputHandle));
}
@@ -362,9 +367,7 @@ template<typename T> void Concatenate(
{
for (unsigned int i = 0; i < inputCount; ++i)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfos[i]);
inputHandles.emplace_back(std::move(inputHandle));
}
}
@@ -399,6 +402,7 @@ template<typename T> void Concatenate(
{
PermuteOutputForConcat<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
outputTensorInfo,
permuteVector,
std::move(outputHandle),
@@ -418,6 +422,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -433,7 +438,7 @@ LayerTestResult<T, 1> Concat1dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -455,6 +460,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
const float qScale,
@@ -496,7 +502,7 @@ LayerTestResult<T, 2> Concat2dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -512,13 +518,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
{
@@ -549,13 +556,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
{
@@ -574,6 +582,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -615,7 +624,7 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -653,6 +662,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -694,7 +704,7 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -720,6 +730,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
bool useSubtensor,
@@ -798,7 +809,7 @@ LayerTestResult<T, 3> Concat3dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -814,13 +825,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -887,13 +899,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -960,6 +973,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor,
float qScale,
int32_t qOffset)
@@ -967,7 +981,7 @@ LayerTestResult<T, 3> Concat3dDim2TestImpl(
TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -998,6 +1012,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1075,7 +1090,7 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1149,6 +1164,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1220,7 +1236,7 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1288,6 +1304,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor,
float qScale,
int32_t qOffset)
@@ -1366,7 +1383,7 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1404,6 +1421,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
bool useSubtensor,
@@ -1452,6 +1470,7 @@ LayerTestResult<T, 4> Concat4dTestImpl(
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo, inputTensorInfo, inputTensorInfo},
{input0.data(), input1.data(), input2.data()},
outputTensorInfo,
@@ -1467,13 +1486,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1507,13 +1527,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1547,13 +1568,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1587,6 +1609,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim3TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool useSubtensor)
@@ -1594,7 +1617,7 @@ LayerTestResult<T, 4> Concat4dDim3TestImpl(
TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1628,6 +1651,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1673,6 +1697,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1713,6 +1738,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1749,6 +1775,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1779,6 +1806,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1818,6 +1846,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1855,6 +1884,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool useSubtensor)
@@ -1895,6 +1925,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1921,6 +1952,7 @@ template<DataType ArmnnType, typename T>
LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
IgnoreUnused(memoryManager);
@@ -2009,21 +2041,21 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
- ARMNN_NO_DEPRECATE_WARN_BEGIN
+
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
OriginsDescriptor desc = CreateDescriptorForConcatenation(
inputTensorShapes.begin(),inputTensorShapes.end(), 2);
@@ -2062,12 +2094,14 @@ template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
//
@@ -2076,7 +2110,8 @@ ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
LayerTestResult<float,3> ConcatTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2158,21 +2193,21 @@ LayerTestResult<float,3> ConcatTest(
std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
@@ -2201,162 +2236,194 @@ LayerTestResult<float,3> ConcatTest(
LayerTestResult<float, 1> Concat1dTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ 0.0f,
+ 0);
}
LayerTestResult<float, 3> Concat3dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim2Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
- return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
+ useSubtensor, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
- workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim2Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim3Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
- return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0, useSubtensor);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
}
LayerTestResult<Half, 3> ConcatFloat16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<BFloat16, 3> ConcatBFloat16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2458,21 +2525,21 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
@@ -2501,7 +2568,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
LayerTestResult<uint8_t, 3> ConcatUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2595,21 +2663,20 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
@@ -2639,7 +2706,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
LayerTestResult<uint16_t, 3> ConcatUint16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2730,21 +2798,21 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
@@ -2774,146 +2842,165 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory, bool useSubtensor)
{
return Concat4dDim3TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
}