author     James Conroy <james.conroy@arm.com>                  2021-04-27 17:13:27 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2021-05-06 14:40:40 +0000
commit     1f58f03d82c482626b1b4673b6c0e25da4338fb5 (patch)
tree       e92451e00d459a2fc0d870694460f482aa4c77ae /src/backends/backendsCommon/test/layerTests
parent     a7a12f5c3654da554ad6197beff0f0fc54681c92 (diff)
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited classes by removing 'Cpu' from the aliases.
* New renamed classes: ConstTensorHandle, TensorHandle, ScopedTensorHandle, PassthroughTensorHandle, ConstPassthroughTensorHandle.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
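For context, a minimal sketch (not part of the patch) of the test-side pattern this rename touches: a constant tensor handle is constructed from a TensorInfo and the test data is copied into it before being wired into a workload descriptor. The Example function, the weightsInfo shape/type and the data values are illustrative assumptions; ScopedTensorHandle and the AllocateAndCopyDataToITensorHandle helper are the ones used throughout the diff below.

#include <vector>

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>

void Example()
{
    // Hypothetical constant-weights tensor; real tests derive this from the layer under test.
    armnn::TensorInfo weightsInfo({ 2, 3 }, armnn::DataType::Float32);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f);

    // Before this patch this type was spelled armnn::ScopedCpuTensorHandle; behaviour is unchanged.
    armnn::ScopedTensorHandle weightsTensor(weightsInfo);

    // Existing test helper: allocates the handle's storage and copies the data into it.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, weightsData.data());
}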
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp       26
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp                   4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp                    46
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp       4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp           4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp             6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp      2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp                 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp                     244
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp              6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp                    2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp    12
12 files changed, 179 insertions(+), 179 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index eb4f461eb9..969d5dbcd1 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>
@@ -74,10 +74,10 @@ LayerTestResult<T, 4> BatchNormTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
armnn::BatchNormalizationQueueDescriptor descriptor;
descriptor.m_Mean = &meanTensor;
@@ -160,10 +160,10 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
@@ -644,10 +644,10 @@ LayerTestResult<float,4> CompareBatchNormTest(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index 45c94d345b..c28ef40b45 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -101,7 +101,7 @@ LayerTestResult<T, 4> ConstantTestImpl(
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
+ armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
armnn::ConstantQueueDescriptor descriptor;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 4641e67aad..8f60415a66 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -13,7 +13,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -318,8 +318,8 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
// Permute the kernel if necessary
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
if (layout == armnn::DataLayout::NHWC)
@@ -423,10 +423,10 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
armnn::Convolution2dQueueDescriptor data;
@@ -547,8 +547,8 @@ LayerTestResult<T,4> Convolution1dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
- armnn::ScopedCpuTensorHandle biasTensor(biasInfo);
+ armnn::ScopedTensorHandle weightsTensor(kernelInfo);
+ armnn::ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -1349,8 +1349,8 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1722,11 +1722,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
if (biasEnabled)
{
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1882,8 +1882,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2095,8 +2095,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2252,8 +2252,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
@@ -3007,8 +3007,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -3502,8 +3502,8 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -3756,8 +3756,8 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 3ee1fadd81..f68082762c 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -8,7 +8,7 @@
#include <armnn/Types.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -181,7 +181,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
auto outputScoresHandle = tensorHandleFactory.CreateTensorHandle(detectionScoresInfo);
auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);
- armnn::ScopedCpuTensorHandle anchorsTensor(anchorsInfo);
+ armnn::ScopedTensorHandle anchorsTensor(anchorsInfo);
AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);
armnn::DetectionPostProcessQueueDescriptor data;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index f8644007f2..157df99d64 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -6,7 +6,7 @@
#include "FakeQuantizationTestImpl.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -48,7 +48,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
data.m_Parameters.m_Min = min;
data.m_Parameters.m_Max = max;
- armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
armnn::FakeQuantizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 9176094eb2..cd7f4efe31 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -8,7 +8,7 @@
#include <QuantizeHelper.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -40,8 +40,8 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+ armnn::ScopedTensorHandle weightsTensor(weightsDesc);
+ armnn::ScopedTensorHandle biasTensor(biasesDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 2e205dd58e..24a4dc4789 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 7ee7a3465b..f32d367d37 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -10,7 +10,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 07a1f1e879..7a9652a8ea 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -9,7 +9,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -269,19 +269,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -971,23 +971,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
std::vector<float> projectionBiasVector(outputSize, 0.f);
auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1142,21 +1142,21 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1455,28 +1455,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
-
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
+
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1673,21 +1673,21 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -1891,22 +1891,22 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -2145,28 +2145,28 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -2411,24 +2411,24 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 2e8e16f0c2..b52dcd5303 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -75,7 +75,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -219,7 +219,7 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index c4cc914115..9688ce49f2 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 328e724b54..85ce7e5e6f 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -68,7 +68,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
}
// set up weights
- ScopedCpuTensorHandle weightsTensor(weights.first);
+ ScopedTensorHandle weightsTensor(weights.first);
TransposeConvolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
@@ -76,11 +76,11 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
- std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
+ std::unique_ptr<ScopedTensorHandle> biasesTensor;
if (descriptor.m_BiasEnabled)
{
// set up biases
- biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
+ biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
queueDescriptor.m_Bias = biasesTensor.get();
AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
@@ -643,8 +643,8 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());