Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                                  | 116
-rw-r--r--  src/armnn/test/GraphTests.cpp                                      |   4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                                  |  56
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                             |  84
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                               |   2
-rw-r--r--  src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp    |   4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp      |   4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp |   2
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp |   2
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp  |   8
10 files changed, 141 insertions(+), 141 deletions(-)
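Every hunk below makes the same two substitutions: the include of backendsCommon/CpuTensorHandle.hpp becomes backendsCommon/TensorHandle.hpp, and every ScopedCpuTensorHandle becomes ScopedTensorHandle. A minimal sketch of the renamed type's basic lifecycle, assuming only that the armnn internal headers are on the include path:

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>

int main()
{
    // Describe a small 1-D weight tensor, back it with the renamed handle
    // type, and reserve storage sized from the TensorInfo.
    armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
    auto handle = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
    handle->Allocate();
    return 0;
}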
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 3ea2c35061..12623e62a0 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -14,9 +14,9 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -353,10 +353,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -411,10 +411,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlob
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -492,8 +492,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -555,8 +555,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -639,8 +639,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
TensorShape inputShape = TensorShape{1, 32, 149, 149};
TensorShape outputShape = TensorShape{1, 32, 147, 147};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -692,23 +692,23 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -724,9 +724,9 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
if (layerDesc.m_PeepholeEnabled)
{
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -814,27 +814,27 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
// Allocate weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
@@ -977,27 +977,27 @@ std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory&
armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
// Create and allocate tensors
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
layer->m_BasicParameters.m_InputToCellWeights->Allocate();
@@ -1093,8 +1093,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
(TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1148,7 +1148,7 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
layer->m_Weight->Allocate();
// Creates extra layers.
@@ -1200,8 +1200,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1245,8 +1245,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -2108,7 +2108,7 @@ std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFac
armnn::TensorInfo outputTensorInfo(outputShape, DataType);
auto constant = graph.AddLayer<ConstantLayer>("constant");
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(outputTensorInfo);
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
BOOST_TEST_CHECKPOINT("created constant layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
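The CreateWorkload.hpp hunks are representative of the whole patch: each per-layer constant (batch-norm statistics, convolution weights and biases, LSTM and QLSTM gate parameters) is now owned by a ScopedTensorHandle. A condensed sketch of the batch-normalization setup the first two hunks exercise; the helper name is hypothetical, and the layer type is assumed to be visible via Graph.hpp or layers/BatchNormalizationLayer.hpp:

#include <Graph.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>

void SetUpBatchNormParams(armnn::Graph& graph)
{
    armnn::BatchNormalizationDescriptor layerDesc;
    auto* layer = graph.AddLayer<armnn::BatchNormalizationLayer>(layerDesc, "layer");

    // All four parameter tensors share one shape; each gets its own handle.
    armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
    layer->m_Mean     = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
    layer->m_Beta     = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
    layer->m_Gamma    = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);

    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();
}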
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 602575b352..69f96d43a3 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -14,7 +14,7 @@
#include <armnn/backends/IBackendInternal.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
#include <boost/test/unit_test.hpp>
@@ -603,7 +603,7 @@ BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
float weight = 1.0f;
armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);
- fcLayer->m_Weight = std::make_shared<armnn::ScopedCpuTensorHandle>(constTensor);
+ fcLayer->m_Weight = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
// point sharedWeightPtr to graph1's const tensor
sharedWeightPtr = fcLayer->m_Weight->GetConstTensor<float>();
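This GraphTests.cpp hunk is the one place the patch touches a shared_ptr rather than a unique_ptr: the test asserts that two graphs can share a single constant tensor. A hedged sketch of that sharing, using only calls visible in the diff:

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>

int main()
{
    float weight = 1.0f;
    armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);

    // Two owners of one backing store; the pointer returned by
    // GetConstTensor stays valid while either owner is alive.
    auto first  = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
    auto second = first;
    const float* sharedWeightPtr = first->GetConstTensor<float>();
    return sharedWeightPtr == second->GetConstTensor<float>() ? 0 : 1;
}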
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 7e8a898293..d0734d83be 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -18,9 +18,9 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -45,23 +45,23 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -76,11 +76,11 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (!layerDesc.m_CifgEnabled)
{
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_CifgParameters.m_InputToInputWeights->Allocate();
layer->m_CifgParameters.m_RecurrentToInputWeights->Allocate();
@@ -89,9 +89,9 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (layerDesc.m_ProjectionEnabled)
{
- layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize, numUnits }, DataType::Float32));
- layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize }, DataType::Float32));
layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
@@ -101,13 +101,13 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
{
if (!layerDesc.m_CifgEnabled)
{
- layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
}
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -276,7 +276,7 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
input->GetOutputSlot().SetTensorInfo(inputInfo);
Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -326,7 +326,7 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
input->GetOutputSlot().SetTensorInfo(inputInfo);
DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -529,7 +529,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
descriptor.m_MaxDetections = 3;
DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
- layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
@@ -571,7 +571,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
- conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ conv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -1211,16 +1211,16 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
conv->GetOutputSlot().SetTensorInfo(outputInfo);
batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
- conv->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
- batchNorm->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
- batchNorm->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
- batchNorm->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
- batchNorm->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
+ conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
+ batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
+ batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
+ batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
+ batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
if (convolution2dDescriptor.m_BiasEnabled)
{
std::vector<float> biasVector = { 11 };
ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
- conv->m_Bias = std::make_unique<ScopedCpuTensorHandle>(bias);
+ conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
}
// Connect layers
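In OptimizerTests.cpp the rename runs through CreateLSTMLayerHelper, which only populates the optional parameter structs when the matching descriptor flag allows it. A trimmed, hypothetical extraction of that branching (the function name is invented; LstmLayer and LstmDescriptor are the armnn internals the file already uses):

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>
#include <layers/LstmLayer.hpp>

#include <memory>

void AttachOptionalLstmParams(armnn::LstmLayer* layer,
                              const armnn::LstmDescriptor& layerDesc,
                              unsigned int numUnits,
                              unsigned int inputSize,
                              unsigned int outputSize)
{
    using namespace armnn;

    // CIFG weights exist only while CIFG is disabled...
    if (!layerDesc.m_CifgEnabled)
    {
        layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
        layer->m_CifgParameters.m_InputToInputWeights->Allocate();
    }
    // ...and projection weights only when projection is enabled.
    if (layerDesc.m_ProjectionEnabled)
    {
        layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
            (TensorInfo({ outputSize, numUnits }, DataType::Float32));
        layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
    }
}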
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 25b0feaded..fa3f400569 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -9,8 +9,8 @@
#include <Graph.hpp>
#include <InternalTypes.hpp>
#include <layers/FullyConnectedLayer.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <string>
@@ -240,7 +240,7 @@ BOOST_AUTO_TEST_CASE(ConstantTesst)
const float Datum = 0.0f;
ConstTensor output0({outputShape, DataType::Float32}, &Datum);
- layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(output0);
+ layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(Convolution2dTest)
const float Datum = 0.0f;
ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
}
@@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
const float Datum = 0.0f;
ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
}
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
descriptor,
"detectionpostprocess");
- layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchorsTensor);
+ layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
}
@@ -415,7 +415,7 @@ BOOST_AUTO_TEST_CASE(FullyConnectedTest)
const float Datum = 0.0f;
ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
}
@@ -469,18 +469,18 @@ BOOST_AUTO_TEST_CASE(LstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
}
@@ -557,18 +557,18 @@ BOOST_AUTO_TEST_CASE(QLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
@@ -585,18 +585,18 @@ BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
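The ShapeInferenceTests.cpp cases all follow one recipe: back the layer's constant slot with a ScopedTensorHandle, then let RunShapeInferenceTest check the inferred output shape. A self-contained sketch of the constant-layer variant; unlike the test above (which points a 1x1x3x3 shape at a single float), this hypothetical version sizes the buffer to match the shape:

#include <Graph.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>
#include <vector>

void BackConstantLayer(armnn::Graph& graph)
{
    auto* layer = graph.AddLayer<armnn::ConstantLayer>("constant");

    // 1x1x3x3 output, so the backing buffer holds nine floats.
    std::vector<float> data(9, 0.0f);
    armnn::ConstTensor output0({{ 1, 1, 3, 3 }, armnn::DataType::Float32}, data.data());

    layer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(output0);
    layer->GetOutputSlot(0).SetTensorInfo({{ 1, 1, 3, 3 }, armnn::DataType::Float32});
}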
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 73ef8bea91..ecb876dc7a 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <fstream>
#include <map>
#include <queue>
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 4523e70437..d0d728bfab 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(info1, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(info1, &tensor));
input->GetOutputSlot().SetTensorInfo(info0);
constant->GetOutputSlot().SetTensorInfo(info1);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
float tensor[] = { 2.0f };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(constantTermInfo, &tensor));
add1->GetOutputSlot().SetTensorInfo(outputInfo);
input->GetOutputSlot().Connect(add1->GetInputSlot(0));
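Both hunks in this file back a ConstantLayer before wiring it into an addition. A hedged sketch of that wiring, reusing the calls the diff shows (the AdditionLayer creation is assumed here; in the test it already exists as add1):

#include <Graph.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>

void WireConstantIntoAdd(armnn::Graph& graph)
{
    armnn::TensorInfo constantTermInfo({ 1 }, armnn::DataType::Float32);

    auto* constant = graph.AddLayer<armnn::ConstantLayer>("constant");
    auto* add1     = graph.AddLayer<armnn::AdditionLayer>("add1");

    float tensor[] = { 2.0f };
    constant->m_LayerOutput =
        std::make_unique<armnn::ScopedTensorHandle>(armnn::ConstTensor(constantTermInfo, &tensor));
    constant->GetOutputSlot().SetTensorInfo(constantTermInfo);

    // The constant feeds one input of the addition; the optimization under
    // test may later insert a broadcast reshape on this connection.
    constant->GetOutputSlot().Connect(add1->GetInputSlot(1));
}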
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index bb8e674b56..e4c1f2f413 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 12df462456..1dfe7f431c 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 7d7c6b2b0a..1ddf5262e8 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
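The three ConvertConstants*Tests.cpp files differ only in the source and destination data types; each builds the same tiny input -> fully connected -> output graph around one constant weight tensor. A hedged sketch of that shared scaffolding (the function name is invented; the calls mirror the context lines above):

#include <Graph.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <memory>

void BuildFcGraph(armnn::Graph& graph,
                  const armnn::ConstTensor& weights,
                  const armnn::TensorInfo& info)
{
    auto* input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    // The constant the optimization converts lives behind m_Weight.
    auto* fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    fc->GetOutputSlot().SetTensorInfo(info);

    auto* output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
}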
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index a65012eef4..f93fa77b0d 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -72,8 +72,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
armnn::Convolution2dDescriptor descriptor;
auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
conv->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
armnn::FullyConnectedDescriptor descriptor;
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
fc->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
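Fp32NetworkToBf16ConverterTests.cpp swaps the handle type on both the weight and the bias slot of Convolution2d and FullyConnected layers. A minimal sketch of the convolution variant; weights, bias, and infoFP32 are assumed to be FP32 tensors built as in the test, and the layer header is assumed to come from src/armnn/layers:

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>
#include <layers/Convolution2dLayer.hpp>

#include <memory>

void AttachConvConstants(armnn::Convolution2dLayer* conv,
                         const armnn::ConstTensor& weights,
                         const armnn::ConstTensor& bias,
                         const armnn::TensorInfo& infoFP32)
{
    // Weight and bias use the same handle type; the FP32-to-BF16 converter
    // under test later rewrites the constant data these handles own.
    conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    conv->m_Bias   = std::make_unique<armnn::ScopedTensorHandle>(bias);
    conv->GetOutputSlot().SetTensorInfo(infoFP32);
}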