path: root/src/armnn
author     James Conroy <james.conroy@arm.com>                  2021-04-27 17:13:27 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2021-05-06 14:40:40 +0000
commit     1f58f03d82c482626b1b4673b6c0e25da4338fb5
tree       e92451e00d459a2fc0d870694460f482aa4c77ae /src/armnn
parent     a7a12f5c3654da554ad6197beff0f0fc54681c92
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited classes by removing
  'Cpu' from the names.
* New renamed classes: ConstTensorHandle, TensorHandle, ScopedTensorHandle,
  PassthroughTensorHandle, ConstPassthroughTensorHandle.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
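For illustration only (not part of the commit): a minimal sketch of how code that
holds a layer's constant data changes under the rename. It assumes ArmNN's internal
backendsCommon headers are on the include path; the tensor shape and values here are
made up for the example.

// Before this patch:
//   #include <backendsCommon/CpuTensorHandle.hpp>
//   std::shared_ptr<ConstCpuTensorHandle> handle =
//       std::make_shared<ScopedCpuTensorHandle>(constTensor);
// After this patch only the names change; the behaviour is identical:
#include <backendsCommon/TensorHandle.hpp>

#include <armnn/Tensor.hpp>

#include <memory>
#include <vector>

int main()
{
    std::vector<float> values = { 1.0f, 2.0f, 3.0f, 4.0f };
    armnn::TensorInfo info({ 2, 2 }, armnn::DataType::Float32);
    armnn::ConstTensor constTensor(info, values.data());

    // ScopedTensorHandle copies the constant data and owns it, exactly as
    // ScopedCpuTensorHandle did before the rename.
    std::shared_ptr<armnn::ConstTensorHandle> handle =
        std::make_shared<armnn::ScopedTensorHandle>(constTensor);

    return 0;
}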
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/Layer.cpp                                                  4
-rw-r--r--  src/armnn/Layer.hpp                                                  4
-rw-r--r--  src/armnn/LoadedNetwork.cpp                                         14
-rw-r--r--  src/armnn/Network.cpp                                              140
-rw-r--r--  src/armnn/WorkingMemHandle.cpp                                       2
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp                         2
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.hpp                        10
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp                             2
-rw-r--r--  src/armnn/layers/ConstantLayer.cpp                                   2
-rw-r--r--  src/armnn/layers/ConstantLayer.hpp                                   4
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp                              2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.hpp                              6
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp                     2
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.hpp                     6
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp                       2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.hpp                       4
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp                             2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.hpp                             6
-rw-r--r--  src/armnn/layers/LstmLayer.cpp                                       2
-rw-r--r--  src/armnn/layers/LstmLayer.hpp                                      44
-rw-r--r--  src/armnn/layers/MeanLayer.cpp                                       2
-rw-r--r--  src/armnn/layers/PadLayer.cpp                                        2
-rw-r--r--  src/armnn/layers/PreluLayer.cpp                                      2
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp                                      2
-rw-r--r--  src/armnn/layers/QLstmLayer.hpp                                     44
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp                              2
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.hpp                             26
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp                     2
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.hpp                     6
-rw-r--r--  src/armnn/optimizations/AddBroadcastReshapeLayer.hpp                 4
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp                        18
-rw-r--r--  src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp                 2
-rw-r--r--  src/armnn/optimizations/FuseBatchNorm.hpp                            4
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                                  116
-rw-r--r--  src/armnn/test/GraphTests.cpp                                        4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                                   56
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                              84
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                                 2
-rw-r--r--  src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp       4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp         4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp    2
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp    2
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp     8
43 files changed, 329 insertions, 329 deletions
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 782f1939b0..e0d988d8ea 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -7,8 +7,8 @@
#include "Graph.hpp"
#include <ProfilingService.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <fmt/format.h>
@@ -283,7 +283,7 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
void Layer::ReleaseConstantData()
{
// Now free up the static data.
- OperateOnConstantTensors([](std::shared_ptr<ConstCpuTensorHandle>& handle)
+ OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
{
handle.reset();
});
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index d43545c01f..76f9b41f4c 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -200,7 +200,7 @@ inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnected
inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); }
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
// Base layer class
@@ -391,7 +391,7 @@ protected:
LayerType* CloneBase(Graph& graph, Params&& ... params) const;
// Retrieve the Handles to the constants
- using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstCpuTensorHandle>>>;
+ using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
// "Blob"
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 5c5a963212..46eb9883fb 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -16,7 +16,7 @@
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemSyncWorkload.hpp>
@@ -484,7 +484,7 @@ public:
auto inputTensor = inputTensorPair.second;
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
LayerBindingId layerId = inputTensorPair.first;
m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId);
@@ -495,7 +495,7 @@ public:
auto outputTensor = outputTensorPair.second;
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
LayerBindingId layerId = outputTensorPair.first;
m_OutputTensorPins.emplace_back(std::move(tensorHandle), outputTensor.GetInfo(), layerId);
@@ -864,7 +864,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer,
{
// This assumes a CPU Tensor handle
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(),
inputTensor.GetMemoryArea());
void* mem = tensorHandle->Map(false);
@@ -884,7 +884,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer,
else
{
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(), inputTensor.GetMemoryArea());
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(), inputTensor.GetMemoryArea());
auto copyFunc = [](void* dst, const void* src, size_t size)
{
@@ -928,7 +928,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outp
if (CheckFlag(importFlags, MemorySource::Malloc))
{
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(),
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(),
outputTensor.GetMemoryArea());
void* mem = tensorHandle->Map(false);
@@ -964,7 +964,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outp
};
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(),
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(),
outputTensor.GetMemoryArea());
CopyTensorContentsGeneric(inputTensorHandle, tensorHandle.get(), copyFunc);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c99690e4a6..b79576c87e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,7 +12,7 @@
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
@@ -671,7 +671,7 @@ LayerT* ConvertBf16ToFp32Weight(Layer* l)
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
@@ -1752,10 +1752,10 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedD
if (fullyConnectedDescriptor.m_ConstantWeights)
{
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights.value());
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights.value());
if (fullyConnectedDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
}
@@ -1816,11 +1816,11 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDes
const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1864,11 +1864,11 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1913,7 +1913,7 @@ IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::Detect
{
const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
- layer->m_Anchors = std::make_shared<ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
return layer;
}
@@ -2011,10 +2011,10 @@ IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizat
{
const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
- layer->m_Mean = std::make_shared<ScopedCpuTensorHandle>(mean);
- layer->m_Variance = std::make_shared<ScopedCpuTensorHandle>(variance);
- layer->m_Beta = std::make_shared<ScopedCpuTensorHandle>(beta);
- layer->m_Gamma = std::make_shared<ScopedCpuTensorHandle>(gamma);
+ layer->m_Mean = std::make_shared<ScopedTensorHandle>(mean);
+ layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
+ layer->m_Beta = std::make_shared<ScopedTensorHandle>(beta);
+ layer->m_Gamma = std::make_shared<ScopedTensorHandle>(gamma);
return layer;
}
@@ -2071,7 +2071,7 @@ IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const
{
auto layer = m_Graph->AddLayer<ConstantLayer>(name);
- layer->m_LayerOutput = std::make_shared<ScopedCpuTensorHandle>(input);
+ layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);
return layer;
}
@@ -2107,23 +2107,23 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
//Lstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
//Lstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2145,11 +2145,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when CIFG is disabled.");
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
//Lstm projection parameters
@@ -2161,11 +2161,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when projection is enabled.");
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
if(params.m_ProjectionBias != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2181,7 +2181,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
if(params.m_CellToForgetWeights == nullptr)
@@ -2196,9 +2196,9 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
//Lstm Layer Normalization params
@@ -2212,7 +2212,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled and CIFG disabled.");
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
if(params.m_ForgetLayerNormWeights == nullptr)
@@ -2231,11 +2231,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled.");
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}
@@ -2330,11 +2330,11 @@ IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeCo
const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (descriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -2366,33 +2366,33 @@ IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputPa
// InputToX weights
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToInputWeights());
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToForgetWeights());
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToCellWeights());
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToOutputWeights());
// RecurrentToX weights
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToInputWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToForgetWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToCellWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToOutputWeights());
// Bias
layer->m_QuantizedLstmParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetInputGateBias());
layer->m_QuantizedLstmParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetForgetGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetForgetGateBias());
layer->m_QuantizedLstmParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetCellBias());
+ std::make_shared<ScopedTensorHandle>(params.GetCellBias());
layer->m_QuantizedLstmParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetOutputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetOutputGateBias());
return layer;
}
@@ -2405,23 +2405,23 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
// QLstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
// QLstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2443,11 +2443,11 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
// QLstm Projection parameters
@@ -2459,13 +2459,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
// Projection bias is optional even if projection is enabled
if(params.m_ProjectionWeights != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2491,13 +2491,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
// QLstm Layer Normalization params
@@ -2526,15 +2526,15 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}
diff --git a/src/armnn/WorkingMemHandle.cpp b/src/armnn/WorkingMemHandle.cpp
index b54c5baddd..94d796eced 100644
--- a/src/armnn/WorkingMemHandle.cpp
+++ b/src/armnn/WorkingMemHandle.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "backendsCommon/CpuTensorHandle.hpp"
+#include "backendsCommon/TensorHandle.hpp"
#include "WorkingMemHandle.hpp"
#include "Network.hpp"
#include <armnn/backends/IMemoryManager.hpp>
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 83ed45aa33..e3ee643ac5 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index bf9e4b7917..2777633a34 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -9,20 +9,20 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a batch normalization operation.
class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDescriptor>
{
public:
/// A unique pointer to store Mean values
- std::shared_ptr<ConstCpuTensorHandle> m_Mean;
+ std::shared_ptr<ConstTensorHandle> m_Mean;
/// A unique pointer to store Variance values
- std::shared_ptr<ConstCpuTensorHandle> m_Variance;
+ std::shared_ptr<ConstTensorHandle> m_Variance;
/// A unique pointer to store Beta values
- std::shared_ptr<ConstCpuTensorHandle> m_Beta;
+ std::shared_ptr<ConstTensorHandle> m_Beta;
/// A unique pointer to store Gamma values
- std::shared_ptr<ConstCpuTensorHandle> m_Gamma;
+ std::shared_ptr<ConstTensorHandle> m_Gamma;
/// Makes a workload for the BatchNormalization type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index a13b0b731a..4b33b96229 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -12,7 +12,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index eb28187d25..feeb762263 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -6,7 +6,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index ff4c03775f..ead8816684 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -9,7 +9,7 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// A layer that the constant data can be bound to.
class ConstantLayer : public Layer
@@ -43,7 +43,7 @@ public:
void ExecuteStrategy(IStrategy& strategy) const override;
- std::shared_ptr<ConstCpuTensorHandle> m_LayerOutput;
+ std::shared_ptr<ConstTensorHandle> m_LayerOutput;
protected:
/// Constructor to create a ConstantLayer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index d7a7a330ef..b7bf0462d8 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <string>
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 2d5ab194de..a33cda27cb 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -9,7 +9,7 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a convolution 2d operation.
class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
@@ -17,9 +17,9 @@ class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the Convolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 3511ab58d0..b96c567504 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <string>
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index c83aa434d5..51f6ea9453 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a depthwise convolution 2d operation.
class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor>
{
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the DepthwiseConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index b5086172dd..bd94d1d281 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -8,7 +8,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index e40966a19c..b409134c1c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -10,14 +10,14 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a detection postprocess operator.
class DetectionPostProcessLayer : public LayerWithParameters<DetectionPostProcessDescriptor>
{
public:
/// A unique pointer to store Anchor values.
- std::shared_ptr<ConstCpuTensorHandle> m_Anchors;
+ std::shared_ptr<ConstTensorHandle> m_Anchors;
/// Makes a workload for the DetectionPostProcess type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 79d56c0bd7..9d4f57d260 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index c45b081c85..7fc7b0d596 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a fully connected operation.
class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
{
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the FullyConnected type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 403d911e7e..0fea668b97 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 80b57a88f7..f711ea7607 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -9,68 +9,68 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct LstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_OutputLayerNormWeights;
};
struct LstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
};
struct LstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size].
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionBias;
};
struct LstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToOutputWeights;
};
struct LstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
/// This layer represents a LSTM operation.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index b5c7708fc3..9d4265cdcf 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index f620a6e9a4..2c53f20703 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -6,7 +6,7 @@
#include "PadLayer.hpp"
#include "LayerCloneBase.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index f9f534e648..18d81ae9b6 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -9,9 +9,9 @@
#include <armnn/utility/NumericCast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
namespace armnn
{
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 72b020f109..0294afdc0d 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 09a020dc1d..38a0464da6 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -9,70 +9,70 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct QLstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
struct QLstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionBias;
};
struct QLstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToOutputWeights;
};
struct QLstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
};
struct QLstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_OutputLayerNormWeights;
};
/// This layer represents a QLstm operation.
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 4d0dab9505..be50f4863b 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/QuantizedLstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index ca97a6bb65..25cc7b7d8b 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -9,36 +9,36 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct QuantizedLstmParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
/// This layer represents a QuantizedLstm operation.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index c0a7dfa1cd..c774dd0bbf 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -8,7 +8,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
using namespace armnnUtils;
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 53e73491d6..1b17dac3c6 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a 2D transpose convolution operation.
class TransposeConvolution2dLayer : public LayerWithParameters<TransposeConvolution2dDescriptor>
{
public:
/// A unique pointer to store weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the TransposeConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index aa00b9913c..4cfe2e4898 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -8,7 +8,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
@@ -70,7 +70,7 @@ public:
{
ConstantLayer& constantLayer = static_cast<ConstantLayer&>(parentLayer);
- constantLayer.m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(
+ constantLayer.m_LayerOutput = std::make_unique<ScopedTensorHandle>(
ConstTensor(reshapeInfo, constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
}
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index df5a5b4f67..66b3d2685a 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
@@ -23,7 +23,7 @@ namespace optimizations
struct BFloat16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -37,14 +37,14 @@ struct BFloat16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -58,14 +58,14 @@ struct Float16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToBFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -79,14 +79,14 @@ struct Float32ToBFloat16
TensorInfo newInfo(info.GetShape(), DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -100,7 +100,7 @@ struct Float32ToFloat16
TensorInfo newInfo(info.GetShape(), DataType::Float16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
index a0856a485b..6c80e740be 100644
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
@@ -35,7 +35,7 @@ inline LayerT* ConvertWeight(Layer* l)
TensorInfo newInfo(info);
newInfo.SetDataType(DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index 9d25379930..3fb4b34d28 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -162,8 +162,8 @@ public:
auto& newConv2dLayer = *graph.InsertNewLayer<ConvLayer>(base.GetInputSlot(0),
convDescriptor,
name.c_str());
- newConv2dLayer.m_Weight = std::make_unique<ScopedCpuTensorHandle>(fusedWeightsTensor);
- newConv2dLayer.m_Bias = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(fusedBiasTensor));
+ newConv2dLayer.m_Weight = std::make_unique<ScopedTensorHandle>(fusedWeightsTensor);
+ newConv2dLayer.m_Bias = std::make_unique<ScopedTensorHandle>(ConstTensor(fusedBiasTensor));
// Reconnects with original parent.
newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 3ea2c35061..12623e62a0 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -14,9 +14,9 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -353,10 +353,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -411,10 +411,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlob
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -492,8 +492,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -555,8 +555,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -639,8 +639,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
TensorShape inputShape = TensorShape{1, 32, 149, 149};
TensorShape outputShape = TensorShape{1, 32, 147, 147};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -692,23 +692,23 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -724,9 +724,9 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
if (layerDesc.m_PeepholeEnabled)
{
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -814,27 +814,27 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
// Allocate weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
@@ -977,27 +977,27 @@ std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory&
armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
// Create and allocate tensors
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
layer->m_BasicParameters.m_InputToCellWeights->Allocate();
@@ -1093,8 +1093,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
(TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1148,7 +1148,7 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
layer->m_Weight->Allocate();
// Creates extra layers.
@@ -1200,8 +1200,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1245,8 +1245,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -2108,7 +2108,7 @@ std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFac
armnn::TensorInfo outputTensorInfo(outputShape, DataType);
auto constant = graph.AddLayer<ConstantLayer>("constant");
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(outputTensorInfo);
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
BOOST_TEST_CHECKPOINT("created constant layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 602575b352..69f96d43a3 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -14,7 +14,7 @@
#include <armnn/backends/IBackendInternal.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
#include <boost/test/unit_test.hpp>
@@ -603,7 +603,7 @@ BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
float weight = 1.0f;
armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);
- fcLayer->m_Weight = std::make_shared<armnn::ScopedCpuTensorHandle>(constTensor);;
+ fcLayer->m_Weight = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
// point sharedWeightPtr to graph1's const tensor
sharedWeightPtr = fcLayer->m_Weight->GetConstTensor<float>();
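
One detail worth noting in the hunk above: m_Weight is held in a std::shared_ptr here, so two graphs can alias the same ScopedTensorHandle, and GetConstTensor<float>() returns a read-only pointer into the shared storage. A sketch built only from the calls shown above:

    #include <armnn/Tensor.hpp>
    #include <backendsCommon/TensorHandle.hpp>
    #include <memory>

    void SharedConstTensorSketch()
    {
        float weight = 1.0f;
        armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);

        auto handle = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
        auto alias  = handle; // second owner; the underlying data is not copied

        const float* stored = alias->GetConstTensor<float>(); // read-only view
        // stored[0] == 1.0f; the storage lives until the last shared_ptr is released.
    }
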
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 7e8a898293..d0734d83be 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -18,9 +18,9 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -45,23 +45,23 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -76,11 +76,11 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (!layerDesc.m_CifgEnabled)
{
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_CifgParameters.m_InputToInputWeights->Allocate();
layer->m_CifgParameters.m_RecurrentToInputWeights->Allocate();
@@ -89,9 +89,9 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (layerDesc.m_ProjectionEnabled)
{
- layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize, numUnits }, DataType::Float32));
- layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize }, DataType::Float32));
layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
@@ -101,13 +101,13 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
{
if (!layerDesc.m_CifgEnabled)
{
- layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
}
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -276,7 +276,7 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
input->GetOutputSlot().SetTensorInfo(inputInfo);
Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -326,7 +326,7 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
input->GetOutputSlot().SetTensorInfo(inputInfo);
DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -529,7 +529,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
descriptor.m_MaxDetections = 3;
DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
- layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
@@ -571,7 +571,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
- conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ conv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -1211,16 +1211,16 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
conv->GetOutputSlot().SetTensorInfo(outputInfo);
batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
- conv->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
- batchNorm->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
- batchNorm->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
- batchNorm->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
- batchNorm->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
+ conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
+ batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
+ batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
+ batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
+ batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
if (convolution2dDescriptor.m_BiasEnabled)
{
std::vector<float> biasVector = { 11 };
ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
- conv->m_Bias = std::make_unique<ScopedCpuTensorHandle>(bias);
+ conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
}
// Connect layers
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 25b0feaded..fa3f400569 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -9,8 +9,8 @@
#include <Graph.hpp>
#include <InternalTypes.hpp>
#include <layers/FullyConnectedLayer.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <string>
@@ -240,7 +240,7 @@ BOOST_AUTO_TEST_CASE(ConstantTesst)
const float Datum = 0.0f;
ConstTensor output0({outputShape, DataType::Float32}, &Datum);
- layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(output0);
+ layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(Convolution2dTest)
const float Datum = 0.0f;
ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
}
@@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
const float Datum = 0.0f;
ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
}
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
descriptor,
"detectionpostprocess");
- layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchorsTensor);
+ layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
}
@@ -415,7 +415,7 @@ BOOST_AUTO_TEST_CASE(FullyConnectedTest)
const float Datum = 0.0f;
ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
}
@@ -469,18 +469,18 @@ BOOST_AUTO_TEST_CASE(LstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
}
@@ -557,18 +557,18 @@ BOOST_AUTO_TEST_CASE(QLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
@@ -585,18 +585,18 @@ BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 73ef8bea91..ecb876dc7a 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <fstream>
#include <map>
#include <queue>
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 4523e70437..d0d728bfab 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(info1, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(info1, &tensor));
input->GetOutputSlot().SetTensorInfo(info0);
constant->GetOutputSlot().SetTensorInfo(info1);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
float tensor[] = { 2.0f };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(constantTermInfo, &tensor));
add1->GetOutputSlot().SetTensorInfo(outputInfo);
input->GetOutputSlot().Connect(add1->GetInputSlot(0));
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index bb8e674b56..e4c1f2f413 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 12df462456..1dfe7f431c 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 7d7c6b2b0a..1ddf5262e8 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index a65012eef4..f93fa77b0d 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -72,8 +72,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
armnn::Convolution2dDescriptor descriptor;
auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
conv->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
armnn::FullyConnectedDescriptor descriptor;
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
fc->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");