aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJames Conroy <james.conroy@arm.com>2021-04-27 17:13:27 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2021-05-07 18:12:09 +0100
commitdca769b9673a1e197258f7b35637b2a17f1a9e8b (patch)
treee92451e00d459a2fc0d870694460f482aa4c77ae
parenta7a12f5c3654da554ad6197beff0f0fc54681c92 (diff)
downloadarmnn-dca769b9673a1e197258f7b35637b2a17f1a9e8b.tar.gz
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited classes by removing 'Cpu' from aliases. * New renamed classes: ConstTensorHandle, TensorHandle, ScopedTensorHandle, PassthroughTensorHandle, ConstPassthroughTensorHandle. Signed-off-by: James Conroy <james.conroy@arm.com> Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
-rw-r--r--include/armnn/backends/CMakeLists.txt1
-rw-r--r--include/armnn/backends/CpuTensorHandleFwd.hpp3
-rw-r--r--include/armnn/backends/TensorHandleFwd.hpp17
-rw-r--r--src/armnn/Layer.cpp4
-rw-r--r--src/armnn/Layer.hpp4
-rw-r--r--src/armnn/LoadedNetwork.cpp14
-rw-r--r--src/armnn/Network.cpp140
-rw-r--r--src/armnn/WorkingMemHandle.cpp2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp2
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp10
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp2
-rw-r--r--src/armnn/layers/ConstantLayer.cpp2
-rw-r--r--src/armnn/layers/ConstantLayer.hpp4
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp6
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp6
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp2
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp2
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp6
-rw-r--r--src/armnn/layers/LstmLayer.cpp2
-rw-r--r--src/armnn/layers/LstmLayer.hpp44
-rw-r--r--src/armnn/layers/MeanLayer.cpp2
-rw-r--r--src/armnn/layers/PadLayer.cpp2
-rw-r--r--src/armnn/layers/PreluLayer.cpp2
-rw-r--r--src/armnn/layers/QLstmLayer.cpp2
-rw-r--r--src/armnn/layers/QLstmLayer.hpp44
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp2
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp26
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp2
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp6
-rw-r--r--src/armnn/optimizations/AddBroadcastReshapeLayer.hpp4
-rw-r--r--src/armnn/optimizations/ConvertConstants.hpp18
-rw-r--r--src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp2
-rw-r--r--src/armnn/optimizations/FuseBatchNorm.hpp4
-rw-r--r--src/armnn/test/CreateWorkload.hpp116
-rw-r--r--src/armnn/test/GraphTests.cpp4
-rw-r--r--src/armnn/test/OptimizerTests.cpp56
-rw-r--r--src/armnn/test/ShapeInferenceTests.cpp84
-rw-r--r--src/armnn/test/SubgraphViewTests.cpp2
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp4
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp4
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp2
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp2
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp8
-rw-r--r--src/backends/backendsCommon/CMakeLists.txt5
-rw-r--r--src/backends/backendsCommon/CpuTensorHandleFwd.hpp9
-rw-r--r--src/backends/backendsCommon/MemCopyWorkload.cpp2
-rw-r--r--src/backends/backendsCommon/MemCopyWorkload.hpp2
-rw-r--r--src/backends/backendsCommon/MemImportWorkload.cpp2
-rw-r--r--src/backends/backendsCommon/MemImportWorkload.hpp2
-rw-r--r--src/backends/backendsCommon/MemSyncWorkload.cpp2
-rw-r--r--src/backends/backendsCommon/MemSyncWorkload.hpp2
-rw-r--r--src/backends/backendsCommon/TensorHandle.cpp (renamed from src/backends/backendsCommon/CpuTensorHandle.cpp)54
-rw-r--r--src/backends/backendsCommon/TensorHandle.hpp (renamed from src/backends/backendsCommon/CpuTensorHandle.hpp)94
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp2
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp142
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp2
-rw-r--r--src/backends/backendsCommon/WorkloadUtils.cpp4
-rw-r--r--src/backends/backendsCommon/WorkloadUtils.hpp6
-rw-r--r--src/backends/backendsCommon/common.mk2
-rw-r--r--src/backends/backendsCommon/test/CommonTestUtils.hpp6
-rw-r--r--src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp30
-rw-r--r--src/backends/backendsCommon/test/DynamicBackendTests.hpp4
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp106
-rw-r--r--src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp22
-rw-r--r--src/backends/backendsCommon/test/WorkloadDataValidation.cpp58
-rw-r--r--src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp26
-rw-r--r--src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp4
-rw-r--r--src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp46
-rw-r--r--src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp4
-rw-r--r--src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp4
-rw-r--r--src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp6
-rw-r--r--src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp2
-rw-r--r--src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp2
-rw-r--r--src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp244
-rw-r--r--src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp6
-rw-r--r--src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp2
-rw-r--r--src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp12
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp2
-rw-r--r--src/backends/cl/test/ClCreateWorkloadTests.cpp4
-rw-r--r--src/backends/cl/test/ClLayerSupportTests.cpp2
-rw-r--r--src/backends/cl/test/Fp16SupportTest.cpp2
-rw-r--r--src/backends/cl/test/OpenClTimerTest.cpp10
-rw-r--r--src/backends/cl/workloads/ClActivationWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClAdditionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClArgMinMaxWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClComparisonWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConcatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConstantWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp4
-rw-r--r--src/backends/cl/workloads/ClDequantizeWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClDivisionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClFullyConnectedWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClLstmFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClMaximumWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClMinimumWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClMultiplicationWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClPreluWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClQuantizeWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClReshapeWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClResizeWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClSplitterWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClStackWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClStridedSliceWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClSubtractionWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClWorkloadUtils.hpp4
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp2
-rw-r--r--src/backends/neon/test/NeonLayerSupportTests.cpp2
-rw-r--r--src/backends/neon/test/NeonTimerTest.cpp2
-rw-r--r--src/backends/neon/workloads/NeonAdditionWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonComparisonWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonConcatWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonConstantWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp4
-rw-r--r--src/backends/neon/workloads/NeonDequantizeWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonDivisionWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonMaximumWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonMinimumWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonResizeWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonSplitterWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonStackWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonSubtractionWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonWorkloadUtils.hpp4
-rw-r--r--src/backends/reference/RefTensorHandle.hpp2
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp2
-rw-r--r--src/backends/reference/test/RefLayerSupportTests.cpp2
-rw-r--r--src/backends/reference/test/RefTensorHandleTests.cpp4
-rw-r--r--src/backends/reference/workloads/LstmUtils.cpp6
-rw-r--r--src/backends/reference/workloads/LstmUtils.hpp4
-rw-r--r--src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp8
-rw-r--r--src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp8
-rw-r--r--src/backends/reference/workloads/RefConvolution2dWorkload.cpp4
-rw-r--r--src/backends/reference/workloads/RefConvolution2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp4
-rw-r--r--src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp2
-rw-r--r--src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp2
-rw-r--r--src/backends/reference/workloads/RefFullyConnectedWorkload.cpp4
-rw-r--r--src/backends/reference/workloads/RefFullyConnectedWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefLstmWorkload.cpp42
-rw-r--r--src/backends/reference/workloads/RefLstmWorkload.hpp42
-rw-r--r--src/backends/reference/workloads/RefQLstmWorkload.cpp52
-rw-r--r--src/backends/reference/workloads/RefQLstmWorkload.hpp52
-rw-r--r--src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp4
-rw-r--r--src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp6
-rw-r--r--src/backends/reference/workloads/RefWorkloadUtils.hpp2
-rw-r--r--src/dynamic/sample/SampleDynamicWorkloadFactory.cpp6
-rw-r--r--src/dynamic/sample/SampleTensorHandle.hpp2
166 files changed, 1015 insertions, 992 deletions
diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt
index 94e757fe8a..58d0abf872 100644
--- a/include/armnn/backends/CMakeLists.txt
+++ b/include/armnn/backends/CMakeLists.txt
@@ -5,6 +5,7 @@
list(APPEND armnnBackendsAPI_sources
CpuTensorHandleFwd.hpp
+ TensorHandleFwd.hpp
DynamicBackend.hpp
IBackendInternal.hpp
IBackendContext.hpp
diff --git a/include/armnn/backends/CpuTensorHandleFwd.hpp b/include/armnn/backends/CpuTensorHandleFwd.hpp
index c4b58a2bb2..a5a28d8135 100644
--- a/include/armnn/backends/CpuTensorHandleFwd.hpp
+++ b/include/armnn/backends/CpuTensorHandleFwd.hpp
@@ -3,6 +3,9 @@
// SPDX-License-Identifier: MIT
//
+// This file is deprecated and will be removed soon.
+// Please use the new file include/armnn/backends/TensorHandleFwd.hpp instead.
+
#pragma once
namespace armnn
diff --git a/include/armnn/backends/TensorHandleFwd.hpp b/include/armnn/backends/TensorHandleFwd.hpp
new file mode 100644
index 0000000000..9faa499932
--- /dev/null
+++ b/include/armnn/backends/TensorHandleFwd.hpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnn
+{
+
+class ConstTensorHandle;
+class TensorHandle;
+class ScopedTensorHandle;
+class PassthroughTensorHandle;
+class ConstPassthroughTensorHandle;
+
+} // namespace armnn
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 782f1939b0..e0d988d8ea 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -7,8 +7,8 @@
#include "Graph.hpp"
#include <ProfilingService.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <fmt/format.h>
@@ -283,7 +283,7 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
void Layer::ReleaseConstantData()
{
// Now free up the static data.
- OperateOnConstantTensors([](std::shared_ptr<ConstCpuTensorHandle>& handle)
+ OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
{
handle.reset();
});
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index d43545c01f..76f9b41f4c 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -200,7 +200,7 @@ inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnected
inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); }
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
// Base layer class
@@ -391,7 +391,7 @@ protected:
LayerType* CloneBase(Graph& graph, Params&& ... params) const;
// Retrieve the Handles to the constants
- using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstCpuTensorHandle>>>;
+ using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
// "Blob"
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 5c5a963212..46eb9883fb 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -16,7 +16,7 @@
#include <armnn/Logging.hpp>
#include <armnn/utility/Assert.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemSyncWorkload.hpp>
@@ -484,7 +484,7 @@ public:
auto inputTensor = inputTensorPair.second;
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea());
LayerBindingId layerId = inputTensorPair.first;
m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId);
@@ -495,7 +495,7 @@ public:
auto outputTensor = outputTensorPair.second;
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea());
LayerBindingId layerId = outputTensorPair.first;
m_OutputTensorPins.emplace_back(std::move(tensorHandle), outputTensor.GetInfo(), layerId);
@@ -864,7 +864,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer,
{
// This assumes a CPU Tensor handle
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(),
inputTensor.GetMemoryArea());
void* mem = tensorHandle->Map(false);
@@ -884,7 +884,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer,
else
{
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(), inputTensor.GetMemoryArea());
+ std::make_unique<ConstPassthroughTensorHandle>(inputTensor.GetInfo(), inputTensor.GetMemoryArea());
auto copyFunc = [](void* dst, const void* src, size_t size)
{
@@ -928,7 +928,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outp
if (CheckFlag(importFlags, MemorySource::Malloc))
{
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(),
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(),
outputTensor.GetMemoryArea());
void* mem = tensorHandle->Map(false);
@@ -964,7 +964,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outp
};
std::unique_ptr<ITensorHandle> tensorHandle =
- std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(),
+ std::make_unique<PassthroughTensorHandle>(outputTensor.GetInfo(),
outputTensor.GetMemoryArea());
CopyTensorContentsGeneric(inputTensorHandle, tensorHandle.get(), copyFunc);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c99690e4a6..b79576c87e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,7 +12,7 @@
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
@@ -671,7 +671,7 @@ LayerT* ConvertBf16ToFp32Weight(Layer* l)
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
@@ -1752,10 +1752,10 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedD
if (fullyConnectedDescriptor.m_ConstantWeights)
{
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights.value());
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights.value());
if (fullyConnectedDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
}
@@ -1816,11 +1816,11 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDes
const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1864,11 +1864,11 @@ IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (convolution2dDescriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -1913,7 +1913,7 @@ IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::Detect
{
const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
- layer->m_Anchors = std::make_shared<ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_shared<ScopedTensorHandle>(anchors);
return layer;
}
@@ -2011,10 +2011,10 @@ IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizat
{
const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name);
- layer->m_Mean = std::make_shared<ScopedCpuTensorHandle>(mean);
- layer->m_Variance = std::make_shared<ScopedCpuTensorHandle>(variance);
- layer->m_Beta = std::make_shared<ScopedCpuTensorHandle>(beta);
- layer->m_Gamma = std::make_shared<ScopedCpuTensorHandle>(gamma);
+ layer->m_Mean = std::make_shared<ScopedTensorHandle>(mean);
+ layer->m_Variance = std::make_shared<ScopedTensorHandle>(variance);
+ layer->m_Beta = std::make_shared<ScopedTensorHandle>(beta);
+ layer->m_Gamma = std::make_shared<ScopedTensorHandle>(gamma);
return layer;
}
@@ -2071,7 +2071,7 @@ IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const
{
auto layer = m_Graph->AddLayer<ConstantLayer>(name);
- layer->m_LayerOutput = std::make_shared<ScopedCpuTensorHandle>(input);
+ layer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(input);
return layer;
}
@@ -2107,23 +2107,23 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
//Lstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
//Lstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2145,11 +2145,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when CIFG is disabled.");
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
//Lstm projection parameters
@@ -2161,11 +2161,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when projection is enabled.");
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
if(params.m_ProjectionBias != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2181,7 +2181,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
if(params.m_CellToForgetWeights == nullptr)
@@ -2196,9 +2196,9 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
//Lstm Layer Normalization params
@@ -2212,7 +2212,7 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled and CIFG disabled.");
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
if(params.m_ForgetLayerNormWeights == nullptr)
@@ -2231,11 +2231,11 @@ IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor& descriptor,
"when layer normalization is enabled.");
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}
@@ -2330,11 +2330,11 @@ IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeCo
const auto layer = m_Graph->AddLayer<TransposeConvolution2dLayer>(descriptor, name);
- layer->m_Weight = std::make_shared<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
if (descriptor.m_BiasEnabled)
{
- layer->m_Bias = std::make_shared<ScopedCpuTensorHandle>(biases.value());
+ layer->m_Bias = std::make_shared<ScopedTensorHandle>(biases.value());
}
return layer;
@@ -2366,33 +2366,33 @@ IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputPa
// InputToX weights
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToInputWeights());
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToForgetWeights());
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToCellWeights());
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetInputToOutputWeights());
// RecurrentToX weights
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToInputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToInputWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToForgetWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToForgetWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToCellWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToCellWeights());
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(params.GetRecurrentToOutputWeights());
+ std::make_shared<ScopedTensorHandle>(params.GetRecurrentToOutputWeights());
// Bias
layer->m_QuantizedLstmParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetInputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetInputGateBias());
layer->m_QuantizedLstmParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetForgetGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetForgetGateBias());
layer->m_QuantizedLstmParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetCellBias());
+ std::make_shared<ScopedTensorHandle>(params.GetCellBias());
layer->m_QuantizedLstmParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(params.GetOutputGateBias());
+ std::make_shared<ScopedTensorHandle>(params.GetOutputGateBias());
return layer;
}
@@ -2405,23 +2405,23 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
// QLstm Basic Parameters
layer->m_BasicParameters.m_InputToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToForgetWeights));
layer->m_BasicParameters.m_InputToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToCellWeights));
layer->m_BasicParameters.m_InputToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToOutputWeights));
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToForgetWeights));
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToCellWeights));
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToOutputWeights));
layer->m_BasicParameters.m_ForgetGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetGateBias));
layer->m_BasicParameters.m_CellBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellBias));
layer->m_BasicParameters.m_OutputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputGateBias));
// QLstm Cifg parameters
if(!descriptor.m_CifgEnabled)
@@ -2443,11 +2443,11 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_CifgParameters.m_InputToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputToInputWeights));
layer->m_CifgParameters.m_RecurrentToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_RecurrentToInputWeights));
layer->m_CifgParameters.m_InputGateBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputGateBias));
}
// QLstm Projection parameters
@@ -2459,13 +2459,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_ProjectionParameters.m_ProjectionWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionWeights));
// Projection bias is optional even if projection is enabled
if(params.m_ProjectionWeights != nullptr)
{
layer->m_ProjectionParameters.m_ProjectionBias =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ProjectionBias));
}
}
@@ -2491,13 +2491,13 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_PeepholeParameters.m_CellToInputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToInputWeights));
}
layer->m_PeepholeParameters.m_CellToForgetWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToForgetWeights));
layer->m_PeepholeParameters.m_CellToOutputWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellToOutputWeights));
}
// QLstm Layer Normalization params
@@ -2526,15 +2526,15 @@ IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor& descriptor
}
layer->m_LayerNormParameters.m_InputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_InputLayerNormWeights));
}
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_ForgetLayerNormWeights));
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_CellLayerNormWeights));
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_shared<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ std::make_shared<ScopedTensorHandle>(*(params.m_OutputLayerNormWeights));
}
return layer;
}
diff --git a/src/armnn/WorkingMemHandle.cpp b/src/armnn/WorkingMemHandle.cpp
index b54c5baddd..94d796eced 100644
--- a/src/armnn/WorkingMemHandle.cpp
+++ b/src/armnn/WorkingMemHandle.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "backendsCommon/CpuTensorHandle.hpp"
+#include "backendsCommon/TensorHandle.hpp"
#include "WorkingMemHandle.hpp"
#include "Network.hpp"
#include <armnn/backends/IMemoryManager.hpp>
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 83ed45aa33..e3ee643ac5 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index bf9e4b7917..2777633a34 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -9,20 +9,20 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a batch normalization operation.
class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDescriptor>
{
public:
/// A unique pointer to store Mean values
- std::shared_ptr<ConstCpuTensorHandle> m_Mean;
+ std::shared_ptr<ConstTensorHandle> m_Mean;
/// A unique pointer to store Variance values
- std::shared_ptr<ConstCpuTensorHandle> m_Variance;
+ std::shared_ptr<ConstTensorHandle> m_Variance;
/// A unique pointer to store Beta values
- std::shared_ptr<ConstCpuTensorHandle> m_Beta;
+ std::shared_ptr<ConstTensorHandle> m_Beta;
/// A unique pointer to store Gamma values
- std::shared_ptr<ConstCpuTensorHandle> m_Gamma;
+ std::shared_ptr<ConstTensorHandle> m_Gamma;
/// Makes a workload for the BatchNormalization type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index a13b0b731a..4b33b96229 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -12,7 +12,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index eb28187d25..feeb762263 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -6,7 +6,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index ff4c03775f..ead8816684 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -9,7 +9,7 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// A layer that the constant data can be bound to.
class ConstantLayer : public Layer
@@ -43,7 +43,7 @@ public:
void ExecuteStrategy(IStrategy& strategy) const override;
- std::shared_ptr<ConstCpuTensorHandle> m_LayerOutput;
+ std::shared_ptr<ConstTensorHandle> m_LayerOutput;
protected:
/// Constructor to create a ConstantLayer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index d7a7a330ef..b7bf0462d8 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <string>
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 2d5ab194de..a33cda27cb 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -9,7 +9,7 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a convolution 2d operation.
class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
@@ -17,9 +17,9 @@ class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the Convolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 3511ab58d0..b96c567504 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <string>
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index c83aa434d5..51f6ea9453 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a depthwise convolution 2d operation.
class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor>
{
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the DepthwiseConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index b5086172dd..bd94d1d281 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -8,7 +8,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index e40966a19c..b409134c1c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -10,14 +10,14 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a detection postprocess operator.
class DetectionPostProcessLayer : public LayerWithParameters<DetectionPostProcessDescriptor>
{
public:
/// A unique pointer to store Anchor values.
- std::shared_ptr<ConstCpuTensorHandle> m_Anchors;
+ std::shared_ptr<ConstTensorHandle> m_Anchors;
/// Makes a workload for the DetectionPostProcess type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 79d56c0bd7..9d4f57d260 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index c45b081c85..7fc7b0d596 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a fully connected operation.
class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
{
public:
/// A unique pointer to store Weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store Bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the FullyConnected type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 403d911e7e..0fea668b97 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 80b57a88f7..f711ea7607 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -9,68 +9,68 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct LstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_OutputLayerNormWeights;
};
struct LstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
};
struct LstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size].
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionBias;
};
struct LstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToOutputWeights;
};
struct LstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units].
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
/// This layer represents a LSTM operation.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index b5c7708fc3..9d4265cdcf 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index f620a6e9a4..2c53f20703 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -6,7 +6,7 @@
#include "PadLayer.hpp"
#include "LayerCloneBase.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index f9f534e648..18d81ae9b6 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -9,9 +9,9 @@
#include <armnn/utility/NumericCast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
namespace armnn
{
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 72b020f109..0294afdc0d 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 09a020dc1d..38a0464da6 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -9,70 +9,70 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct QLstmBasicParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, inputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [num_units, outputSize] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
struct QLstmOptProjectionParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionWeights;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [output_size] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ProjectionBias;
+ std::shared_ptr<ConstTensorHandle> m_ProjectionBias;
};
struct QLstmOptPeepholeParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToForgetWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellToOutputWeights;
};
struct QLstmOptCifgParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units] (QSymmS8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
};
struct QLstmOptLayerNormParameters
{
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_InputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_ForgetLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_CellLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_CellLayerNormWeights;
/// A unique pointer to represent 1D weights tensor with dimensions [num_units] (QSymmS16).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputLayerNormWeights;
+ std::shared_ptr<ConstTensorHandle> m_OutputLayerNormWeights;
};
/// This layer represents a QLstm operation.
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 4d0dab9505..be50f4863b 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -8,7 +8,7 @@
#include <armnn/QuantizedLstmParams.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
namespace armnn
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index ca97a6bb65..25cc7b7d8b 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -9,36 +9,36 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
struct QuantizedLstmParameters
{
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, inputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_InputToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_InputToOutputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToInputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToInputWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToForgetWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToForgetWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToCellWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToCellWeights;
/// A unique pointer to represent 2D weights tensor with dimensions [outputSize, outputSize] (QAsymm8).
- std::shared_ptr<ConstCpuTensorHandle> m_RecurrentToOutputWeights;
+ std::shared_ptr<ConstTensorHandle> m_RecurrentToOutputWeights;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_InputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_InputGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_ForgetGateBias;
+ std::shared_ptr<ConstTensorHandle> m_ForgetGateBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_CellBias;
+ std::shared_ptr<ConstTensorHandle> m_CellBias;
/// A unique pointer to represent 1D bias tensor with dimensions [outputSize] (int32).
- std::shared_ptr<ConstCpuTensorHandle> m_OutputGateBias;
+ std::shared_ptr<ConstTensorHandle> m_OutputGateBias;
};
/// This layer represents a QuantizedLstm operation.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index c0a7dfa1cd..c774dd0bbf 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -8,7 +8,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
using namespace armnnUtils;
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 53e73491d6..1b17dac3c6 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -9,16 +9,16 @@
namespace armnn
{
-class ScopedCpuTensorHandle;
+class ScopedTensorHandle;
/// This layer represents a 2D transpose convolution operation.
class TransposeConvolution2dLayer : public LayerWithParameters<TransposeConvolution2dDescriptor>
{
public:
/// A unique pointer to store weight values.
- std::shared_ptr<ConstCpuTensorHandle> m_Weight;
+ std::shared_ptr<ConstTensorHandle> m_Weight;
/// A unique pointer to store bias values.
- std::shared_ptr<ConstCpuTensorHandle> m_Bias;
+ std::shared_ptr<ConstTensorHandle> m_Bias;
/// Makes a workload for the TransposeConvolution2d type.
/// @param [in] graph The graph where this layer can be found.
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index aa00b9913c..4cfe2e4898 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -8,7 +8,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
@@ -70,7 +70,7 @@ public:
{
ConstantLayer& constantLayer = static_cast<ConstantLayer&>(parentLayer);
- constantLayer.m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(
+ constantLayer.m_LayerOutput = std::make_unique<ScopedTensorHandle>(
ConstTensor(reshapeInfo, constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
}
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index df5a5b4f67..66b3d2685a 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
@@ -23,7 +23,7 @@ namespace optimizations
struct BFloat16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -37,14 +37,14 @@ struct BFloat16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float16ToFloat32
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -58,14 +58,14 @@ struct Float16ToFloat32
TensorInfo newInfo(info.GetShape(), DataType::Float32);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToBFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -79,14 +79,14 @@ struct Float32ToBFloat16
TensorInfo newInfo(info.GetShape(), DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
struct Float32ToFloat16
{
- static void Func(std::shared_ptr<ConstCpuTensorHandle>& handle)
+ static void Func(std::shared_ptr<ConstTensorHandle>& handle)
{
const TensorInfo& info = handle->GetTensorInfo();
@@ -100,7 +100,7 @@ struct Float32ToFloat16
TensorInfo newInfo(info.GetShape(), DataType::Float16);
ConstTensor newInput(newInfo, newValues);
- handle.reset(new ScopedCpuTensorHandle(newInput));
+ handle.reset(new ScopedTensorHandle(newInput));
}
}
};
diff --git a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
index a0856a485b..6c80e740be 100644
--- a/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
+++ b/src/armnn/optimizations/ConvertFp32NetworkToBf16.hpp
@@ -35,7 +35,7 @@ inline LayerT* ConvertWeight(Layer* l)
TensorInfo newInfo(info);
newInfo.SetDataType(DataType::BFloat16);
ConstTensor newInput(newInfo, newValues);
- layer->m_Weight.reset(new ScopedCpuTensorHandle(newInput));
+ layer->m_Weight.reset(new ScopedTensorHandle(newInput));
}
}
return layer;
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index 9d25379930..3fb4b34d28 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -162,8 +162,8 @@ public:
auto& newConv2dLayer = *graph.InsertNewLayer<ConvLayer>(base.GetInputSlot(0),
convDescriptor,
name.c_str());
- newConv2dLayer.m_Weight = std::make_unique<ScopedCpuTensorHandle>(fusedWeightsTensor);
- newConv2dLayer.m_Bias = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(fusedBiasTensor));
+ newConv2dLayer.m_Weight = std::make_unique<ScopedTensorHandle>(fusedWeightsTensor);
+ newConv2dLayer.m_Bias = std::make_unique<ScopedTensorHandle>(ConstTensor(fusedBiasTensor));
// Reconnects with original parent.
newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 3ea2c35061..12623e62a0 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -14,9 +14,9 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -353,10 +353,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -411,10 +411,10 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlob
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -492,8 +492,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -555,8 +555,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -639,8 +639,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
TensorShape inputShape = TensorShape{1, 32, 149, 149};
TensorShape outputShape = TensorShape{1, 32, 147, 147};
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -692,23 +692,23 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -724,9 +724,9 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
if (layerDesc.m_PeepholeEnabled)
{
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -814,27 +814,27 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
// Weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
// Allocate weights and bias
layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
@@ -977,27 +977,27 @@ std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory&
armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
// Create and allocate tensors
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedCpuTensorHandle>(recurrentWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_unique<ScopedCpuTensorHandle>(layerNormWeightsInfo);
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
layer->m_BasicParameters.m_InputToCellWeights->Allocate();
@@ -1093,8 +1093,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
(TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1148,7 +1148,7 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 2, 4, 4}, DataType)); // [ M, I, H, W ]
layer->m_Weight->Allocate();
// Creates extra layers.
@@ -1200,8 +1200,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -1245,8 +1245,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -2108,7 +2108,7 @@ std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFac
armnn::TensorInfo outputTensorInfo(outputShape, DataType);
auto constant = graph.AddLayer<ConstantLayer>("constant");
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(outputTensorInfo);
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
BOOST_TEST_CHECKPOINT("created constant layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 602575b352..69f96d43a3 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -14,7 +14,7 @@
#include <armnn/backends/IBackendInternal.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
#include <boost/test/unit_test.hpp>
@@ -603,7 +603,7 @@ BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
float weight = 1.0f;
armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);
- fcLayer->m_Weight = std::make_shared<armnn::ScopedCpuTensorHandle>(constTensor);;
+ fcLayer->m_Weight = std::make_shared<armnn::ScopedTensorHandle>(constTensor);;
// point sharedWeightPtr to graph1's const tensor
sharedWeightPtr = fcLayer->m_Weight->GetConstTensor<float>();
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 7e8a898293..d0734d83be 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -18,9 +18,9 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <boost/test/unit_test.hpp>
@@ -45,23 +45,23 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
unsigned int numUnits = 4;
unsigned int outputSize = 4;
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
@@ -76,11 +76,11 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (!layerDesc.m_CifgEnabled)
{
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_CifgParameters.m_InputToInputWeights->Allocate();
layer->m_CifgParameters.m_RecurrentToInputWeights->Allocate();
@@ -89,9 +89,9 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
if (layerDesc.m_ProjectionEnabled)
{
- layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize, numUnits }, DataType::Float32));
- layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<ScopedTensorHandle>
(TensorInfo({ outputSize }, DataType::Float32));
layer->m_ProjectionParameters.m_ProjectionWeights->Allocate();
layer->m_ProjectionParameters.m_ProjectionBias->Allocate();
@@ -101,13 +101,13 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
{
if (!layerDesc.m_CifgEnabled)
{
- layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToInputWeights->Allocate();
}
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
(TensorInfo({ numUnits }, DataType::Float32));
layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
@@ -276,7 +276,7 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
input->GetOutputSlot().SetTensorInfo(inputInfo);
Convolution2dLayer* layer = graph.AddLayer<Convolution2dLayer>(desc, "conv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -326,7 +326,7 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
input->GetOutputSlot().SetTensorInfo(inputInfo);
DepthwiseConvolution2dLayer* layer = graph.AddLayer<DepthwiseConvolution2dLayer>(desc, "depthwiseConv2d");
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
layer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -529,7 +529,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
descriptor.m_MaxDetections = 3;
DetectionPostProcessLayer* layer = graph.AddLayer<DetectionPostProcessLayer>(descriptor, "detectionPostProcess");
- layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(anchors);
+ layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(anchors);
layer->GetOutputSlot(0).SetTensorInfo(detectionBoxesInfo);
layer->GetOutputSlot(1).SetTensorInfo(detectionScoresInfo);
layer->GetOutputSlot(2).SetTensorInfo(detectionClassesInfo);
@@ -571,7 +571,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
- conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ conv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -1211,16 +1211,16 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
conv->GetOutputSlot().SetTensorInfo(outputInfo);
batchNorm->GetOutputSlot().SetTensorInfo(outputInfo);
- conv->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
- batchNorm->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta);
- batchNorm->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma);
- batchNorm->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean);
- batchNorm->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance);
+ conv->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
+ batchNorm->m_Beta = std::make_unique<ScopedTensorHandle>(beta);
+ batchNorm->m_Gamma = std::make_unique<ScopedTensorHandle>(gamma);
+ batchNorm->m_Mean = std::make_unique<ScopedTensorHandle>(mean);
+ batchNorm->m_Variance = std::make_unique<ScopedTensorHandle>(variance);
if (convolution2dDescriptor.m_BiasEnabled)
{
std::vector<float> biasVector = { 11 };
ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
- conv->m_Bias = std::make_unique<ScopedCpuTensorHandle>(bias);
+ conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
}
// Connect layers
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 25b0feaded..fa3f400569 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -9,8 +9,8 @@
#include <Graph.hpp>
#include <InternalTypes.hpp>
#include <layers/FullyConnectedLayer.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <string>
@@ -240,7 +240,7 @@ BOOST_AUTO_TEST_CASE(ConstantTesst)
const float Datum = 0.0f;
ConstTensor output0({outputShape, DataType::Float32}, &Datum);
- layer->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(output0);
+ layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -294,7 +294,7 @@ BOOST_AUTO_TEST_CASE(Convolution2dTest)
const float Datum = 0.0f;
ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
}
@@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
const float Datum = 0.0f;
ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
}
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
descriptor,
"detectionpostprocess");
- layer->m_Anchors = std::make_unique<ScopedCpuTensorHandle>(anchorsTensor);
+ layer->m_Anchors = std::make_unique<ScopedTensorHandle>(anchorsTensor);
RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
}
@@ -415,7 +415,7 @@ BOOST_AUTO_TEST_CASE(FullyConnectedTest)
const float Datum = 0.0f;
ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights);
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
}
@@ -469,18 +469,18 @@ BOOST_AUTO_TEST_CASE(LstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
}
@@ -557,18 +557,18 @@ BOOST_AUTO_TEST_CASE(QLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
@@ -585,18 +585,18 @@ BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
float Datum = 0.0f;
ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
- layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
- layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedCpuTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
+ layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<ScopedTensorHandle>(constTensor);
RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 73ef8bea91..ecb876dc7a 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <fstream>
#include <map>
#include <queue>
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 4523e70437..d0d728bfab 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -299,7 +299,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
uint8_t tensor[] = { 1, 1, 1, 1, 1 };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(info1, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(info1, &tensor));
input->GetOutputSlot().SetTensorInfo(info0);
constant->GetOutputSlot().SetTensorInfo(info1);
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
input->GetOutputSlot().SetTensorInfo(inputInfo);
constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
float tensor[] = { 2.0f };
- constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(ConstTensor(constantTermInfo, &tensor));
add1->GetOutputSlot().SetTensorInfo(outputInfo);
input->GetOutputSlot().Connect(add1->GetInputSlot(0));
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index bb8e674b56..e4c1f2f413 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 12df462456..1dfe7f431c 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 7d7c6b2b0a..1ddf5262e8 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -31,7 +31,7 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index a65012eef4..f93fa77b0d 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -72,8 +72,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
armnn::Convolution2dDescriptor descriptor;
auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ conv->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ conv->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
conv->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
@@ -142,8 +142,8 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
armnn::FullyConnectedDescriptor descriptor;
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
- fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+ fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ fc->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(bias);
fc->GetOutputSlot().SetTensorInfo(infoFP32);
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index cf6da807ca..2b48532961 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -4,9 +4,8 @@
#
list(APPEND armnnBackendsCommon_sources
- CpuTensorHandle.cpp
- CpuTensorHandle.hpp
- CpuTensorHandleFwd.hpp
+ TensorHandle.cpp
+ TensorHandle.hpp
DynamicBackend.cpp
DynamicBackend.hpp
DynamicBackendUtils.cpp
diff --git a/src/backends/backendsCommon/CpuTensorHandleFwd.hpp b/src/backends/backendsCommon/CpuTensorHandleFwd.hpp
deleted file mode 100644
index aef48b5a92..0000000000
--- a/src/backends/backendsCommon/CpuTensorHandleFwd.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-//
-// Copyright © 2019 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is depricated and will be removed soon.
-// Please use the new header in armnn/backends instead.
-// This will use the new armnn/backends header.
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
diff --git a/src/backends/backendsCommon/MemCopyWorkload.cpp b/src/backends/backendsCommon/MemCopyWorkload.cpp
index 813adefed7..946de30430 100644
--- a/src/backends/backendsCommon/MemCopyWorkload.cpp
+++ b/src/backends/backendsCommon/MemCopyWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
diff --git a/src/backends/backendsCommon/MemCopyWorkload.hpp b/src/backends/backendsCommon/MemCopyWorkload.hpp
index 12664fd527..99845f397f 100644
--- a/src/backends/backendsCommon/MemCopyWorkload.hpp
+++ b/src/backends/backendsCommon/MemCopyWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/MemImportWorkload.cpp b/src/backends/backendsCommon/MemImportWorkload.cpp
index 6584e407e9..844908f27b 100644
--- a/src/backends/backendsCommon/MemImportWorkload.cpp
+++ b/src/backends/backendsCommon/MemImportWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
diff --git a/src/backends/backendsCommon/MemImportWorkload.hpp b/src/backends/backendsCommon/MemImportWorkload.hpp
index 33297fbc6f..d3c57239bd 100644
--- a/src/backends/backendsCommon/MemImportWorkload.hpp
+++ b/src/backends/backendsCommon/MemImportWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/MemSyncWorkload.cpp b/src/backends/backendsCommon/MemSyncWorkload.cpp
index fe04a3024b..9025e665c9 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.cpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemSyncWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
diff --git a/src/backends/backendsCommon/MemSyncWorkload.hpp b/src/backends/backendsCommon/MemSyncWorkload.hpp
index 8142f180a6..7b59a0b55f 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.hpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/TensorHandle.cpp
index 192469a633..d4660d6de3 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/TensorHandle.cpp
@@ -1,11 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/Exceptions.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
@@ -28,54 +28,54 @@ TensorShape GetUnpaddedTensorStrides(const TensorInfo& tensorInfo)
return TensorShape(shape.GetNumDimensions(), strides.data());
}
-ConstCpuTensorHandle::ConstCpuTensorHandle(const TensorInfo& tensorInfo)
+ConstTensorHandle::ConstTensorHandle(const TensorInfo& tensorInfo)
: m_TensorInfo(tensorInfo)
, m_Memory(nullptr)
{
}
template <>
-const void* ConstCpuTensorHandle::GetConstTensor<void>() const
+const void* ConstTensorHandle::GetConstTensor<void>() const
{
return m_Memory;
}
-CpuTensorHandle::CpuTensorHandle(const TensorInfo& tensorInfo)
-: ConstCpuTensorHandle(tensorInfo)
+TensorHandle::TensorHandle(const TensorInfo& tensorInfo)
+: ConstTensorHandle(tensorInfo)
, m_MutableMemory(nullptr)
{
}
template <>
-void* CpuTensorHandle::GetTensor<void>() const
+void* TensorHandle::GetTensor<void>() const
{
return m_MutableMemory;
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const TensorInfo& tensorInfo)
-: CpuTensorHandle(tensorInfo)
+ScopedTensorHandle::ScopedTensorHandle(const TensorInfo& tensorInfo)
+: TensorHandle(tensorInfo)
{
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ConstTensor& tensor)
-: ScopedCpuTensorHandle(tensor.GetInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ConstTensor& tensor)
+: ScopedTensorHandle(tensor.GetInfo())
{
CopyFrom(tensor.GetMemoryArea(), tensor.GetNumBytes());
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ConstCpuTensorHandle& tensorHandle)
-: ScopedCpuTensorHandle(tensorHandle.GetTensorInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ConstTensorHandle& tensorHandle)
+: ScopedTensorHandle(tensorHandle.GetTensorInfo())
{
CopyFrom(tensorHandle.GetConstTensor<void>(), tensorHandle.GetTensorInfo().GetNumBytes());
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other)
-: CpuTensorHandle(other.GetTensorInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ScopedTensorHandle& other)
+: TensorHandle(other.GetTensorInfo())
{
CopyFrom(other);
}
-ScopedCpuTensorHandle& ScopedCpuTensorHandle::operator=(const ScopedCpuTensorHandle& other)
+ScopedTensorHandle& ScopedTensorHandle::operator=(const ScopedTensorHandle& other)
{
::operator delete(GetTensor<void>());
SetMemory(nullptr);
@@ -83,12 +83,12 @@ ScopedCpuTensorHandle& ScopedCpuTensorHandle::operator=(const ScopedCpuTensorHan
return *this;
}
-ScopedCpuTensorHandle::~ScopedCpuTensorHandle()
+ScopedTensorHandle::~ScopedTensorHandle()
{
::operator delete(GetTensor<void>());
}
-void ScopedCpuTensorHandle::Allocate()
+void ScopedTensorHandle::Allocate()
{
if (GetTensor<void>() == nullptr)
{
@@ -96,27 +96,27 @@ void ScopedCpuTensorHandle::Allocate()
}
else
{
- throw InvalidArgumentException("CpuTensorHandle::Allocate Trying to allocate a CpuTensorHandle"
+ throw InvalidArgumentException("TensorHandle::Allocate Trying to allocate a TensorHandle "
"that already has allocated memory.");
}
}
-void ScopedCpuTensorHandle::CopyOutTo(void* memory) const
+void ScopedTensorHandle::CopyOutTo(void* memory) const
{
memcpy(memory, GetTensor<void>(), GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyInFrom(const void* memory)
+void ScopedTensorHandle::CopyInFrom(const void* memory)
{
memcpy(GetTensor<void>(), memory, GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
+void ScopedTensorHandle::CopyFrom(const ScopedTensorHandle& other)
{
CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
+void ScopedTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
{
ARMNN_ASSERT(GetTensor<void>() == nullptr);
ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
@@ -128,14 +128,14 @@ void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numByte
}
}
-void PassthroughCpuTensorHandle::Allocate()
+void PassthroughTensorHandle::Allocate()
{
- throw InvalidArgumentException("PassthroughCpuTensorHandle::Allocate() should never be called");
+ throw InvalidArgumentException("PassthroughTensorHandle::Allocate() should never be called");
}
-void ConstPassthroughCpuTensorHandle::Allocate()
+void ConstPassthroughTensorHandle::Allocate()
{
- throw InvalidArgumentException("ConstPassthroughCpuTensorHandle::Allocate() should never be called");
+ throw InvalidArgumentException("ConstPassthroughTensorHandle::Allocate() should never be called");
}
} // namespace armnn
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/TensorHandle.hpp
index fdd2439b41..4e9d87d6eb 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/TensorHandle.hpp
@@ -1,11 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/TypesUtils.hpp>
@@ -23,8 +23,8 @@ namespace armnn
// of a tensor, assuming fully packed data with no padding
TensorShape GetUnpaddedTensorStrides(const TensorInfo& tensorInfo);
-// Abstract tensor handles wrapping a CPU-readable region of memory, interpreting it as tensor data.
-class ConstCpuTensorHandle : public ITensorHandle
+// Abstract tensor handles wrapping a readable region of memory, interpreting it as tensor data.
+class ConstTensorHandle : public ITensorHandle
{
public:
template <typename T>
@@ -53,7 +53,7 @@ public:
TensorShape GetShape() const override { return m_TensorInfo.GetShape(); }
protected:
- ConstCpuTensorHandle(const TensorInfo& tensorInfo);
+ ConstTensorHandle(const TensorInfo& tensorInfo);
void SetConstMemory(const void* mem) { m_Memory = mem; }
@@ -62,18 +62,18 @@ private:
void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
- ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
- ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
+ ConstTensorHandle(const ConstTensorHandle& other) = delete;
+ ConstTensorHandle& operator=(const ConstTensorHandle& other) = delete;
TensorInfo m_TensorInfo;
const void* m_Memory;
};
template<>
-const void* ConstCpuTensorHandle::GetConstTensor<void>() const;
+const void* ConstTensorHandle::GetConstTensor<void>() const;
-// Abstract specialization of ConstCpuTensorHandle that allows write access to the same data.
-class CpuTensorHandle : public ConstCpuTensorHandle
+// Abstract specialization of ConstTensorHandle that allows write access to the same data.
+class TensorHandle : public ConstTensorHandle
{
public:
template <typename T>
@@ -84,7 +84,7 @@ public:
}
protected:
- CpuTensorHandle(const TensorInfo& tensorInfo);
+ TensorHandle(const TensorInfo& tensorInfo);
void SetMemory(void* mem)
{
@@ -94,29 +94,29 @@ protected:
private:
- CpuTensorHandle(const CpuTensorHandle& other) = delete;
- CpuTensorHandle& operator=(const CpuTensorHandle& other) = delete;
+ TensorHandle(const TensorHandle& other) = delete;
+ TensorHandle& operator=(const TensorHandle& other) = delete;
void* m_MutableMemory;
};
template <>
-void* CpuTensorHandle::GetTensor<void>() const;
+void* TensorHandle::GetTensor<void>() const;
-// A CpuTensorHandle that owns the wrapped memory region.
-class ScopedCpuTensorHandle : public CpuTensorHandle
+// A TensorHandle that owns the wrapped memory region.
+class ScopedTensorHandle : public TensorHandle
{
public:
- explicit ScopedCpuTensorHandle(const TensorInfo& tensorInfo);
+ explicit ScopedTensorHandle(const TensorInfo& tensorInfo);
// Copies contents from Tensor.
- explicit ScopedCpuTensorHandle(const ConstTensor& tensor);
+ explicit ScopedTensorHandle(const ConstTensor& tensor);
- // Copies contents from ConstCpuTensorHandle
- explicit ScopedCpuTensorHandle(const ConstCpuTensorHandle& tensorHandle);
+ // Copies contents from ConstTensorHandle
+ explicit ScopedTensorHandle(const ConstTensorHandle& tensorHandle);
- ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other);
- ScopedCpuTensorHandle& operator=(const ScopedCpuTensorHandle& other);
- ~ScopedCpuTensorHandle();
+ ScopedTensorHandle(const ScopedTensorHandle& other);
+ ScopedTensorHandle& operator=(const ScopedTensorHandle& other);
+ ~ScopedTensorHandle();
virtual void Allocate() override;
@@ -125,21 +125,21 @@ private:
void CopyOutTo(void* memory) const override;
void CopyInFrom(const void* memory) override;
- void CopyFrom(const ScopedCpuTensorHandle& other);
+ void CopyFrom(const ScopedTensorHandle& other);
void CopyFrom(const void* srcMemory, unsigned int numBytes);
};
-// A CpuTensorHandle that wraps an already allocated memory region.
+// A TensorHandle that wraps an already allocated memory region.
//
// Clients must make sure the passed in memory region stays alive for the lifetime of
-// the PassthroughCpuTensorHandle instance.
+// the PassthroughTensorHandle instance.
//
-// Note there is no polymorphism to/from ConstPassthroughCpuTensorHandle.
-class PassthroughCpuTensorHandle : public CpuTensorHandle
+// Note there is no polymorphism to/from ConstPassthroughTensorHandle.
+class PassthroughTensorHandle : public TensorHandle
{
public:
- PassthroughCpuTensorHandle(const TensorInfo& tensorInfo, void* mem)
- : CpuTensorHandle(tensorInfo)
+ PassthroughTensorHandle(const TensorInfo& tensorInfo, void* mem)
+ : TensorHandle(tensorInfo)
{
SetMemory(mem);
}
@@ -147,18 +147,18 @@ public:
virtual void Allocate() override;
};
-// A ConstCpuTensorHandle that wraps an already allocated memory region.
+// A ConstTensorHandle that wraps an already allocated memory region.
//
// This allows users to pass in const memory to a network.
// Clients must make sure the passed in memory region stays alive for the lifetime of
-// the PassthroughCpuTensorHandle instance.
+// the ConstPassthroughTensorHandle instance.
//
-// Note there is no polymorphism to/from PassthroughCpuTensorHandle.
-class ConstPassthroughCpuTensorHandle : public ConstCpuTensorHandle
+// Note there is no polymorphism to/from PassthroughTensorHandle.
+class ConstPassthroughTensorHandle : public ConstTensorHandle
{
public:
- ConstPassthroughCpuTensorHandle(const TensorInfo& tensorInfo, const void* mem)
- : ConstCpuTensorHandle(tensorInfo)
+ ConstPassthroughTensorHandle(const TensorInfo& tensorInfo, const void* mem)
+ : ConstTensorHandle(tensorInfo)
{
SetConstMemory(mem);
}
@@ -170,16 +170,16 @@ public:
// Template specializations.
template <>
-const void* ConstCpuTensorHandle::GetConstTensor() const;
+const void* ConstTensorHandle::GetConstTensor() const;
template <>
-void* CpuTensorHandle::GetTensor() const;
+void* TensorHandle::GetTensor() const;
class ManagedConstTensorHandle
{
public:
- explicit ManagedConstTensorHandle(std::shared_ptr<ConstCpuTensorHandle> ptr)
+ explicit ManagedConstTensorHandle(std::shared_ptr<ConstTensorHandle> ptr)
: m_Mapped(false)
, m_TensorHandle(std::move(ptr)) {};
@@ -200,7 +200,7 @@ public:
}
// Delete copy constructor as it's unnecessary
- ManagedConstTensorHandle(const ConstCpuTensorHandle& other) = delete;
+ ManagedConstTensorHandle(const ConstTensorHandle& other) = delete;
// Delete copy assignment as it's unnecessary
ManagedConstTensorHandle& operator=(const ManagedConstTensorHandle& other) = delete;
@@ -239,7 +239,19 @@ public:
private:
bool m_Mapped;
- std::shared_ptr<ConstCpuTensorHandle> m_TensorHandle;
+ std::shared_ptr<ConstTensorHandle> m_TensorHandle;
};
+using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstCpuTensorHandle is deprecated, "
+ "use ConstTensorHandle instead") = ConstTensorHandle;
+using CpuTensorHandle ARMNN_DEPRECATED_MSG("CpuTensorHandle is deprecated, "
+ "use TensorHandle instead") = TensorHandle;
+using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG("ScopedCpuTensorHandle is deprecated, "
+ "use ScopedTensorHandle instead") = ScopedTensorHandle;
+using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("PassthroughCpuTensorHandle is deprecated, use "
+ "PassthroughTensorHandle instead") = PassthroughTensorHandle;
+using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstPassthroughCpuTensorHandle is "
+ "deprecated, use ConstPassthroughTensorHandle "
+ "instead") = ConstPassthroughTensorHandle;
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 470d460ef3..be0ac707a8 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index abaa4f5185..77d4209657 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <InternalTypes.hpp>
@@ -175,8 +175,8 @@ struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnec
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -202,8 +202,8 @@ struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -217,8 +217,8 @@ struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<Dep
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -230,7 +230,7 @@ struct DetectionPostProcessQueueDescriptor : QueueDescriptorWithParameters<Detec
{
}
- const ConstCpuTensorHandle* m_Anchors;
+ const ConstTensorHandle* m_Anchors;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -305,10 +305,10 @@ struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNo
{
}
- const ConstCpuTensorHandle* m_Mean;
- const ConstCpuTensorHandle* m_Variance;
- const ConstCpuTensorHandle* m_Beta;
- const ConstCpuTensorHandle* m_Gamma;
+ const ConstTensorHandle* m_Mean;
+ const ConstTensorHandle* m_Variance;
+ const ConstTensorHandle* m_Beta;
+ const ConstTensorHandle* m_Gamma;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -336,8 +336,8 @@ struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuant
{
}
- const ConstCpuTensorHandle* m_Min;
- const ConstCpuTensorHandle* m_Max;
+ const ConstTensorHandle* m_Min;
+ const ConstTensorHandle* m_Max;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -364,7 +364,7 @@ struct ConstantQueueDescriptor : QueueDescriptor
{
}
- const ConstCpuTensorHandle* m_LayerOutput;
+ const ConstTensorHandle* m_LayerOutput;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -416,27 +416,27 @@ struct LstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
{
}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_CellToInputWeights;
- const ConstCpuTensorHandle* m_CellToForgetWeights;
- const ConstCpuTensorHandle* m_CellToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
- const ConstCpuTensorHandle* m_ProjectionWeights;
- const ConstCpuTensorHandle* m_ProjectionBias;
- const ConstCpuTensorHandle* m_InputLayerNormWeights;
- const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
- const ConstCpuTensorHandle* m_CellLayerNormWeights;
- const ConstCpuTensorHandle* m_OutputLayerNormWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_CellToInputWeights;
+ const ConstTensorHandle* m_CellToForgetWeights;
+ const ConstTensorHandle* m_CellToOutputWeights;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_ProjectionWeights;
+ const ConstTensorHandle* m_ProjectionBias;
+ const ConstTensorHandle* m_InputLayerNormWeights;
+ const ConstTensorHandle* m_ForgetLayerNormWeights;
+ const ConstTensorHandle* m_CellLayerNormWeights;
+ const ConstTensorHandle* m_OutputLayerNormWeights;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -543,8 +543,8 @@ struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<Tra
m_Bias(nullptr)
{}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -581,27 +581,27 @@ struct QLstmQueueDescriptor : QueueDescriptorWithParameters<QLstmDescriptor>
{
}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_CellToInputWeights;
- const ConstCpuTensorHandle* m_CellToForgetWeights;
- const ConstCpuTensorHandle* m_CellToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
- const ConstCpuTensorHandle* m_ProjectionWeights;
- const ConstCpuTensorHandle* m_ProjectionBias;
- const ConstCpuTensorHandle* m_InputLayerNormWeights;
- const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
- const ConstCpuTensorHandle* m_CellLayerNormWeights;
- const ConstCpuTensorHandle* m_OutputLayerNormWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_CellToInputWeights;
+ const ConstTensorHandle* m_CellToForgetWeights;
+ const ConstTensorHandle* m_CellToOutputWeights;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_ProjectionWeights;
+ const ConstTensorHandle* m_ProjectionBias;
+ const ConstTensorHandle* m_InputLayerNormWeights;
+ const ConstTensorHandle* m_ForgetLayerNormWeights;
+ const ConstTensorHandle* m_CellLayerNormWeights;
+ const ConstTensorHandle* m_OutputLayerNormWeights;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -625,20 +625,20 @@ struct QuantizedLstmQueueDescriptor : QueueDescriptor
, m_OutputGateBias(nullptr)
{}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 733d77e427..c5fc9d0fe2 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -15,7 +15,7 @@
#include <armnn/utility/TransformIterator.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 5886630cd9..c8105aea04 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -11,7 +11,7 @@
namespace armnn
{
-armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
+armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
const PermutationVector& permutationVector, void* permuteBuffer)
{
ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
@@ -130,7 +130,7 @@ TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, D
return weightPermutedInfo;
}
-armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
+armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* weightTensor,
DataLayout dataLayout,
void* permuteBuffer)
{
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 354362ec8f..06d2eccf3e 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include "CpuTensorHandle.hpp"
+#include "TensorHandle.hpp"
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/Tensor.hpp>
@@ -206,7 +206,7 @@ void GatherTensorHandlePairs(const DescriptorType& descriptor,
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim);
-armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
+armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
const PermutationVector& permutationVector,
void* permuteBuffer);
@@ -214,7 +214,7 @@ void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout);
TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout);
-armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
+armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* weightTensor,
DataLayout dataLayout,
void* permuteBuffer);
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 6e4a8c75d2..63d768eca5 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -8,7 +8,7 @@
# file in the root of ArmNN
COMMON_SOURCES := \
- CpuTensorHandle.cpp \
+ TensorHandle.cpp \
DynamicBackend.cpp \
DynamicBackendUtils.cpp \
IBackendInternal.cpp \
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
index 8c4da621ed..99412b9694 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.hpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -13,7 +13,7 @@
#include <armnn/BackendRegistry.hpp>
#include <armnn/Types.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <test/TestUtils.hpp>
@@ -72,8 +72,8 @@ bool Compare(T a, T b, float tolerance = 0.000001f)
template <typename ConvolutionLayer>
void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo)
{
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weightInfo);
- layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(biasInfo);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
+ layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(biasInfo);
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index 56a794e77c..2dd5298059 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -5,7 +5,7 @@
#include <armnn/Exceptions.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <boost/test/unit_test.hpp>
@@ -121,15 +121,15 @@ BOOST_AUTO_TEST_CASE(TestAsyncExecute)
ConstTensor constInputTensor(info, inVals);
ConstTensor constOutputTensor(info, outVals);
- ScopedCpuTensorHandle syncInput0(constInputTensor);
- ScopedCpuTensorHandle syncOutput0(constOutputTensor);
+ ScopedTensorHandle syncInput0(constInputTensor);
+ ScopedTensorHandle syncOutput0(constOutputTensor);
std::unique_ptr<Workload0> workload0 = CreateWorkload<Workload0>(info, &syncInput0, &syncOutput0);
workload0.get()->Execute();
- ScopedCpuTensorHandle asyncInput0(constInputTensor);
- ScopedCpuTensorHandle asyncOutput0(constOutputTensor);
+ ScopedTensorHandle asyncInput0(constInputTensor);
+ ScopedTensorHandle asyncOutput0(constOutputTensor);
WorkingMemDescriptor workingMemDescriptor0;
workingMemDescriptor0.m_Inputs = std::vector<ITensorHandle*>{&asyncInput0};
@@ -159,13 +159,13 @@ BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecute)
ConstTensor constOutputTensor(info, outVals);
ConstTensor defaultTensor(info, &defaultVals);
- ScopedCpuTensorHandle defaultInput = ScopedCpuTensorHandle(defaultTensor);
- ScopedCpuTensorHandle defaultOutput = ScopedCpuTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultInput = ScopedTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultOutput = ScopedTensorHandle(defaultTensor);
std::unique_ptr<Workload1> workload1 = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);
- ScopedCpuTensorHandle asyncInput(constInputTensor);
- ScopedCpuTensorHandle asyncOutput(constOutputTensor);
+ ScopedTensorHandle asyncInput(constInputTensor);
+ ScopedTensorHandle asyncOutput(constOutputTensor);
WorkingMemDescriptor workingMemDescriptor;
workingMemDescriptor.m_Inputs = std::vector<ITensorHandle*>{&asyncInput};
@@ -202,20 +202,20 @@ BOOST_AUTO_TEST_CASE(TestDefaultAsyncExeuteWithThreads)
ConstTensor defaultTensor(info, &defaultVals);
- ScopedCpuTensorHandle defaultInput = ScopedCpuTensorHandle(defaultTensor);
- ScopedCpuTensorHandle defaultOutput = ScopedCpuTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultInput = ScopedTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultOutput = ScopedTensorHandle(defaultTensor);
std::unique_ptr<Workload1> workload = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);
- ScopedCpuTensorHandle asyncInput1(constInputTensor1);
- ScopedCpuTensorHandle asyncOutput1(constOutputTensor1);
+ ScopedTensorHandle asyncInput1(constInputTensor1);
+ ScopedTensorHandle asyncOutput1(constOutputTensor1);
WorkingMemDescriptor workingMemDescriptor1;
workingMemDescriptor1.m_Inputs = std::vector<ITensorHandle*>{&asyncInput1};
workingMemDescriptor1.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput1};
- ScopedCpuTensorHandle asyncInput2(constInputTensor2);
- ScopedCpuTensorHandle asyncOutput2(constOutputTensor2);
+ ScopedTensorHandle asyncInput2(constInputTensor2);
+ ScopedTensorHandle asyncOutput2(constOutputTensor2);
WorkingMemDescriptor workingMemDescriptor2;
workingMemDescriptor2.m_Inputs = std::vector<ITensorHandle*>{&asyncInput2};
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 8302bfd57d..a4f1613a58 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -9,8 +9,8 @@
#include <armnn/backends/DynamicBackend.hpp>
#include <armnn/backends/ILayerSupport.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/DynamicBackendUtils.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <Filesystem.hpp>
#include <reference/workloads/RefConvolution2dWorkload.hpp>
#include <Runtime.hpp>
@@ -1473,7 +1473,7 @@ void CreateReferenceDynamicBackendTestImpl()
{ outputInfo }
};
convolution2dQueueDescriptor.m_Inputs.push_back(nullptr);
- auto weights = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ auto weights = std::make_unique<ScopedTensorHandle>(weightInfo);
convolution2dQueueDescriptor.m_Weight = weights.get();
// Create a convolution workload with the dummy settings
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b73efbe26c..4240bb1061 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -83,13 +83,13 @@ struct DummyLayer<armnn::BatchNormalizationLayer>
DummyLayer()
{
m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
- m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -240,9 +240,9 @@ struct DummyConvolutionLayer
desc.m_StrideX = 1;
desc.m_StrideY = 1;
m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -278,7 +278,7 @@ struct DummyLayer<armnn::DetectionPostProcessLayer>
DummyLayer()
{
m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
- m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -299,30 +299,30 @@ struct DummyLstmLayer
desc.m_CifgEnabled = false;
m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
- m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -354,57 +354,57 @@ struct DummyQLstmLayer
m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");
// Basic params
- m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// CIFG optional params
- m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// Projection optional params
- m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// Peephole optional params
- m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
// Layer normalization optional params
- m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
}
@@ -423,31 +423,31 @@ struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
- m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
}
@@ -466,7 +466,7 @@ struct DummyLayer<armnn::FullyConnectedLayer>
{
armnn::FullyConnectedLayer::DescriptorType desc;
m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 817cdeed79..0ca4b0a7f9 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -7,7 +7,7 @@
#include <Graph.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <boost/test/unit_test.hpp>
@@ -35,10 +35,10 @@ BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -87,9 +87,9 @@ BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({2, 3, 5, 3},
armnn::DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
(TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
layer->m_Weight->Allocate();
@@ -131,8 +131,8 @@ BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({9}, DataType::Float32));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -170,9 +170,9 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
float inputsQScale = 1.0f;
float outputQScale = 2.0f;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20},
DataType::QAsymmU8, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7},
GetBiasDataType(DataType::QAsymmU8), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 5ac548f42a..182c913777 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -7,7 +7,7 @@
#include <armnn/Exceptions.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <reference/workloads/RefWorkloads.hpp>
@@ -32,7 +32,7 @@ BOOST_AUTO_TEST_CASE(BatchNormalizationQueueDescriptor_Validate_DifferentQuantiz
unsigned int sameShape[] = { 10 };
TensorInfo sameInfo = armnn::TensorInfo(1, sameShape, armnn::DataType::QAsymmU8);
- ScopedCpuTensorHandle sameTensor(sameInfo);
+ ScopedTensorHandle sameTensor(sameInfo);
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
@@ -136,8 +136,8 @@ BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
FullyConnectedQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
- ScopedCpuTensorHandle weightTensor(weightsDesc);
- ScopedCpuTensorHandle biasTensor(biasesDesc);
+ ScopedTensorHandle weightTensor(weightsDesc);
+ ScopedTensorHandle biasTensor(biasesDesc);
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
@@ -515,27 +515,27 @@ BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
// AddOutputToWorkload(data, info, outputTensorInfo, nullptr); is left out
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
data.m_InputToInputWeights = &inputToInputWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -657,14 +657,14 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedCpuTensorHandle weightTensor(weightInfo);
+ ScopedTensorHandle weightTensor(weightInfo);
queueDescriptor.m_Weight = &weightTensor;
// Test 1: correct per-axis quantization values
const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);
- ScopedCpuTensorHandle biasHandle1(biasInfo1);
+ ScopedTensorHandle biasHandle1(biasInfo1);
queueDescriptor.m_Bias = &biasHandle1;
BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
@@ -673,7 +673,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);
- ScopedCpuTensorHandle biasHandle2(biasInfo2);
+ ScopedTensorHandle biasHandle2(biasInfo2);
queueDescriptor.m_Bias = &biasHandle2;
BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
@@ -682,7 +682,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);
- ScopedCpuTensorHandle biasHandle3(biasInfo3);
+ ScopedTensorHandle biasHandle3(biasInfo3);
queueDescriptor.m_Bias = &biasHandle3;
BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index eb4f461eb9..969d5dbcd1 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>
@@ -74,10 +74,10 @@ LayerTestResult<T, 4> BatchNormTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
armnn::BatchNormalizationQueueDescriptor descriptor;
descriptor.m_Mean = &meanTensor;
@@ -160,10 +160,10 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
@@ -644,10 +644,10 @@ LayerTestResult<float,4> CompareBatchNormTest(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index 45c94d345b..c28ef40b45 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -101,7 +101,7 @@ LayerTestResult<T, 4> ConstantTestImpl(
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
+ armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
armnn::ConstantQueueDescriptor descriptor;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 4641e67aad..8f60415a66 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -13,7 +13,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -318,8 +318,8 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
// Permute the kernel if necessary
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
if (layout == armnn::DataLayout::NHWC)
@@ -423,10 +423,10 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
armnn::Convolution2dQueueDescriptor data;
@@ -547,8 +547,8 @@ LayerTestResult<T,4> Convolution1dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
- armnn::ScopedCpuTensorHandle biasTensor(biasInfo);
+ armnn::ScopedTensorHandle weightsTensor(kernelInfo);
+ armnn::ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -1349,8 +1349,8 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1722,11 +1722,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
if (biasEnabled)
{
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1882,8 +1882,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2095,8 +2095,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2252,8 +2252,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
@@ -3007,8 +3007,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -3502,8 +3502,8 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -3756,8 +3756,8 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 3ee1fadd81..f68082762c 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -8,7 +8,7 @@
#include <armnn/Types.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -181,7 +181,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
auto outputScoresHandle = tensorHandleFactory.CreateTensorHandle(detectionScoresInfo);
auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);
- armnn::ScopedCpuTensorHandle anchorsTensor(anchorsInfo);
+ armnn::ScopedTensorHandle anchorsTensor(anchorsInfo);
AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);
armnn::DetectionPostProcessQueueDescriptor data;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index f8644007f2..157df99d64 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -6,7 +6,7 @@
#include "FakeQuantizationTestImpl.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -48,7 +48,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
data.m_Parameters.m_Min = min;
data.m_Parameters.m_Max = max;
- armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
armnn::FakeQuantizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 9176094eb2..cd7f4efe31 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -8,7 +8,7 @@
#include <QuantizeHelper.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -40,8 +40,8 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+ armnn::ScopedTensorHandle weightsTensor(weightsDesc);
+ armnn::ScopedTensorHandle biasTensor(biasesDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 2e205dd58e..24a4dc4789 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 7ee7a3465b..f32d367d37 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -10,7 +10,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 07a1f1e879..7a9652a8ea 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -9,7 +9,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -269,19 +269,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -971,23 +971,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
std::vector<float> projectionBiasVector(outputSize, 0.f);
auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1142,21 +1142,21 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1455,28 +1455,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
-
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
+
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1673,21 +1673,21 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -1891,22 +1891,22 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -2145,28 +2145,28 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -2411,24 +2411,24 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 2e8e16f0c2..b52dcd5303 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -75,7 +75,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -219,7 +219,7 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index c4cc914115..9688ce49f2 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 328e724b54..85ce7e5e6f 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -68,7 +68,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
}
// set up weights
- ScopedCpuTensorHandle weightsTensor(weights.first);
+ ScopedTensorHandle weightsTensor(weights.first);
TransposeConvolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
@@ -76,11 +76,11 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
- std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
+ std::unique_ptr<ScopedTensorHandle> biasesTensor;
if (descriptor.m_BiasEnabled)
{
// set up biases
- biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
+ biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
queueDescriptor.m_Bias = biasesTensor.get();
AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
@@ -643,8 +643,8 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 19ad773338..0d8d0a7c28 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -16,10 +16,10 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index e22479c25b..47e2f4e8d7 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -395,8 +395,8 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 2b8b0d48a7..794a45fa48 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -10,7 +10,7 @@
#include <layers/MeanLayer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClWorkloadFactory.hpp>
#include <cl/test/ClContextControlFixture.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index b7d274fdca..5afafcb783 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -10,7 +10,7 @@
#include <Graph.hpp>
#include <Optimizer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 0e1f28ec4e..1b86d2e304 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -9,7 +9,7 @@
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <cl/ClContextControl.hpp>
@@ -79,10 +79,10 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
BatchNormalizationQueueDescriptor data;
WorkloadInfo info;
- ScopedCpuTensorHandle meanTensor(tensorInfo);
- ScopedCpuTensorHandle varianceTensor(tensorInfo);
- ScopedCpuTensorHandle betaTensor(tensorInfo);
- ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ ScopedTensorHandle meanTensor(tensorInfo);
+ ScopedTensorHandle varianceTensor(tensorInfo);
+ ScopedTensorHandle betaTensor(tensorInfo);
+ ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 8997a9720d..e2f64a9d7e 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -7,7 +7,7 @@
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index 0ab7446026..4793cc6f8f 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClAdditionWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index 8974930afa..7475cfa315 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index daaed17a90..361d6f87a5 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index 8978c5a66e..b9736db642 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClBatchToSpaceNdWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClComparisonWorkload.cpp b/src/backends/cl/workloads/ClComparisonWorkload.cpp
index 20e5669807..35e6d68733 100644
--- a/src/backends/cl/workloads/ClComparisonWorkload.cpp
+++ b/src/backends/cl/workloads/ClComparisonWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 3c5f23742a..1c2d476e0c 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index 40acb8ebd0..60dcd59268 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -8,7 +8,7 @@
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 99a981bd0c..5c731aa0a1 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -12,7 +12,7 @@
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 655f0c9c35..50cdb0a626 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -12,7 +12,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -137,7 +137,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
- ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
+ ScopedTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
if (m_BiasTensor)
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index 52d8fab93c..42cc579a8c 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/core/Types.h>
diff --git a/src/backends/cl/workloads/ClDivisionWorkload.cpp b/src/backends/cl/workloads/ClDivisionWorkload.cpp
index be5f3b8225..76220a1b64 100644
--- a/src/backends/cl/workloads/ClDivisionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClDivisionWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 9135d27376..d1d911ac13 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClFullyConnectedWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index bd38219a3e..984f21a4db 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClL2NormalizationFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index 58cc735704..908f20bfe5 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClLstmFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClMaximumWorkload.cpp b/src/backends/cl/workloads/ClMaximumWorkload.cpp
index 85bea47f21..0aa15e5dd3 100644
--- a/src/backends/cl/workloads/ClMaximumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMaximumWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClMinimumWorkload.cpp b/src/backends/cl/workloads/ClMinimumWorkload.cpp
index 07a78b5008..4924002432 100644
--- a/src/backends/cl/workloads/ClMinimumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMinimumWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 31e9d022cc..2bd1e1615a 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClMultiplicationWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index 290d29ae52..e9b2caf6ee 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClNormalizationFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClPreluWorkload.cpp b/src/backends/cl/workloads/ClPreluWorkload.cpp
index 73fa887532..9b45441b02 100644
--- a/src/backends/cl/workloads/ClPreluWorkload.cpp
+++ b/src/backends/cl/workloads/ClPreluWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClPreluWorkload.hpp"
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClQuantizeWorkload.cpp b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
index 5c945e0921..527c64013b 100644
--- a/src/backends/cl/workloads/ClQuantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
@@ -9,7 +9,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
index 636bdecbeb..d50414b1cf 100644
--- a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClQuantizedLstmWorkload.hpp"
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 0988babf23..1f82cfbee2 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClReshapeWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
index e47740624e..3406011d04 100644
--- a/src/backends/cl/workloads/ClResizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index 7b29cded0f..3aa8ebd2a8 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index 7a590d26b6..67487c4bf1 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index 70a817825e..8eb58c967e 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 749282f53a..38c76eb648 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 92e860fc42..adf32ce1fc 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 31e0becfd8..e320fec342 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClSubtractionWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index ff0fd5c168..b40b4b10ca 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -14,7 +14,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h>
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index b0cc8b4ed5..467505d55b 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/OpenClTimer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Utils.hpp>
@@ -88,7 +88,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
}
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
- const ConstCpuTensorHandle* handle)
+ const ConstTensorHandle* handle)
{
ARMNN_ASSERT(handle);
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 9967fb8604..8751d8ca2c 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -15,10 +15,10 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index a14122f573..13a4c732a4 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -9,7 +9,7 @@
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonWorkloadFactory.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index d9edca1b6d..9acd0e41e2 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -8,7 +8,7 @@
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <neon/NeonTimer.hpp>
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index 9300b317a9..5891677c0d 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 6290ecce17..cc85791ae6 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index 33480faf69..5da7cca83e 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>
diff --git a/src/backends/neon/workloads/NeonComparisonWorkload.cpp b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
index 6e1f208228..01a6a0c78b 100644
--- a/src/backends/neon/workloads/NeonComparisonWorkload.cpp
+++ b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index a8f6dbed23..5cd906da41 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -9,7 +9,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 0859abd394..77e4420794 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index fd8be17dfd..32af3f853a 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index db6bcc3ecb..ad509076b4 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -14,7 +14,7 @@
#include <neon/NeonLayerSupport.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
@@ -136,7 +136,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
- ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
+ ScopedTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 9ae82ff79f..07323d19ca 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
index 1a26d9510a..fa61a100a9 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 31489a0c32..713771be91 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
diff --git a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
index d6c30817b8..1bfd1e4d47 100644
--- a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
@@ -8,7 +8,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
using namespace armnn::armcomputetensorutils;
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index 46d500bfdc..c4500d885a 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -6,7 +6,7 @@
#include "NeonMaximumWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 53e483a182..519b3c4bc6 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
index 4c2ba7513d..d809017692 100644
--- a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
@@ -7,7 +7,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
index ae07b250d6..ab01e30140 100644
--- a/src/backends/neon/workloads/NeonResizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index de6f1378bd..4e428a2654 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
#include "NeonWorkloadUtils.hpp"
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index 696de65620..0b327b8a37 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index 21f0f6fa41..64f68aa6e2 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 9c7e99c6a0..a1e545ce05 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -13,7 +13,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 589d2ea017..ab7616fbe2 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Utils.hpp>
@@ -33,7 +33,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
}
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
- const ConstCpuTensorHandle* handle)
+ const ConstTensorHandle* handle)
{
ARMNN_ASSERT(handle);
diff --git a/src/backends/reference/RefTensorHandle.hpp b/src/backends/reference/RefTensorHandle.hpp
index 8c64dfbe63..3dfc0395df 100644
--- a/src/backends/reference/RefTensorHandle.hpp
+++ b/src/backends/reference/RefTensorHandle.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "RefMemoryManager.hpp"
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index c1e3d58bd2..8e3bbe468f 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -3,10 +3,10 @@
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <reference/workloads/RefFillWorkload.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 1648583210..a1487061b6 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -7,7 +7,7 @@
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefLayerSupport.hpp>
#include <backendsCommon/test/LayerTests.hpp>
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index b04d9d6c52..dadd1de1f2 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -173,8 +173,8 @@ BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle)
void* mem = nullptr;
TensorInfo info;
- // Use PassthroughCpuTensor as others are abstract
- auto passThroughHandle = std::make_shared<PassthroughCpuTensorHandle>(info, mem);
+ // Use PassthroughTensor as others are abstract
+ auto passThroughHandle = std::make_shared<PassthroughTensorHandle>(info, mem);
// Test managed handle is initialized with m_Mapped unset and once Map() called its set
ManagedConstTensorHandle managedHandle(passThroughHandle);
diff --git a/src/backends/reference/workloads/LstmUtils.cpp b/src/backends/reference/workloads/LstmUtils.cpp
index f197aae291..8e1db0e88f 100644
--- a/src/backends/reference/workloads/LstmUtils.cpp
+++ b/src/backends/reference/workloads/LstmUtils.cpp
@@ -7,7 +7,7 @@
#include "LstmUtils.hpp"
#include "BaseIterator.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
// Helper functions ported from the Android code base
@@ -296,12 +296,12 @@ void SetActivationParameters(uint32_t activation,
}
}
-std::unique_ptr<armnn::ScopedCpuTensorHandle> AssignScopedCpuTensorHandle(const armnn::ConstCpuTensorHandle* ptr)
+std::unique_ptr<armnn::ScopedTensorHandle> AssignScopedTensorHandle(const armnn::ConstTensorHandle *ptr)
{
if (!ptr)
{
return nullptr;
}
- return std::make_unique<armnn::ScopedCpuTensorHandle>(*ptr);
+ return std::make_unique<armnn::ScopedTensorHandle>(*ptr);
}
diff --git a/src/backends/reference/workloads/LstmUtils.hpp b/src/backends/reference/workloads/LstmUtils.hpp
index f6aff8b69f..11fbb77c5d 100644
--- a/src/backends/reference/workloads/LstmUtils.hpp
+++ b/src/backends/reference/workloads/LstmUtils.hpp
@@ -6,7 +6,7 @@
#pragma once
#include "BaseIterator.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
// Helper functions ported from the Android code base
// Refer to: android/external/tensorflow/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -85,4 +85,4 @@ void SetActivationParameters(uint32_t activation,
float& outA,
float& outB);
-std::unique_ptr<armnn::ScopedCpuTensorHandle> AssignScopedCpuTensorHandle(const armnn::ConstCpuTensorHandle* ptr);
+std::unique_ptr<armnn::ScopedTensorHandle> AssignScopedTensorHandle(const armnn::ConstTensorHandle *ptr);
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
index e1068896ba..282374d89b 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -16,10 +16,10 @@ namespace armnn
RefBatchNormalizationWorkload::RefBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info)
: BaseWorkload(descriptor, info)
- , m_Mean (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Mean)))
- , m_Variance(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Variance)))
- , m_Beta (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Beta)))
- , m_Gamma (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Gamma)))
+ , m_Mean (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Mean)))
+ , m_Variance(std::make_unique<ScopedTensorHandle>(*(descriptor.m_Variance)))
+ , m_Beta (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Beta)))
+ , m_Gamma (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Gamma)))
{}
void RefBatchNormalizationWorkload::Execute() const
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
index a8a72ef65c..6fe05fd192 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
@@ -21,10 +21,10 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Mean;
- std::unique_ptr<ScopedCpuTensorHandle> m_Variance;
- std::unique_ptr<ScopedCpuTensorHandle> m_Beta;
- std::unique_ptr<ScopedCpuTensorHandle> m_Gamma;
+ std::unique_ptr<ScopedTensorHandle> m_Mean;
+ std::unique_ptr<ScopedTensorHandle> m_Variance;
+ std::unique_ptr<ScopedTensorHandle> m_Beta;
+ std::unique_ptr<ScopedTensorHandle> m_Gamma;
};
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 6d0ab413d8..5ae1af8967 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -16,7 +16,7 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
m_FilterShape = rFilterInfo.GetShape();
@@ -24,7 +24,7 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
index 57df3ce6ae..3b2c76ade0 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
@@ -25,8 +25,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr<ScopedTensorHandle> m_Weight;
+ std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_FilterDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index 8fe5dec7d1..b447d1a441 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -19,14 +19,14 @@ RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(
const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
m_FilterShape = rFilterInfo.GetShape();
m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight->Map(true));
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
index 65a8fd76cf..62289ca34f 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
@@ -24,8 +24,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr <ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr <ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr <ScopedTensorHandle> m_Weight;
+ std::unique_ptr <ScopedTensorHandle> m_Bias;
std::unique_ptr <Decoder<float>> m_FilterDecoder;
std::unique_ptr <Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
index 6784e21585..4bc9eb1704 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
@@ -16,7 +16,7 @@ namespace armnn
RefDetectionPostProcessWorkload::RefDetectionPostProcessWorkload(
const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<DetectionPostProcessQueueDescriptor>(descriptor, info),
- m_Anchors(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Anchors))) {}
+ m_Anchors(std::make_unique<ScopedTensorHandle>(*(descriptor.m_Anchors))) {}
void RefDetectionPostProcessWorkload::Execute() const
{
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
index 007dcea456..920db96603 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
@@ -21,7 +21,7 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Anchors;
+ std::unique_ptr<ScopedTensorHandle> m_Anchors;
};
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index deb56d4c6b..3e63afac6f 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -18,14 +18,14 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
{
if (descriptor.m_Parameters.m_ConstantWeights)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
m_WeightShape = rWeightInfo.GetShape();
m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
index 5c0f67ebaf..6a4e5126cd 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
@@ -29,8 +29,8 @@ public:
private:
void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr<ScopedTensorHandle> m_Weight;
+ std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_WeightDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefLstmWorkload.cpp b/src/backends/reference/workloads/RefLstmWorkload.cpp
index 09423547da..3ddfd334b8 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.cpp
@@ -15,27 +15,27 @@ namespace armnn
RefLstmWorkload::RefLstmWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
: BaseWorkload<LstmQueueDescriptor>(descriptor, info)
- , m_InputToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToInputWeights))
- , m_InputToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToForgetWeights))
- , m_InputToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToCellWeights))
- , m_InputToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToOutputWeights))
- , m_RecurrentToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToInputWeights))
- , m_RecurrentToForgetWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToForgetWeights))
- , m_RecurrentToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToCellWeights))
- , m_RecurrentToOutputWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToOutputWeights))
- , m_CellToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToInputWeights))
- , m_CellToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToForgetWeights))
- , m_CellToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToOutputWeights))
- , m_InputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_InputGateBias))
- , m_ForgetGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetGateBias))
- , m_CellBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_CellBias))
- , m_OutputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputGateBias))
- , m_ProjectionWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionWeights))
- , m_ProjectionBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionBias))
- , m_InputLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_InputLayerNormWeights))
- , m_ForgetLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_ForgetLayerNormWeights))
- , m_CellLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_CellLayerNormWeights))
- , m_OutputLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_OutputLayerNormWeights))
+ , m_InputToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToInputWeights))
+ , m_InputToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToForgetWeights))
+ , m_InputToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToCellWeights))
+ , m_InputToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToOutputWeights))
+ , m_RecurrentToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToInputWeights))
+ , m_RecurrentToForgetWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToForgetWeights))
+ , m_RecurrentToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToCellWeights))
+ , m_RecurrentToOutputWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToOutputWeights))
+ , m_CellToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToInputWeights))
+ , m_CellToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToForgetWeights))
+ , m_CellToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToOutputWeights))
+ , m_InputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_InputGateBias))
+ , m_ForgetGateBiasTensor (AssignScopedTensorHandle(descriptor.m_ForgetGateBias))
+ , m_CellBiasTensor (AssignScopedTensorHandle(descriptor.m_CellBias))
+ , m_OutputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_OutputGateBias))
+ , m_ProjectionWeightsTensor (AssignScopedTensorHandle(descriptor.m_ProjectionWeights))
+ , m_ProjectionBiasTensor (AssignScopedTensorHandle(descriptor.m_ProjectionBias))
+ , m_InputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_InputLayerNormWeights))
+ , m_ForgetLayerNormWeights (AssignScopedTensorHandle(descriptor.m_ForgetLayerNormWeights))
+ , m_CellLayerNormWeights (AssignScopedTensorHandle(descriptor.m_CellLayerNormWeights))
+ , m_OutputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_OutputLayerNormWeights))
{}
void RefLstmWorkload::Execute() const
diff --git a/src/backends/reference/workloads/RefLstmWorkload.hpp b/src/backends/reference/workloads/RefLstmWorkload.hpp
index b55a1f9a9e..6feffbcb00 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.hpp
@@ -23,27 +23,27 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_InputToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_CellLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_OutputLayerNormWeights;
float m_LayerNormEpsilon = static_cast<float>(1e-8);
};
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.cpp b/src/backends/reference/workloads/RefQLstmWorkload.cpp
index 7b7961c5a0..dc29d0b92d 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.cpp
@@ -15,32 +15,32 @@ namespace armnn
RefQLstmWorkload::RefQLstmWorkload(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
: BaseWorkload<QLstmQueueDescriptor>(descriptor, info)
- , m_InputToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToInputWeights))
- , m_InputToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToForgetWeights))
- , m_InputToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToCellWeights))
- , m_InputToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToOutputWeights))
-
- , m_RecurrentToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToInputWeights))
- , m_RecurrentToForgetWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToForgetWeights))
- , m_RecurrentToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToCellWeights))
- , m_RecurrentToOutputWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToOutputWeights))
-
- , m_CellToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToInputWeights))
- , m_CellToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToForgetWeights))
- , m_CellToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToOutputWeights))
-
- , m_InputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_InputGateBias))
- , m_ForgetGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetGateBias))
- , m_CellBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_CellBias))
- , m_OutputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputGateBias))
-
- , m_ProjectionWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionWeights))
- , m_ProjectionBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionBias))
-
- , m_InputLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputLayerNormWeights))
- , m_ForgetLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetLayerNormWeights))
- , m_CellLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellLayerNormWeights))
- , m_OutputLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputLayerNormWeights))
+ , m_InputToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToInputWeights))
+ , m_InputToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToForgetWeights))
+ , m_InputToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToCellWeights))
+ , m_InputToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToOutputWeights))
+
+ , m_RecurrentToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToInputWeights))
+ , m_RecurrentToForgetWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToForgetWeights))
+ , m_RecurrentToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToCellWeights))
+ , m_RecurrentToOutputWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToOutputWeights))
+
+ , m_CellToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToInputWeights))
+ , m_CellToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToForgetWeights))
+ , m_CellToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToOutputWeights))
+
+ , m_InputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_InputGateBias))
+ , m_ForgetGateBiasTensor (AssignScopedTensorHandle(descriptor.m_ForgetGateBias))
+ , m_CellBiasTensor (AssignScopedTensorHandle(descriptor.m_CellBias))
+ , m_OutputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_OutputGateBias))
+
+ , m_ProjectionWeightsTensor (AssignScopedTensorHandle(descriptor.m_ProjectionWeights))
+ , m_ProjectionBiasTensor (AssignScopedTensorHandle(descriptor.m_ProjectionBias))
+
+ , m_InputLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputLayerNormWeights))
+ , m_ForgetLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_ForgetLayerNormWeights))
+ , m_CellLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellLayerNormWeights))
+ , m_OutputLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_OutputLayerNormWeights))
{}
void RefQLstmWorkload::Execute() const
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.hpp b/src/backends/reference/workloads/RefQLstmWorkload.hpp
index f4242ec8a4..0aa7e10bbf 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.hpp
@@ -23,32 +23,32 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_CellToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_InputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputGateBiasTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionBiasTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_InputLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputLayerNormWeightsTensor;
float m_LayerNormEpsilon = static_cast<float>(1e-8);
};
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
index 634122835f..8665648fe6 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
@@ -18,7 +18,7 @@ RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
BaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
// set up weights decoder
- m_Weights = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weights = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& weightsInfo = m_Weights->GetTensorInfo();
m_WeightsDecoder = MakeDecoder<float>(weightsInfo, m_Weights->Map(true));
@@ -27,7 +27,7 @@ RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
// set up biases decoder
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Biases = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Biases = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasesInfo = m_Biases->GetTensorInfo();
m_BiasesDecoder = MakeDecoder<float>(biasesInfo, m_Biases->Map(true));
}
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
index 7c18f10293..997ccbfe12 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
@@ -8,7 +8,7 @@
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
namespace armnn
@@ -26,8 +26,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weights;
- std::unique_ptr<ScopedCpuTensorHandle> m_Biases;
+ std::unique_ptr<ScopedTensorHandle> m_Weights;
+ std::unique_ptr<ScopedTensorHandle> m_Biases;
std::unique_ptr<Decoder<float>> m_WeightsDecoder;
std::unique_ptr<Decoder<float>> m_BiasesDecoder;
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index 0d839afc1c..3659617b7f 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp
index ac324946d2..227190cb1b 100644
--- a/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp
+++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "SampleDynamicAdditionWorkload.hpp"
#include "SampleDynamicBackend.hpp"
@@ -45,7 +45,7 @@ std::unique_ptr<armnn::ITensorHandle> SampleDynamicWorkloadFactory::CreateTensor
const armnn::TensorInfo& tensorInfo,
const bool isMemoryManaged) const
{
- return std::make_unique<armnn::ScopedCpuTensorHandle>(tensorInfo);
+ return std::make_unique<armnn::ScopedTensorHandle>(tensorInfo);
}
std::unique_ptr<armnn::ITensorHandle> SampleDynamicWorkloadFactory::CreateTensorHandle(
@@ -53,7 +53,7 @@ std::unique_ptr<armnn::ITensorHandle> SampleDynamicWorkloadFactory::CreateTensor
armnn::DataLayout dataLayout,
const bool isMemoryManaged) const
{
- return std::make_unique<armnn::ScopedCpuTensorHandle>(tensorInfo);
+ return std::make_unique<armnn::ScopedTensorHandle>(tensorInfo);
}
std::unique_ptr<armnn::IWorkload> SampleDynamicWorkloadFactory::CreateAddition(
diff --git a/src/dynamic/sample/SampleTensorHandle.hpp b/src/dynamic/sample/SampleTensorHandle.hpp
index 9ddccedce2..dce98cfc0b 100644
--- a/src/dynamic/sample/SampleTensorHandle.hpp
+++ b/src/dynamic/sample/SampleTensorHandle.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "SampleMemoryManager.hpp"