Diffstat (limited to 'src/backends')
-rw-r--r-- src/backends/backendsCommon/CMakeLists.txt | 5
-rw-r--r-- src/backends/backendsCommon/CpuTensorHandleFwd.hpp | 9
-rw-r--r-- src/backends/backendsCommon/MemCopyWorkload.cpp | 2
-rw-r--r-- src/backends/backendsCommon/MemCopyWorkload.hpp | 2
-rw-r--r-- src/backends/backendsCommon/MemImportWorkload.cpp | 2
-rw-r--r-- src/backends/backendsCommon/MemImportWorkload.hpp | 2
-rw-r--r-- src/backends/backendsCommon/MemSyncWorkload.cpp | 2
-rw-r--r-- src/backends/backendsCommon/MemSyncWorkload.hpp | 2
-rw-r--r-- src/backends/backendsCommon/TensorHandle.cpp (renamed from src/backends/backendsCommon/CpuTensorHandle.cpp) | 54
-rw-r--r-- src/backends/backendsCommon/TensorHandle.hpp (renamed from src/backends/backendsCommon/CpuTensorHandle.hpp) | 94
-rw-r--r-- src/backends/backendsCommon/WorkloadData.cpp | 2
-rw-r--r-- src/backends/backendsCommon/WorkloadData.hpp | 142
-rw-r--r-- src/backends/backendsCommon/WorkloadFactory.cpp | 2
-rw-r--r-- src/backends/backendsCommon/WorkloadUtils.cpp | 4
-rw-r--r-- src/backends/backendsCommon/WorkloadUtils.hpp | 6
-rw-r--r-- src/backends/backendsCommon/common.mk | 2
-rw-r--r-- src/backends/backendsCommon/test/CommonTestUtils.hpp | 6
-rw-r--r-- src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp | 30
-rw-r--r-- src/backends/backendsCommon/test/DynamicBackendTests.hpp | 4
-rw-r--r-- src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 106
-rw-r--r-- src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp | 22
-rw-r--r-- src/backends/backendsCommon/test/WorkloadDataValidation.cpp | 58
-rw-r--r-- src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp | 26
-rw-r--r-- src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp | 4
-rw-r--r-- src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 46
-rw-r--r-- src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp | 4
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 4
-rw-r--r-- src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp | 6
-rw-r--r-- src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp | 2
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp | 2
-rw-r--r-- src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp | 244
-rw-r--r-- src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 6
-rw-r--r-- src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp | 2
-rw-r--r-- src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp | 12
-rw-r--r-- src/backends/cl/ClWorkloadFactory.cpp | 2
-rw-r--r-- src/backends/cl/test/ClCreateWorkloadTests.cpp | 4
-rw-r--r-- src/backends/cl/test/ClLayerSupportTests.cpp | 2
-rw-r--r-- src/backends/cl/test/Fp16SupportTest.cpp | 2
-rw-r--r-- src/backends/cl/test/OpenClTimerTest.cpp | 10
-rw-r--r-- src/backends/cl/workloads/ClActivationWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClAdditionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClArgMinMaxWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClComparisonWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConcatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConstantWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 4
-rw-r--r-- src/backends/cl/workloads/ClDequantizeWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClDivisionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClMaximumWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClMinimumWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClMultiplicationWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClPreluWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClQuantizeWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClReshapeWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClResizeWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClSplitterWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClStackWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClStridedSliceWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClSubtractionWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp | 2
-rw-r--r-- src/backends/cl/workloads/ClWorkloadUtils.hpp | 4
-rw-r--r-- src/backends/neon/NeonWorkloadFactory.cpp | 2
-rw-r--r-- src/backends/neon/test/NeonLayerSupportTests.cpp | 2
-rw-r--r-- src/backends/neon/test/NeonTimerTest.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonAdditionWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonComparisonWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonConcatWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonConstantWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 4
-rw-r--r-- src/backends/neon/workloads/NeonDequantizeWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonDivisionWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonMaximumWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonMinimumWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonResizeWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonSplitterWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonStackWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonSubtractionWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp | 2
-rw-r--r-- src/backends/neon/workloads/NeonWorkloadUtils.hpp | 4
-rw-r--r-- src/backends/reference/RefTensorHandle.hpp | 2
-rw-r--r-- src/backends/reference/RefWorkloadFactory.cpp | 2
-rw-r--r-- src/backends/reference/test/RefLayerSupportTests.cpp | 2
-rw-r--r-- src/backends/reference/test/RefTensorHandleTests.cpp | 4
-rw-r--r-- src/backends/reference/workloads/LstmUtils.cpp | 6
-rw-r--r-- src/backends/reference/workloads/LstmUtils.hpp | 4
-rw-r--r-- src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp | 8
-rw-r--r-- src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp | 8
-rw-r--r-- src/backends/reference/workloads/RefConvolution2dWorkload.cpp | 4
-rw-r--r-- src/backends/reference/workloads/RefConvolution2dWorkload.hpp | 4
-rw-r--r-- src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp | 4
-rw-r--r-- src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp | 4
-rw-r--r-- src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp | 2
-rw-r--r-- src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp | 2
-rw-r--r-- src/backends/reference/workloads/RefFullyConnectedWorkload.cpp | 4
-rw-r--r-- src/backends/reference/workloads/RefFullyConnectedWorkload.hpp | 4
-rw-r--r-- src/backends/reference/workloads/RefLstmWorkload.cpp | 42
-rw-r--r-- src/backends/reference/workloads/RefLstmWorkload.hpp | 42
-rw-r--r-- src/backends/reference/workloads/RefQLstmWorkload.cpp | 52
-rw-r--r-- src/backends/reference/workloads/RefQLstmWorkload.hpp | 52
-rw-r--r-- src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp | 4
-rw-r--r-- src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp | 6
-rw-r--r-- src/backends/reference/workloads/RefWorkloadUtils.hpp | 2
118 files changed, 661 insertions(+), 659 deletions(-)
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index cf6da807ca..2b48532961 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -4,9 +4,8 @@
#
list(APPEND armnnBackendsCommon_sources
- CpuTensorHandle.cpp
- CpuTensorHandle.hpp
- CpuTensorHandleFwd.hpp
+ TensorHandle.cpp
+ TensorHandle.hpp
DynamicBackend.cpp
DynamicBackend.hpp
DynamicBackendUtils.cpp
diff --git a/src/backends/backendsCommon/CpuTensorHandleFwd.hpp b/src/backends/backendsCommon/CpuTensorHandleFwd.hpp
deleted file mode 100644
index aef48b5a92..0000000000
--- a/src/backends/backendsCommon/CpuTensorHandleFwd.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-//
-// Copyright © 2019 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is deprecated and will be removed soon.
-// Please use the new header in armnn/backends instead.
-// This will use the new armnn/backends header.
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
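The deleted file above was only a compatibility shim redirecting to the forward-declaration header. For orientation, a forward-declaration header of this kind just names the handle classes so other headers can hold pointers without pulling in the full definitions. A minimal sketch of what the renamed armnn/backends/TensorHandleFwd.hpp plausibly contains (assumed; its contents are not shown in this diff, though the class names are confirmed by the renames below):

// Assumed shape of the replacement forward-declaration header.
namespace armnn
{
class ConstTensorHandle;
class TensorHandle;
class ScopedTensorHandle;
class PassthroughTensorHandle;
class ConstPassthroughTensorHandle;
} // namespace armnn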
diff --git a/src/backends/backendsCommon/MemCopyWorkload.cpp b/src/backends/backendsCommon/MemCopyWorkload.cpp
index 813adefed7..946de30430 100644
--- a/src/backends/backendsCommon/MemCopyWorkload.cpp
+++ b/src/backends/backendsCommon/MemCopyWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
diff --git a/src/backends/backendsCommon/MemCopyWorkload.hpp b/src/backends/backendsCommon/MemCopyWorkload.hpp
index 12664fd527..99845f397f 100644
--- a/src/backends/backendsCommon/MemCopyWorkload.hpp
+++ b/src/backends/backendsCommon/MemCopyWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/MemImportWorkload.cpp b/src/backends/backendsCommon/MemImportWorkload.cpp
index 6584e407e9..844908f27b 100644
--- a/src/backends/backendsCommon/MemImportWorkload.cpp
+++ b/src/backends/backendsCommon/MemImportWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
diff --git a/src/backends/backendsCommon/MemImportWorkload.hpp b/src/backends/backendsCommon/MemImportWorkload.hpp
index 33297fbc6f..d3c57239bd 100644
--- a/src/backends/backendsCommon/MemImportWorkload.hpp
+++ b/src/backends/backendsCommon/MemImportWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/MemSyncWorkload.cpp b/src/backends/backendsCommon/MemSyncWorkload.cpp
index fe04a3024b..9025e665c9 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.cpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.cpp
@@ -6,7 +6,7 @@
#include <ResolveType.hpp>
#include <backendsCommon/MemSyncWorkload.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
diff --git a/src/backends/backendsCommon/MemSyncWorkload.hpp b/src/backends/backendsCommon/MemSyncWorkload.hpp
index 8142f180a6..7b59a0b55f 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.hpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.hpp
@@ -7,7 +7,7 @@
#include "Workload.hpp"
#include "WorkloadUtils.hpp"
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <utility>
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/TensorHandle.cpp
index 192469a633..d4660d6de3 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/TensorHandle.cpp
@@ -1,11 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/Exceptions.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cstring>
@@ -28,54 +28,54 @@ TensorShape GetUnpaddedTensorStrides(const TensorInfo& tensorInfo)
return TensorShape(shape.GetNumDimensions(), strides.data());
}
-ConstCpuTensorHandle::ConstCpuTensorHandle(const TensorInfo& tensorInfo)
+ConstTensorHandle::ConstTensorHandle(const TensorInfo& tensorInfo)
: m_TensorInfo(tensorInfo)
, m_Memory(nullptr)
{
}
template <>
-const void* ConstCpuTensorHandle::GetConstTensor<void>() const
+const void* ConstTensorHandle::GetConstTensor<void>() const
{
return m_Memory;
}
-CpuTensorHandle::CpuTensorHandle(const TensorInfo& tensorInfo)
-: ConstCpuTensorHandle(tensorInfo)
+TensorHandle::TensorHandle(const TensorInfo& tensorInfo)
+: ConstTensorHandle(tensorInfo)
, m_MutableMemory(nullptr)
{
}
template <>
-void* CpuTensorHandle::GetTensor<void>() const
+void* TensorHandle::GetTensor<void>() const
{
return m_MutableMemory;
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const TensorInfo& tensorInfo)
-: CpuTensorHandle(tensorInfo)
+ScopedTensorHandle::ScopedTensorHandle(const TensorInfo& tensorInfo)
+: TensorHandle(tensorInfo)
{
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ConstTensor& tensor)
-: ScopedCpuTensorHandle(tensor.GetInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ConstTensor& tensor)
+: ScopedTensorHandle(tensor.GetInfo())
{
CopyFrom(tensor.GetMemoryArea(), tensor.GetNumBytes());
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ConstCpuTensorHandle& tensorHandle)
-: ScopedCpuTensorHandle(tensorHandle.GetTensorInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ConstTensorHandle& tensorHandle)
+: ScopedTensorHandle(tensorHandle.GetTensorInfo())
{
CopyFrom(tensorHandle.GetConstTensor<void>(), tensorHandle.GetTensorInfo().GetNumBytes());
}
-ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other)
-: CpuTensorHandle(other.GetTensorInfo())
+ScopedTensorHandle::ScopedTensorHandle(const ScopedTensorHandle& other)
+: TensorHandle(other.GetTensorInfo())
{
CopyFrom(other);
}
-ScopedCpuTensorHandle& ScopedCpuTensorHandle::operator=(const ScopedCpuTensorHandle& other)
+ScopedTensorHandle& ScopedTensorHandle::operator=(const ScopedTensorHandle& other)
{
::operator delete(GetTensor<void>());
SetMemory(nullptr);
@@ -83,12 +83,12 @@ ScopedCpuTensorHandle& ScopedCpuTensorHandle::operator=(const ScopedCpuTensorHan
return *this;
}
-ScopedCpuTensorHandle::~ScopedCpuTensorHandle()
+ScopedTensorHandle::~ScopedTensorHandle()
{
::operator delete(GetTensor<void>());
}
-void ScopedCpuTensorHandle::Allocate()
+void ScopedTensorHandle::Allocate()
{
if (GetTensor<void>() == nullptr)
{
@@ -96,27 +96,27 @@ void ScopedCpuTensorHandle::Allocate()
}
else
{
- throw InvalidArgumentException("CpuTensorHandle::Allocate Trying to allocate a CpuTensorHandle"
+ throw InvalidArgumentException("TensorHandle::Allocate Trying to allocate a TensorHandle"
"that already has allocated memory.");
}
}
-void ScopedCpuTensorHandle::CopyOutTo(void* memory) const
+void ScopedTensorHandle::CopyOutTo(void* memory) const
{
memcpy(memory, GetTensor<void>(), GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyInFrom(const void* memory)
+void ScopedTensorHandle::CopyInFrom(const void* memory)
{
memcpy(GetTensor<void>(), memory, GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
+void ScopedTensorHandle::CopyFrom(const ScopedTensorHandle& other)
{
CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes());
}
-void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
+void ScopedTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
{
ARMNN_ASSERT(GetTensor<void>() == nullptr);
ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
@@ -128,14 +128,14 @@ void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numByte
}
}
-void PassthroughCpuTensorHandle::Allocate()
+void PassthroughTensorHandle::Allocate()
{
- throw InvalidArgumentException("PassthroughCpuTensorHandle::Allocate() should never be called");
+ throw InvalidArgumentException("PassthroughTensorHandle::Allocate() should never be called");
}
-void ConstPassthroughCpuTensorHandle::Allocate()
+void ConstPassthroughTensorHandle::Allocate()
{
- throw InvalidArgumentException("ConstPassthroughCpuTensorHandle::Allocate() should never be called");
+ throw InvalidArgumentException("ConstPassthroughTensorHandle::Allocate() should never be called");
}
} // namespace armnn
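The renamed implementation above keeps the original semantics: Allocate() only succeeds on an unallocated handle, and the copy constructor deep-copies through CopyFrom(). A minimal usage sketch of that contract (the shape and data type are illustrative, not taken from this commit):

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

void ScopedHandleSketch()
{
    armnn::TensorInfo info(armnn::TensorShape({2, 2}), armnn::DataType::Float32);

    armnn::ScopedTensorHandle handle(info);
    handle.Allocate();                      // reserves GetNumBytes() of memory
    // handle.Allocate();                   // would throw InvalidArgumentException: already allocated

    armnn::ScopedTensorHandle copy(handle); // deep copy via CopyFrom()
}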
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/TensorHandle.hpp
index fdd2439b41..4e9d87d6eb 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/TensorHandle.hpp
@@ -1,11 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/TypesUtils.hpp>
@@ -23,8 +23,8 @@ namespace armnn
// of a tensor, assuming fully packed data with no padding
TensorShape GetUnpaddedTensorStrides(const TensorInfo& tensorInfo);
-// Abstract tensor handles wrapping a CPU-readable region of memory, interpreting it as tensor data.
-class ConstCpuTensorHandle : public ITensorHandle
+// Abstract tensor handles wrapping a readable region of memory, interpreting it as tensor data.
+class ConstTensorHandle : public ITensorHandle
{
public:
template <typename T>
@@ -53,7 +53,7 @@ public:
TensorShape GetShape() const override { return m_TensorInfo.GetShape(); }
protected:
- ConstCpuTensorHandle(const TensorInfo& tensorInfo);
+ ConstTensorHandle(const TensorInfo& tensorInfo);
void SetConstMemory(const void* mem) { m_Memory = mem; }
@@ -62,18 +62,18 @@ private:
void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
- ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
- ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
+ ConstTensorHandle(const ConstTensorHandle& other) = delete;
+ ConstTensorHandle& operator=(const ConstTensorHandle& other) = delete;
TensorInfo m_TensorInfo;
const void* m_Memory;
};
template<>
-const void* ConstCpuTensorHandle::GetConstTensor<void>() const;
+const void* ConstTensorHandle::GetConstTensor<void>() const;
-// Abstract specialization of ConstCpuTensorHandle that allows write access to the same data.
-class CpuTensorHandle : public ConstCpuTensorHandle
+// Abstract specialization of ConstTensorHandle that allows write access to the same data.
+class TensorHandle : public ConstTensorHandle
{
public:
template <typename T>
@@ -84,7 +84,7 @@ public:
}
protected:
- CpuTensorHandle(const TensorInfo& tensorInfo);
+ TensorHandle(const TensorInfo& tensorInfo);
void SetMemory(void* mem)
{
@@ -94,29 +94,29 @@ protected:
private:
- CpuTensorHandle(const CpuTensorHandle& other) = delete;
- CpuTensorHandle& operator=(const CpuTensorHandle& other) = delete;
+ TensorHandle(const TensorHandle& other) = delete;
+ TensorHandle& operator=(const TensorHandle& other) = delete;
void* m_MutableMemory;
};
template <>
-void* CpuTensorHandle::GetTensor<void>() const;
+void* TensorHandle::GetTensor<void>() const;
-// A CpuTensorHandle that owns the wrapped memory region.
-class ScopedCpuTensorHandle : public CpuTensorHandle
+// A TensorHandle that owns the wrapped memory region.
+class ScopedTensorHandle : public TensorHandle
{
public:
- explicit ScopedCpuTensorHandle(const TensorInfo& tensorInfo);
+ explicit ScopedTensorHandle(const TensorInfo& tensorInfo);
// Copies contents from Tensor.
- explicit ScopedCpuTensorHandle(const ConstTensor& tensor);
+ explicit ScopedTensorHandle(const ConstTensor& tensor);
- // Copies contents from ConstCpuTensorHandle
- explicit ScopedCpuTensorHandle(const ConstCpuTensorHandle& tensorHandle);
+ // Copies contents from ConstTensorHandle
+ explicit ScopedTensorHandle(const ConstTensorHandle& tensorHandle);
- ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other);
- ScopedCpuTensorHandle& operator=(const ScopedCpuTensorHandle& other);
- ~ScopedCpuTensorHandle();
+ ScopedTensorHandle(const ScopedTensorHandle& other);
+ ScopedTensorHandle& operator=(const ScopedTensorHandle& other);
+ ~ScopedTensorHandle();
virtual void Allocate() override;
@@ -125,21 +125,21 @@ private:
void CopyOutTo(void* memory) const override;
void CopyInFrom(const void* memory) override;
- void CopyFrom(const ScopedCpuTensorHandle& other);
+ void CopyFrom(const ScopedTensorHandle& other);
void CopyFrom(const void* srcMemory, unsigned int numBytes);
};
-// A CpuTensorHandle that wraps an already allocated memory region.
+// A TensorHandle that wraps an already allocated memory region.
//
// Clients must make sure the passed in memory region stays alive for the lifetime of
-// the PassthroughCpuTensorHandle instance.
+// the PassthroughTensorHandle instance.
//
-// Note there is no polymorphism to/from ConstPassthroughCpuTensorHandle.
-class PassthroughCpuTensorHandle : public CpuTensorHandle
+// Note there is no polymorphism to/from ConstPassthroughTensorHandle.
+class PassthroughTensorHandle : public TensorHandle
{
public:
- PassthroughCpuTensorHandle(const TensorInfo& tensorInfo, void* mem)
- : CpuTensorHandle(tensorInfo)
+ PassthroughTensorHandle(const TensorInfo& tensorInfo, void* mem)
+ : TensorHandle(tensorInfo)
{
SetMemory(mem);
}
@@ -147,18 +147,18 @@ public:
virtual void Allocate() override;
};
-// A ConstCpuTensorHandle that wraps an already allocated memory region.
+// A ConstTensorHandle that wraps an already allocated memory region.
//
// This allows users to pass in const memory to a network.
// Clients must make sure the passed in memory region stays alive for the lifetime of
-// the PassthroughCpuTensorHandle instance.
+// the ConstPassthroughTensorHandle instance.
//
-// Note there is no polymorphism to/from PassthroughCpuTensorHandle.
-class ConstPassthroughCpuTensorHandle : public ConstCpuTensorHandle
+// Note there is no polymorphism to/from PassthroughTensorHandle.
+class ConstPassthroughTensorHandle : public ConstTensorHandle
{
public:
- ConstPassthroughCpuTensorHandle(const TensorInfo& tensorInfo, const void* mem)
- : ConstCpuTensorHandle(tensorInfo)
+ ConstPassthroughTensorHandle(const TensorInfo& tensorInfo, const void* mem)
+ : ConstTensorHandle(tensorInfo)
{
SetConstMemory(mem);
}
@@ -170,16 +170,16 @@ public:
// Template specializations.
template <>
-const void* ConstCpuTensorHandle::GetConstTensor() const;
+const void* ConstTensorHandle::GetConstTensor() const;
template <>
-void* CpuTensorHandle::GetTensor() const;
+void* TensorHandle::GetTensor() const;
class ManagedConstTensorHandle
{
public:
- explicit ManagedConstTensorHandle(std::shared_ptr<ConstCpuTensorHandle> ptr)
+ explicit ManagedConstTensorHandle(std::shared_ptr<ConstTensorHandle> ptr)
: m_Mapped(false)
, m_TensorHandle(std::move(ptr)) {};
@@ -200,7 +200,7 @@ public:
}
// Delete copy constructor as it's unnecessary
- ManagedConstTensorHandle(const ConstCpuTensorHandle& other) = delete;
+ ManagedConstTensorHandle(const ConstTensorHandle& other) = delete;
// Delete copy assignment as it's unnecessary
ManagedConstTensorHandle& operator=(const ManagedConstTensorHandle& other) = delete;
@@ -239,7 +239,19 @@ public:
private:
bool m_Mapped;
- std::shared_ptr<ConstCpuTensorHandle> m_TensorHandle;
+ std::shared_ptr<ConstTensorHandle> m_TensorHandle;
};
+using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstCpuTensorHandle is deprecated, "
+ "use ConstTensorHandle instead") = ConstTensorHandle;
+using CpuTensorHandle ARMNN_DEPRECATED_MSG("CpuTensorHandle is deprecated, "
+ "use TensorHandle instead") = TensorHandle;
+using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG("ScopedCpuTensorHandle is deprecated, "
+ "use ScopedTensorHandle instead") = ScopedTensorHandle;
+using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("PassthroughCpuTensorHandle is deprecated, use "
+ "PassthroughTensorHandle instead") = PassthroughTensorHandle;
+using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstPassthroughCpuTensorHandle is "
+ "deprecated, use ConstPassthroughTensorHandle "
+ "instead") = ConstPassthroughTensorHandle;
+
} // namespace armnn
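The using-aliases added at the end of the header are what keep out-of-tree code compiling across this rename. A small migration sketch, assuming ARMNN_DEPRECATED_MSG expands to a compiler deprecation attribute so the old spellings warn rather than fail:

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

void MigrationSketch()
{
    armnn::TensorInfo info(armnn::TensorShape({1, 8}), armnn::DataType::Float32);

    armnn::ScopedCpuTensorHandle oldName(info); // still compiles, emits a deprecation warning
    armnn::ScopedTensorHandle    newName(info); // post-rename spelling to migrate to
}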
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 470d460ef3..be0ac707a8 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index abaa4f5185..77d4209657 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <armnn/backends/CpuTensorHandleFwd.hpp>
+#include <armnn/backends/TensorHandleFwd.hpp>
#include <armnn/backends/ITensorHandle.hpp>
#include <InternalTypes.hpp>
@@ -175,8 +175,8 @@ struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnec
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -202,8 +202,8 @@ struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -217,8 +217,8 @@ struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<Dep
{
}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -230,7 +230,7 @@ struct DetectionPostProcessQueueDescriptor : QueueDescriptorWithParameters<Detec
{
}
- const ConstCpuTensorHandle* m_Anchors;
+ const ConstTensorHandle* m_Anchors;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -305,10 +305,10 @@ struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNo
{
}
- const ConstCpuTensorHandle* m_Mean;
- const ConstCpuTensorHandle* m_Variance;
- const ConstCpuTensorHandle* m_Beta;
- const ConstCpuTensorHandle* m_Gamma;
+ const ConstTensorHandle* m_Mean;
+ const ConstTensorHandle* m_Variance;
+ const ConstTensorHandle* m_Beta;
+ const ConstTensorHandle* m_Gamma;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -336,8 +336,8 @@ struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuant
{
}
- const ConstCpuTensorHandle* m_Min;
- const ConstCpuTensorHandle* m_Max;
+ const ConstTensorHandle* m_Min;
+ const ConstTensorHandle* m_Max;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -364,7 +364,7 @@ struct ConstantQueueDescriptor : QueueDescriptor
{
}
- const ConstCpuTensorHandle* m_LayerOutput;
+ const ConstTensorHandle* m_LayerOutput;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -416,27 +416,27 @@ struct LstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
{
}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_CellToInputWeights;
- const ConstCpuTensorHandle* m_CellToForgetWeights;
- const ConstCpuTensorHandle* m_CellToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
- const ConstCpuTensorHandle* m_ProjectionWeights;
- const ConstCpuTensorHandle* m_ProjectionBias;
- const ConstCpuTensorHandle* m_InputLayerNormWeights;
- const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
- const ConstCpuTensorHandle* m_CellLayerNormWeights;
- const ConstCpuTensorHandle* m_OutputLayerNormWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_CellToInputWeights;
+ const ConstTensorHandle* m_CellToForgetWeights;
+ const ConstTensorHandle* m_CellToOutputWeights;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_ProjectionWeights;
+ const ConstTensorHandle* m_ProjectionBias;
+ const ConstTensorHandle* m_InputLayerNormWeights;
+ const ConstTensorHandle* m_ForgetLayerNormWeights;
+ const ConstTensorHandle* m_CellLayerNormWeights;
+ const ConstTensorHandle* m_OutputLayerNormWeights;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -543,8 +543,8 @@ struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<Tra
m_Bias(nullptr)
{}
- const ConstCpuTensorHandle* m_Weight;
- const ConstCpuTensorHandle* m_Bias;
+ const ConstTensorHandle* m_Weight;
+ const ConstTensorHandle* m_Bias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -581,27 +581,27 @@ struct QLstmQueueDescriptor : QueueDescriptorWithParameters<QLstmDescriptor>
{
}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_CellToInputWeights;
- const ConstCpuTensorHandle* m_CellToForgetWeights;
- const ConstCpuTensorHandle* m_CellToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
- const ConstCpuTensorHandle* m_ProjectionWeights;
- const ConstCpuTensorHandle* m_ProjectionBias;
- const ConstCpuTensorHandle* m_InputLayerNormWeights;
- const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
- const ConstCpuTensorHandle* m_CellLayerNormWeights;
- const ConstCpuTensorHandle* m_OutputLayerNormWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_CellToInputWeights;
+ const ConstTensorHandle* m_CellToForgetWeights;
+ const ConstTensorHandle* m_CellToOutputWeights;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_ProjectionWeights;
+ const ConstTensorHandle* m_ProjectionBias;
+ const ConstTensorHandle* m_InputLayerNormWeights;
+ const ConstTensorHandle* m_ForgetLayerNormWeights;
+ const ConstTensorHandle* m_CellLayerNormWeights;
+ const ConstTensorHandle* m_OutputLayerNormWeights;
void Validate(const WorkloadInfo& workloadInfo) const;
};
@@ -625,20 +625,20 @@ struct QuantizedLstmQueueDescriptor : QueueDescriptor
, m_OutputGateBias(nullptr)
{}
- const ConstCpuTensorHandle* m_InputToInputWeights;
- const ConstCpuTensorHandle* m_InputToForgetWeights;
- const ConstCpuTensorHandle* m_InputToCellWeights;
- const ConstCpuTensorHandle* m_InputToOutputWeights;
+ const ConstTensorHandle* m_InputToInputWeights;
+ const ConstTensorHandle* m_InputToForgetWeights;
+ const ConstTensorHandle* m_InputToCellWeights;
+ const ConstTensorHandle* m_InputToOutputWeights;
- const ConstCpuTensorHandle* m_RecurrentToInputWeights;
- const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
- const ConstCpuTensorHandle* m_RecurrentToCellWeights;
- const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
+ const ConstTensorHandle* m_RecurrentToInputWeights;
+ const ConstTensorHandle* m_RecurrentToForgetWeights;
+ const ConstTensorHandle* m_RecurrentToCellWeights;
+ const ConstTensorHandle* m_RecurrentToOutputWeights;
- const ConstCpuTensorHandle* m_InputGateBias;
- const ConstCpuTensorHandle* m_ForgetGateBias;
- const ConstCpuTensorHandle* m_CellBias;
- const ConstCpuTensorHandle* m_OutputGateBias;
+ const ConstTensorHandle* m_InputGateBias;
+ const ConstTensorHandle* m_ForgetGateBias;
+ const ConstTensorHandle* m_CellBias;
+ const ConstTensorHandle* m_OutputGateBias;
void Validate(const WorkloadInfo& workloadInfo) const;
};
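Throughout WorkloadData.hpp the descriptors hold non-owning const ConstTensorHandle* members, so whatever owns the constant data (typically a ScopedTensorHandle, as in the tests further down) must outlive the workload built from the descriptor. A minimal wiring sketch (weightInfo is an assumed TensorInfo, not from this commit):

#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>

void DescriptorSketch(const armnn::TensorInfo& weightInfo)
{
    armnn::ScopedTensorHandle weights(weightInfo); // owns the weight memory
    weights.Allocate();

    armnn::FullyConnectedQueueDescriptor descriptor;
    descriptor.m_Weight = &weights;                // descriptor only borrows the handle
}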
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 733d77e427..c5fc9d0fe2 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -15,7 +15,7 @@
#include <armnn/utility/TransformIterator.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 5886630cd9..c8105aea04 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -11,7 +11,7 @@
namespace armnn
{
-armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
+armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
const PermutationVector& permutationVector, void* permuteBuffer)
{
ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
@@ -130,7 +130,7 @@ TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, D
return weightPermutedInfo;
}
-armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
+armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* weightTensor,
DataLayout dataLayout,
void* permuteBuffer)
{
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 354362ec8f..06d2eccf3e 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include "CpuTensorHandle.hpp"
+#include "TensorHandle.hpp"
#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/Tensor.hpp>
@@ -206,7 +206,7 @@ void GatherTensorHandlePairs(const DescriptorType& descriptor,
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim);
-armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
+armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
const PermutationVector& permutationVector,
void* permuteBuffer);
@@ -214,7 +214,7 @@ void ReshapeWeightsForAcl(TensorInfo& weightInfo, DataLayout dataLayout);
TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo& weightInfo, DataLayout dataLayout);
-armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle* weightTensor,
+armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstTensorHandle* weightTensor,
DataLayout dataLayout,
void* permuteBuffer);
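The helper signatures above change only the handle type; the call pattern is unchanged. A hedged sketch of calling PermuteTensor as declared here (the permutation mapping and the buffer sizing are illustrative assumptions):

#include <backendsCommon/WorkloadUtils.hpp>
#include <vector>

armnn::ConstTensor PermuteSketch(const armnn::ConstTensorHandle& weightHandle)
{
    // The caller supplies scratch space large enough to hold the permuted tensor.
    std::vector<uint8_t> permuteBuffer(weightHandle.GetTensorInfo().GetNumBytes());

    const armnn::PermutationVector permutation({3, 0, 1, 2}); // example mapping only
    return armnn::PermuteTensor(&weightHandle, permutation, permuteBuffer.data());
}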
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 6e4a8c75d2..63d768eca5 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -8,7 +8,7 @@
# file in the root of ArmNN
COMMON_SOURCES := \
- CpuTensorHandle.cpp \
+ TensorHandle.cpp \
DynamicBackend.cpp \
DynamicBackendUtils.cpp \
IBackendInternal.cpp \
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
index 8c4da621ed..99412b9694 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.hpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -13,7 +13,7 @@
#include <armnn/BackendRegistry.hpp>
#include <armnn/Types.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <test/TestUtils.hpp>
@@ -72,8 +72,8 @@ bool Compare(T a, T b, float tolerance = 0.000001f)
template <typename ConvolutionLayer>
void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo)
{
- layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weightInfo);
- layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(biasInfo);
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
+ layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(biasInfo);
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
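SetWeightAndBias above only swaps the ScopedCpuTensorHandle construction for ScopedTensorHandle; the test-side call pattern is untouched. A usage sketch under assumed test setup (the graph, descriptor, and tensor infos are illustrative, and the include paths follow the test files in this diff):

#include <Graph.hpp>
#include <armnn/Descriptors.hpp>
#include "CommonTestUtils.hpp"

void SetWeightAndBiasSketch(const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo)
{
    armnn::Graph graph;
    auto* layer = graph.AddLayer<armnn::Convolution2dLayer>(armnn::Convolution2dDescriptor(), "conv");

    SetWeightAndBias(layer, weightInfo, biasInfo); // allocates layer->m_Weight and layer->m_Bias
}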
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index 56a794e77c..2dd5298059 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -5,7 +5,7 @@
#include <armnn/Exceptions.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <boost/test/unit_test.hpp>
@@ -121,15 +121,15 @@ BOOST_AUTO_TEST_CASE(TestAsyncExecute)
ConstTensor constInputTensor(info, inVals);
ConstTensor constOutputTensor(info, outVals);
- ScopedCpuTensorHandle syncInput0(constInputTensor);
- ScopedCpuTensorHandle syncOutput0(constOutputTensor);
+ ScopedTensorHandle syncInput0(constInputTensor);
+ ScopedTensorHandle syncOutput0(constOutputTensor);
std::unique_ptr<Workload0> workload0 = CreateWorkload<Workload0>(info, &syncInput0, &syncOutput0);
workload0.get()->Execute();
- ScopedCpuTensorHandle asyncInput0(constInputTensor);
- ScopedCpuTensorHandle asyncOutput0(constOutputTensor);
+ ScopedTensorHandle asyncInput0(constInputTensor);
+ ScopedTensorHandle asyncOutput0(constOutputTensor);
WorkingMemDescriptor workingMemDescriptor0;
workingMemDescriptor0.m_Inputs = std::vector<ITensorHandle*>{&asyncInput0};
@@ -159,13 +159,13 @@ BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecute)
ConstTensor constOutputTensor(info, outVals);
ConstTensor defaultTensor(info, &defaultVals);
- ScopedCpuTensorHandle defaultInput = ScopedCpuTensorHandle(defaultTensor);
- ScopedCpuTensorHandle defaultOutput = ScopedCpuTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultInput = ScopedTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultOutput = ScopedTensorHandle(defaultTensor);
std::unique_ptr<Workload1> workload1 = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);
- ScopedCpuTensorHandle asyncInput(constInputTensor);
- ScopedCpuTensorHandle asyncOutput(constOutputTensor);
+ ScopedTensorHandle asyncInput(constInputTensor);
+ ScopedTensorHandle asyncOutput(constOutputTensor);
WorkingMemDescriptor workingMemDescriptor;
workingMemDescriptor.m_Inputs = std::vector<ITensorHandle*>{&asyncInput};
@@ -202,20 +202,20 @@ BOOST_AUTO_TEST_CASE(TestDefaultAsyncExeuteWithThreads)
ConstTensor defaultTensor(info, &defaultVals);
- ScopedCpuTensorHandle defaultInput = ScopedCpuTensorHandle(defaultTensor);
- ScopedCpuTensorHandle defaultOutput = ScopedCpuTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultInput = ScopedTensorHandle(defaultTensor);
+ ScopedTensorHandle defaultOutput = ScopedTensorHandle(defaultTensor);
std::unique_ptr<Workload1> workload = CreateWorkload<Workload1>(info, &defaultInput, &defaultOutput);
- ScopedCpuTensorHandle asyncInput1(constInputTensor1);
- ScopedCpuTensorHandle asyncOutput1(constOutputTensor1);
+ ScopedTensorHandle asyncInput1(constInputTensor1);
+ ScopedTensorHandle asyncOutput1(constOutputTensor1);
WorkingMemDescriptor workingMemDescriptor1;
workingMemDescriptor1.m_Inputs = std::vector<ITensorHandle*>{&asyncInput1};
workingMemDescriptor1.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput1};
- ScopedCpuTensorHandle asyncInput2(constInputTensor2);
- ScopedCpuTensorHandle asyncOutput2(constOutputTensor2);
+ ScopedTensorHandle asyncInput2(constInputTensor2);
+ ScopedTensorHandle asyncOutput2(constOutputTensor2);
WorkingMemDescriptor workingMemDescriptor2;
workingMemDescriptor2.m_Inputs = std::vector<ITensorHandle*>{&asyncInput2};
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 8302bfd57d..a4f1613a58 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -9,8 +9,8 @@
#include <armnn/backends/DynamicBackend.hpp>
#include <armnn/backends/ILayerSupport.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/DynamicBackendUtils.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <Filesystem.hpp>
#include <reference/workloads/RefConvolution2dWorkload.hpp>
#include <Runtime.hpp>
@@ -1473,7 +1473,7 @@ void CreateReferenceDynamicBackendTestImpl()
{ outputInfo }
};
convolution2dQueueDescriptor.m_Inputs.push_back(nullptr);
- auto weights = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ auto weights = std::make_unique<ScopedTensorHandle>(weightInfo);
convolution2dQueueDescriptor.m_Weight = weights.get();
// Create a convolution workload with the dummy settings
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b73efbe26c..4240bb1061 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -83,13 +83,13 @@ struct DummyLayer<armnn::BatchNormalizationLayer>
DummyLayer()
{
m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
- m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Mean = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Beta = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Gamma = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -240,9 +240,9 @@ struct DummyConvolutionLayer
desc.m_StrideX = 1;
desc.m_StrideY = 1;
m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -278,7 +278,7 @@ struct DummyLayer<armnn::DetectionPostProcessLayer>
DummyLayer()
{
m_Layer = dummyGraph.AddLayer<armnn::DetectionPostProcessLayer>(armnn::DetectionPostProcessDescriptor(), "");
- m_Layer->m_Anchors = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -299,30 +299,30 @@ struct DummyLstmLayer
desc.m_CifgEnabled = false;
m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
- m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
- m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
@@ -354,57 +354,57 @@ struct DummyQLstmLayer
m_Layer = dummyGraph.AddLayer<QLstmLayerType>(armnn::QLstmDescriptor(), "qLstm");
// Basic params
- m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// CIFG optional params
- m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// Projection optional params
- m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS8));
- m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_ProjectionParameters.m_ProjectionBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
// Peephole optional params
- m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
// Layer normalization optional params
- m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_InputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_CellLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
- m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QSymmS16));
}
@@ -423,31 +423,31 @@ struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
- m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
- m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
- m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
}
@@ -466,7 +466,7 @@ struct DummyLayer<armnn::FullyConnectedLayer>
{
armnn::FullyConnectedLayer::DescriptorType desc;
m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
- m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+ m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
}
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 817cdeed79..0ca4b0a7f9 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -7,7 +7,7 @@
#include <Graph.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <boost/test/unit_test.hpp>
@@ -35,10 +35,10 @@ BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
- layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
layer->m_Mean->Allocate();
layer->m_Variance->Allocate();
layer->m_Beta->Allocate();
@@ -87,9 +87,9 @@ BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({2, 3, 5, 3},
armnn::DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
(TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
layer->m_Weight->Allocate();
@@ -131,8 +131,8 @@ BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({9}, DataType::Float32));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
@@ -170,9 +170,9 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
float inputsQScale = 1.0f;
float outputQScale = 2.0f;
- layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20},
DataType::QAsymmU8, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7},
GetBiasDataType(DataType::QAsymmU8), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 5ac548f42a..182c913777 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -7,7 +7,7 @@
#include <armnn/Exceptions.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
#include <reference/workloads/RefWorkloads.hpp>
@@ -32,7 +32,7 @@ BOOST_AUTO_TEST_CASE(BatchNormalizationQueueDescriptor_Validate_DifferentQuantiz
unsigned int sameShape[] = { 10 };
TensorInfo sameInfo = armnn::TensorInfo(1, sameShape, armnn::DataType::QAsymmU8);
- ScopedCpuTensorHandle sameTensor(sameInfo);
+ ScopedTensorHandle sameTensor(sameInfo);
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
@@ -136,8 +136,8 @@ BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
FullyConnectedQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
- ScopedCpuTensorHandle weightTensor(weightsDesc);
- ScopedCpuTensorHandle biasTensor(biasesDesc);
+ ScopedTensorHandle weightTensor(weightsDesc);
+ ScopedTensorHandle biasTensor(biasesDesc);
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
@@ -515,27 +515,27 @@ BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
// AddOutputToWorkload(data, info, outputTensorInfo, nullptr); is left out
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
data.m_InputToInputWeights = &inputToInputWeightsTensor;
data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
@@ -657,14 +657,14 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
- ScopedCpuTensorHandle weightTensor(weightInfo);
+ ScopedTensorHandle weightTensor(weightInfo);
queueDescriptor.m_Weight = &weightTensor;
// Test 1: correct per-axis quantization values
const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);
- ScopedCpuTensorHandle biasHandle1(biasInfo1);
+ ScopedTensorHandle biasHandle1(biasInfo1);
queueDescriptor.m_Bias = &biasHandle1;
BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
@@ -673,7 +673,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);
- ScopedCpuTensorHandle biasHandle2(biasInfo2);
+ ScopedTensorHandle biasHandle2(biasInfo2);
queueDescriptor.m_Bias = &biasHandle2;
BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
@@ -682,7 +682,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);
- ScopedCpuTensorHandle biasHandle3(biasInfo3);
+ ScopedTensorHandle biasHandle3(biasInfo3);
queueDescriptor.m_Bias = &biasHandle3;
BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index eb4f461eb9..969d5dbcd1 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>
@@ -74,10 +74,10 @@ LayerTestResult<T, 4> BatchNormTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
armnn::BatchNormalizationQueueDescriptor descriptor;
descriptor.m_Mean = &meanTensor;
@@ -160,10 +160,10 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
@@ -644,10 +644,10 @@ LayerTestResult<float,4> CompareBatchNormTest(
armnn::BatchNormalizationQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
- armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ armnn::ScopedTensorHandle meanTensor(tensorInfo);
+ armnn::ScopedTensorHandle varianceTensor(tensorInfo);
+ armnn::ScopedTensorHandle betaTensor(tensorInfo);
+ armnn::ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index 45c94d345b..c28ef40b45 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -11,7 +11,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -101,7 +101,7 @@ LayerTestResult<T, 4> ConstantTestImpl(
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
+ armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
armnn::ConstantQueueDescriptor descriptor;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 4641e67aad..8f60415a66 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -13,7 +13,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -318,8 +318,8 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
// Permute the kernel if necessary
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
if (layout == armnn::DataLayout::NHWC)
@@ -423,10 +423,10 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
armnn::Convolution2dQueueDescriptor data;
@@ -547,8 +547,8 @@ LayerTestResult<T,4> Convolution1dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
- armnn::ScopedCpuTensorHandle biasTensor(biasInfo);
+ armnn::ScopedTensorHandle weightsTensor(kernelInfo);
+ armnn::ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -1349,8 +1349,8 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1722,11 +1722,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
if (biasEnabled)
{
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -1882,8 +1882,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2095,8 +2095,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -2252,8 +2252,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
@@ -3007,8 +3007,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ armnn::ScopedTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedTensorHandle biasTensor(biasDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
@@ -3502,8 +3502,8 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
@@ -3756,8 +3756,8 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 3ee1fadd81..f68082762c 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -8,7 +8,7 @@
#include <armnn/Types.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -181,7 +181,7 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
auto outputScoresHandle = tensorHandleFactory.CreateTensorHandle(detectionScoresInfo);
auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);
- armnn::ScopedCpuTensorHandle anchorsTensor(anchorsInfo);
+ armnn::ScopedTensorHandle anchorsTensor(anchorsInfo);
AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);
armnn::DetectionPostProcessQueueDescriptor data;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index f8644007f2..157df99d64 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -6,7 +6,7 @@
#include "FakeQuantizationTestImpl.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -48,7 +48,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
data.m_Parameters.m_Min = min;
data.m_Parameters.m_Max = max;
- armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+ armnn::PassthroughTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
armnn::FakeQuantizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 9176094eb2..cd7f4efe31 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -8,7 +8,7 @@
#include <QuantizeHelper.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -40,8 +40,8 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
armnn::FullyConnectedQueueDescriptor data;
armnn::WorkloadInfo info;
- armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
- armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+ armnn::ScopedTensorHandle weightsTensor(weightsDesc);
+ armnn::ScopedTensorHandle biasTensor(biasesDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 2e205dd58e..24a4dc4789 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 7ee7a3465b..f32d367d37 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -10,7 +10,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 07a1f1e879..7a9652a8ea 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -9,7 +9,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -269,19 +269,19 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -971,23 +971,23 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
std::vector<float> projectionBiasVector(outputSize, 0.f);
auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo20);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1142,21 +1142,21 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1455,28 +1455,28 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
- armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
- armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
-
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
+ armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
+ armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
+
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -1673,21 +1673,21 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -1891,22 +1891,22 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
@@ -2145,28 +2145,28 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
@@ -2411,24 +2411,24 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
{-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
- // ScopedCpuTensorHandles
- armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
+ // ScopedTensorHandles
+ armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
+ armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
+ armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
- armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
+ armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
- armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
+ armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
- armnn::ScopedCpuTensorHandle projectionWeightsTensor(projectionWeightsInfo);
+ armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
// Allocate and copy data
AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 2e8e16f0c2..b52dcd5303 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/NumericCast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -75,7 +75,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
@@ -219,7 +219,7 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
data.m_Parameters.m_K = kappa;
data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
- armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+ armnn::PassthroughTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
armnn::NormalizationQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index c4cc914115..9688ce49f2 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -9,7 +9,7 @@
#include <ResolveType.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 328e724b54..85ce7e5e6f 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -10,7 +10,7 @@
#include <armnnUtils/Permute.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -68,7 +68,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
}
// set up weights
- ScopedCpuTensorHandle weightsTensor(weights.first);
+ ScopedTensorHandle weightsTensor(weights.first);
TransposeConvolution2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
@@ -76,11 +76,11 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
- std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
+ std::unique_ptr<ScopedTensorHandle> biasesTensor;
if (descriptor.m_BiasEnabled)
{
// set up biases
- biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
+ biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
queueDescriptor.m_Bias = biasesTensor.get();
AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
@@ -643,8 +643,8 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 19ad773338..0d8d0a7c28 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -16,10 +16,10 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index e22479c25b..47e2f4e8d7 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -395,8 +395,8 @@ BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
WorkloadInfo workloadInfo;
- ScopedCpuTensorHandle weightTensor(kernelInfo);
- ScopedCpuTensorHandle biasTensor(biasInfo);
+ ScopedTensorHandle weightTensor(kernelInfo);
+ ScopedTensorHandle biasTensor(biasInfo);
AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 2b8b0d48a7..794a45fa48 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -10,7 +10,7 @@
#include <layers/MeanLayer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClWorkloadFactory.hpp>
#include <cl/test/ClContextControlFixture.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index b7d274fdca..5afafcb783 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -10,7 +10,7 @@
#include <Graph.hpp>
#include <Optimizer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 0e1f28ec4e..1b86d2e304 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -9,7 +9,7 @@
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <cl/ClContextControl.hpp>
@@ -79,10 +79,10 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
BatchNormalizationQueueDescriptor data;
WorkloadInfo info;
- ScopedCpuTensorHandle meanTensor(tensorInfo);
- ScopedCpuTensorHandle varianceTensor(tensorInfo);
- ScopedCpuTensorHandle betaTensor(tensorInfo);
- ScopedCpuTensorHandle gammaTensor(tensorInfo);
+ ScopedTensorHandle meanTensor(tensorInfo);
+ ScopedTensorHandle varianceTensor(tensorInfo);
+ ScopedTensorHandle betaTensor(tensorInfo);
+ ScopedTensorHandle gammaTensor(tensorInfo);
AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index 8997a9720d..e2f64a9d7e 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -7,7 +7,7 @@
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index 0ab7446026..4793cc6f8f 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClAdditionWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index 8974930afa..7475cfa315 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index daaed17a90..361d6f87a5 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index 8978c5a66e..b9736db642 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClBatchToSpaceNdWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClComparisonWorkload.cpp b/src/backends/cl/workloads/ClComparisonWorkload.cpp
index 20e5669807..35e6d68733 100644
--- a/src/backends/cl/workloads/ClComparisonWorkload.cpp
+++ b/src/backends/cl/workloads/ClComparisonWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 3c5f23742a..1c2d476e0c 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index 40acb8ebd0..60dcd59268 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -8,7 +8,7 @@
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 99a981bd0c..5c731aa0a1 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -12,7 +12,7 @@
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 655f0c9c35..50cdb0a626 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -12,7 +12,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -137,7 +137,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
- ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
+ ScopedTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
if (m_BiasTensor)
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index 52d8fab93c..42cc579a8c 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/core/Types.h>
diff --git a/src/backends/cl/workloads/ClDivisionWorkload.cpp b/src/backends/cl/workloads/ClDivisionWorkload.cpp
index be5f3b8225..76220a1b64 100644
--- a/src/backends/cl/workloads/ClDivisionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClDivisionWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 9135d27376..d1d911ac13 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClFullyConnectedWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index bd38219a3e..984f21a4db 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClL2NormalizationFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index 58cc735704..908f20bfe5 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClLstmFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClMaximumWorkload.cpp b/src/backends/cl/workloads/ClMaximumWorkload.cpp
index 85bea47f21..0aa15e5dd3 100644
--- a/src/backends/cl/workloads/ClMaximumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMaximumWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClMinimumWorkload.cpp b/src/backends/cl/workloads/ClMinimumWorkload.cpp
index 07a78b5008..4924002432 100644
--- a/src/backends/cl/workloads/ClMinimumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMinimumWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 31e9d022cc..2bd1e1615a 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClMultiplicationWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index 290d29ae52..e9b2caf6ee 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClNormalizationFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClPreluWorkload.cpp b/src/backends/cl/workloads/ClPreluWorkload.cpp
index 73fa887532..9b45441b02 100644
--- a/src/backends/cl/workloads/ClPreluWorkload.cpp
+++ b/src/backends/cl/workloads/ClPreluWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClPreluWorkload.hpp"
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClQuantizeWorkload.cpp b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
index 5c945e0921..527c64013b 100644
--- a/src/backends/cl/workloads/ClQuantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
@@ -9,7 +9,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
index 636bdecbeb..d50414b1cf 100644
--- a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClQuantizedLstmWorkload.hpp"
#include "ClWorkloadUtils.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index 0988babf23..1f82cfbee2 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -5,7 +5,7 @@
#include "ClReshapeWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "ClWorkloadUtils.hpp"
diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
index e47740624e..3406011d04 100644
--- a/src/backends/cl/workloads/ClResizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index 7b29cded0f..3aa8ebd2a8 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index 7a590d26b6..67487c4bf1 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index 70a817825e..8eb58c967e 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 749282f53a..38c76eb648 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 92e860fc42..adf32ce1fc 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 31e0becfd8..e320fec342 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -6,7 +6,7 @@
#include "ClSubtractionWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index ff0fd5c168..b40b4b10ca 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -14,7 +14,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h>
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index b0cc8b4ed5..467505d55b 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/OpenClTimer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Utils.hpp>
@@ -88,7 +88,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
}
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
- const ConstCpuTensorHandle* handle)
+ const ConstTensorHandle* handle)
{
ARMNN_ASSERT(handle);
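Note: only the signature change is visible in this hunk. A hedged sketch of what a helper like this plausibly does with the renamed type (the Float32-only switch is an illustrative reduction, not the file's full body):

    inline void InitializeClTensorDataSketch(arm_compute::CLTensor& clTensor,
                                             const armnn::ConstTensorHandle* handle)
    {
        ARMNN_ASSERT(handle);
        // Copy the mapped constant data into the CL tensor, dispatching on type.
        switch (handle->GetTensorInfo().GetDataType())
        {
            case armnn::DataType::Float32:
                CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
                break;
            default:
                ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
        }
    }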
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 9967fb8604..8751d8ca2c 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -15,10 +15,10 @@
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index a14122f573..13a4c732a4 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -9,7 +9,7 @@
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonWorkloadFactory.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index d9edca1b6d..9acd0e41e2 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -8,7 +8,7 @@
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <neon/NeonTimer.hpp>
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index 9300b317a9..5891677c0d 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 6290ecce17..cc85791ae6 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index 33480faf69..5da7cca83e 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h>
diff --git a/src/backends/neon/workloads/NeonComparisonWorkload.cpp b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
index 6e1f208228..01a6a0c78b 100644
--- a/src/backends/neon/workloads/NeonComparisonWorkload.cpp
+++ b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index a8f6dbed23..5cd906da41 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -9,7 +9,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 0859abd394..77e4420794 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index fd8be17dfd..32af3f853a 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index db6bcc3ecb..ad509076b4 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -14,7 +14,7 @@
#include <neon/NeonLayerSupport.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
@@ -136,7 +136,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
- ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
+ ScopedTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 9ae82ff79f..07323d19ca 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -11,7 +11,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
index 1a26d9510a..fa61a100a9 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -10,7 +10,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 31489a0c32..713771be91 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
diff --git a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
index d6c30817b8..1bfd1e4d47 100644
--- a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
@@ -8,7 +8,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
using namespace armnn::armcomputetensorutils;
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index 46d500bfdc..c4500d885a 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -6,7 +6,7 @@
#include "NeonMaximumWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 53e483a182..519b3c4bc6 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -7,7 +7,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
namespace armnn
{
diff --git a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
index 4c2ba7513d..d809017692 100644
--- a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
@@ -7,7 +7,7 @@
#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
index ae07b250d6..ab01e30140 100644
--- a/src/backends/neon/workloads/NeonResizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index de6f1378bd..4e428a2654 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -10,7 +10,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
#include "NeonWorkloadUtils.hpp"
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index 696de65620..0b327b8a37 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
namespace armnn
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index 21f0f6fa41..64f68aa6e2 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -12,7 +12,7 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 9c7e99c6a0..a1e545ce05 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -13,7 +13,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 589d2ea017..ab7616fbe2 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -8,7 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Utils.hpp>
@@ -33,7 +33,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
}
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
- const ConstCpuTensorHandle* handle)
+ const ConstTensorHandle* handle)
{
ARMNN_ASSERT(handle);
diff --git a/src/backends/reference/RefTensorHandle.hpp b/src/backends/reference/RefTensorHandle.hpp
index 8c64dfbe63..3dfc0395df 100644
--- a/src/backends/reference/RefTensorHandle.hpp
+++ b/src/backends/reference/RefTensorHandle.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include "RefMemoryManager.hpp"
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index c1e3d58bd2..8e3bbe468f 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -3,10 +3,10 @@
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <reference/workloads/RefFillWorkload.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 1648583210..a1487061b6 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -7,7 +7,7 @@
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefLayerSupport.hpp>
#include <backendsCommon/test/LayerTests.hpp>
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index b04d9d6c52..dadd1de1f2 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -173,8 +173,8 @@ BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle)
void* mem = nullptr;
TensorInfo info;
- // Use PassthroughCpuTensor as others are abstract
- auto passThroughHandle = std::make_shared<PassthroughCpuTensorHandle>(info, mem);
+ // Use PassthroughTensor as others are abstract
+ auto passThroughHandle = std::make_shared<PassthroughTensorHandle>(info, mem);
// Test managed handle is initialized with m_Mapped unset and once Map() called its set
ManagedConstTensorHandle managedHandle(passThroughHandle);
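Note: the RAII contract this test exercises, as a short sketch (the scope block is illustrative; map-on-Map() and unmap-on-destruction is the behaviour the assertions check):

    {
        armnn::TensorInfo info;
        void* mem = nullptr;
        auto passThrough = std::make_shared<armnn::PassthroughTensorHandle>(info, mem);
        armnn::ManagedConstTensorHandle managed(passThrough);
        const void* data = managed.Map(); // maps the wrapped handle, sets m_Mapped
        armnn::IgnoreUnused(data);
    }   // destructor unmaps the wrapped handle again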
diff --git a/src/backends/reference/workloads/LstmUtils.cpp b/src/backends/reference/workloads/LstmUtils.cpp
index f197aae291..8e1db0e88f 100644
--- a/src/backends/reference/workloads/LstmUtils.cpp
+++ b/src/backends/reference/workloads/LstmUtils.cpp
@@ -7,7 +7,7 @@
#include "LstmUtils.hpp"
#include "BaseIterator.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
// Helper functions ported from the Android code base
@@ -296,12 +296,12 @@ void SetActivationParameters(uint32_t activation,
}
}
-std::unique_ptr<armnn::ScopedCpuTensorHandle> AssignScopedCpuTensorHandle(const armnn::ConstCpuTensorHandle* ptr)
+std::unique_ptr<armnn::ScopedTensorHandle> AssignScopedTensorHandle(const armnn::ConstTensorHandle* ptr)
{
if (!ptr)
{
return nullptr;
}
- return std::make_unique<armnn::ScopedCpuTensorHandle>(*ptr);
+ return std::make_unique<armnn::ScopedTensorHandle>(*ptr);
}
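Note: this null-propagating helper is what lets the LSTM workloads below initialise every optional weight member unconditionally. Usage sketch (the descriptor field is chosen for illustration):

    // Optional weights stay nullptr; present weights are deep-copied into
    // an owning ScopedTensorHandle.
    std::unique_ptr<armnn::ScopedTensorHandle> weights =
        AssignScopedTensorHandle(descriptor.m_InputToInputWeights);
    if (weights)
    {
        const armnn::TensorInfo& info = weights->GetTensorInfo();
        // ... decode or copy the owned constant data as needed ...
    }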
diff --git a/src/backends/reference/workloads/LstmUtils.hpp b/src/backends/reference/workloads/LstmUtils.hpp
index f6aff8b69f..11fbb77c5d 100644
--- a/src/backends/reference/workloads/LstmUtils.hpp
+++ b/src/backends/reference/workloads/LstmUtils.hpp
@@ -6,7 +6,7 @@
#pragma once
#include "BaseIterator.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
// Helper functions ported from the Android code base
// Refer to: android/external/tensorflow/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -85,4 +85,4 @@ void SetActivationParameters(uint32_t activation,
float& outA,
float& outB);
-std::unique_ptr<armnn::ScopedCpuTensorHandle> AssignScopedCpuTensorHandle(const armnn::ConstCpuTensorHandle* ptr);
+std::unique_ptr<armnn::ScopedTensorHandle> AssignScopedTensorHandle(const armnn::ConstTensorHandle* ptr);
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
index e1068896ba..282374d89b 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -16,10 +16,10 @@ namespace armnn
RefBatchNormalizationWorkload::RefBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info)
: BaseWorkload(descriptor, info)
- , m_Mean (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Mean)))
- , m_Variance(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Variance)))
- , m_Beta (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Beta)))
- , m_Gamma (std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Gamma)))
+ , m_Mean (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Mean)))
+ , m_Variance(std::make_unique<ScopedTensorHandle>(*(descriptor.m_Variance)))
+ , m_Beta (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Beta)))
+ , m_Gamma (std::make_unique<ScopedTensorHandle>(*(descriptor.m_Gamma)))
{}
void RefBatchNormalizationWorkload::Execute() const
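Note: the constructor now deep-copies each constant statistic into an owning ScopedTensorHandle, so execution does not depend on the descriptor's lifetime. A hedged sketch of how Execute() plausibly reads a copy back (the decoder wiring is an assumption; the real body is elided from this hunk):

    // Map the owned copy and build a float decoder over it for the kernel.
    std::unique_ptr<armnn::Decoder<float>> meanDecoder =
        armnn::MakeDecoder<float>(m_Mean->GetTensorInfo(), m_Mean->Map(true));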
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
index a8a72ef65c..6fe05fd192 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
@@ -21,10 +21,10 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Mean;
- std::unique_ptr<ScopedCpuTensorHandle> m_Variance;
- std::unique_ptr<ScopedCpuTensorHandle> m_Beta;
- std::unique_ptr<ScopedCpuTensorHandle> m_Gamma;
+ std::unique_ptr<ScopedTensorHandle> m_Mean;
+ std::unique_ptr<ScopedTensorHandle> m_Variance;
+ std::unique_ptr<ScopedTensorHandle> m_Beta;
+ std::unique_ptr<ScopedTensorHandle> m_Gamma;
};
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 6d0ab413d8..5ae1af8967 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -16,7 +16,7 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
m_FilterShape = rFilterInfo.GetShape();
@@ -24,7 +24,7 @@ RefConvolution2dWorkload::RefConvolution2dWorkload(
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
index 57df3ce6ae..3b2c76ade0 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
@@ -25,8 +25,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr<ScopedTensorHandle> m_Weight;
+ std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_FilterDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index 8fe5dec7d1..b447d1a441 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -19,14 +19,14 @@ RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(
const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
m_FilterShape = rFilterInfo.GetShape();
m_FilterDecoder = MakeDecoder<float>(rFilterInfo, m_Weight->Map(true));
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
index 65a8fd76cf..62289ca34f 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
@@ -24,8 +24,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr <ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr <ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr <ScopedTensorHandle> m_Weight;
+ std::unique_ptr <ScopedTensorHandle> m_Bias;
std::unique_ptr <Decoder<float>> m_FilterDecoder;
std::unique_ptr <Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
index 6784e21585..4bc9eb1704 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
@@ -16,7 +16,7 @@ namespace armnn
RefDetectionPostProcessWorkload::RefDetectionPostProcessWorkload(
const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<DetectionPostProcessQueueDescriptor>(descriptor, info),
- m_Anchors(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Anchors))) {}
+ m_Anchors(std::make_unique<ScopedTensorHandle>(*(descriptor.m_Anchors))) {}
void RefDetectionPostProcessWorkload::Execute() const
{
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
index 007dcea456..920db96603 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
@@ -21,7 +21,7 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Anchors;
+ std::unique_ptr<ScopedTensorHandle> m_Anchors;
};
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index deb56d4c6b..3e63afac6f 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -18,14 +18,14 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
{
if (descriptor.m_Parameters.m_ConstantWeights)
{
- m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
m_WeightShape = rWeightInfo.GetShape();
m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
}
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
index 5c0f67ebaf..6a4e5126cd 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
@@ -29,8 +29,8 @@ public:
private:
void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
- std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ std::unique_ptr<ScopedTensorHandle> m_Weight;
+ std::unique_ptr<ScopedTensorHandle> m_Bias;
std::unique_ptr<Decoder<float>> m_WeightDecoder;
std::unique_ptr<Decoder<float>> m_BiasDecoder;
diff --git a/src/backends/reference/workloads/RefLstmWorkload.cpp b/src/backends/reference/workloads/RefLstmWorkload.cpp
index 09423547da..3ddfd334b8 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.cpp
@@ -15,27 +15,27 @@ namespace armnn
RefLstmWorkload::RefLstmWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
: BaseWorkload<LstmQueueDescriptor>(descriptor, info)
- , m_InputToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToInputWeights))
- , m_InputToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToForgetWeights))
- , m_InputToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToCellWeights))
- , m_InputToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToOutputWeights))
- , m_RecurrentToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToInputWeights))
- , m_RecurrentToForgetWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToForgetWeights))
- , m_RecurrentToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToCellWeights))
- , m_RecurrentToOutputWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToOutputWeights))
- , m_CellToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToInputWeights))
- , m_CellToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToForgetWeights))
- , m_CellToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToOutputWeights))
- , m_InputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_InputGateBias))
- , m_ForgetGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetGateBias))
- , m_CellBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_CellBias))
- , m_OutputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputGateBias))
- , m_ProjectionWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionWeights))
- , m_ProjectionBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionBias))
- , m_InputLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_InputLayerNormWeights))
- , m_ForgetLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_ForgetLayerNormWeights))
- , m_CellLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_CellLayerNormWeights))
- , m_OutputLayerNormWeights (AssignScopedCpuTensorHandle(descriptor.m_OutputLayerNormWeights))
+ , m_InputToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToInputWeights))
+ , m_InputToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToForgetWeights))
+ , m_InputToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToCellWeights))
+ , m_InputToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToOutputWeights))
+ , m_RecurrentToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToInputWeights))
+ , m_RecurrentToForgetWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToForgetWeights))
+ , m_RecurrentToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToCellWeights))
+ , m_RecurrentToOutputWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToOutputWeights))
+ , m_CellToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToInputWeights))
+ , m_CellToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToForgetWeights))
+ , m_CellToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToOutputWeights))
+ , m_InputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_InputGateBias))
+ , m_ForgetGateBiasTensor (AssignScopedTensorHandle(descriptor.m_ForgetGateBias))
+ , m_CellBiasTensor (AssignScopedTensorHandle(descriptor.m_CellBias))
+ , m_OutputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_OutputGateBias))
+ , m_ProjectionWeightsTensor (AssignScopedTensorHandle(descriptor.m_ProjectionWeights))
+ , m_ProjectionBiasTensor (AssignScopedTensorHandle(descriptor.m_ProjectionBias))
+ , m_InputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_InputLayerNormWeights))
+ , m_ForgetLayerNormWeights (AssignScopedTensorHandle(descriptor.m_ForgetLayerNormWeights))
+ , m_CellLayerNormWeights (AssignScopedTensorHandle(descriptor.m_CellLayerNormWeights))
+ , m_OutputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_OutputLayerNormWeights))
{}
void RefLstmWorkload::Execute() const
diff --git a/src/backends/reference/workloads/RefLstmWorkload.hpp b/src/backends/reference/workloads/RefLstmWorkload.hpp
index b55a1f9a9e..6feffbcb00 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.hpp
@@ -23,27 +23,27 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeights;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_InputToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToOutputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_CellLayerNormWeights;
+ std::unique_ptr<ScopedTensorHandle> m_OutputLayerNormWeights;
float m_LayerNormEpsilon = static_cast<float>(1e-8);
};
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.cpp b/src/backends/reference/workloads/RefQLstmWorkload.cpp
index 7b7961c5a0..dc29d0b92d 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.cpp
@@ -15,32 +15,32 @@ namespace armnn
RefQLstmWorkload::RefQLstmWorkload(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
: BaseWorkload<QLstmQueueDescriptor>(descriptor, info)
- , m_InputToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToInputWeights))
- , m_InputToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToForgetWeights))
- , m_InputToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToCellWeights))
- , m_InputToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputToOutputWeights))
-
- , m_RecurrentToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToInputWeights))
- , m_RecurrentToForgetWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToForgetWeights))
- , m_RecurrentToCellWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_RecurrentToCellWeights))
- , m_RecurrentToOutputWeightsTensor(AssignScopedCpuTensorHandle(descriptor.m_RecurrentToOutputWeights))
-
- , m_CellToInputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToInputWeights))
- , m_CellToForgetWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToForgetWeights))
- , m_CellToOutputWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellToOutputWeights))
-
- , m_InputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_InputGateBias))
- , m_ForgetGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetGateBias))
- , m_CellBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_CellBias))
- , m_OutputGateBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputGateBias))
-
- , m_ProjectionWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionWeights))
- , m_ProjectionBiasTensor (AssignScopedCpuTensorHandle(descriptor.m_ProjectionBias))
-
- , m_InputLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_InputLayerNormWeights))
- , m_ForgetLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_ForgetLayerNormWeights))
- , m_CellLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_CellLayerNormWeights))
- , m_OutputLayerNormWeightsTensor (AssignScopedCpuTensorHandle(descriptor.m_OutputLayerNormWeights))
+ , m_InputToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToInputWeights))
+ , m_InputToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToForgetWeights))
+ , m_InputToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToCellWeights))
+ , m_InputToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToOutputWeights))
+
+ , m_RecurrentToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToInputWeights))
+ , m_RecurrentToForgetWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToForgetWeights))
+ , m_RecurrentToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToCellWeights))
+ , m_RecurrentToOutputWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToOutputWeights))
+
+ , m_CellToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToInputWeights))
+ , m_CellToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToForgetWeights))
+ , m_CellToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToOutputWeights))
+
+ , m_InputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_InputGateBias))
+ , m_ForgetGateBiasTensor (AssignScopedTensorHandle(descriptor.m_ForgetGateBias))
+ , m_CellBiasTensor (AssignScopedTensorHandle(descriptor.m_CellBias))
+ , m_OutputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_OutputGateBias))
+
+ , m_ProjectionWeightsTensor (AssignScopedTensorHandle(descriptor.m_ProjectionWeights))
+ , m_ProjectionBiasTensor (AssignScopedTensorHandle(descriptor.m_ProjectionBias))
+
+ , m_InputLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputLayerNormWeights))
+ , m_ForgetLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_ForgetLayerNormWeights))
+ , m_CellLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellLayerNormWeights))
+ , m_OutputLayerNormWeightsTensor (AssignScopedTensorHandle(descriptor.m_OutputLayerNormWeights))
{}
void RefQLstmWorkload::Execute() const
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.hpp b/src/backends/reference/workloads/RefQLstmWorkload.hpp
index f4242ec8a4..0aa7e10bbf 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.hpp
@@ -23,32 +23,32 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor;
-
- std::unique_ptr<ScopedCpuTensorHandle> m_InputLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_ForgetLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_CellLayerNormWeightsTensor;
- std::unique_ptr<ScopedCpuTensorHandle> m_OutputLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_InputToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_RecurrentToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_CellToInputWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToForgetWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellToOutputWeightsTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_InputGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetGateBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellBiasTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputGateBiasTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ProjectionBiasTensor;
+
+ std::unique_ptr<ScopedTensorHandle> m_InputLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_ForgetLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_CellLayerNormWeightsTensor;
+ std::unique_ptr<ScopedTensorHandle> m_OutputLayerNormWeightsTensor;
float m_LayerNormEpsilon = static_cast<float>(1e-8);
};
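Since only the spelling of the type changes in these member declarations (the handle still owns a scoped, CPU-side copy of its constant tensor), the migration is a plain find-and-replace. For out-of-tree workloads that cannot be updated in lockstep, a transitional alias would keep the old name compiling; the shim below is purely illustrative and assumes the tree does not already ship such an alias:

// Hypothetical transition shim -- not part of this patch.
// Lets legacy code that still spells ScopedCpuTensorHandle build
// against the renamed class while its call sites are updated.
#include <backendsCommon/TensorHandle.hpp>

namespace armnn
{
using ScopedCpuTensorHandle = ScopedTensorHandle;
} // namespace armnn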
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
index 634122835f..8665648fe6 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
@@ -18,7 +18,7 @@ RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
BaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
// set up weights decoder
- m_Weights = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
+ m_Weights = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
const TensorInfo& weightsInfo = m_Weights->GetTensorInfo();
m_WeightsDecoder = MakeDecoder<float>(weightsInfo, m_Weights->Map(true));
@@ -27,7 +27,7 @@ RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(
// set up biases decoder
if (descriptor.m_Parameters.m_BiasEnabled)
{
- m_Biases = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
+ m_Biases = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
const TensorInfo& biasesInfo = m_Biases->GetTensorInfo();
m_BiasesDecoder = MakeDecoder<float>(biasesInfo, m_Biases->Map(true));
}
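The constructor above deep-copies the constant weights (and, when biases are enabled, the biases) into a ScopedTensorHandle before building the float decoders, so the workload owns a stable CPU-resident copy for the lifetime of the decoder. A self-contained sketch of that copy-then-map idiom, assuming ScopedTensorHandle copy-constructs from a ConstTensor and that Map(true)/Unmap() behave as on any ITensorHandle (API details may vary between ArmNN releases):

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    // A 2x2 float tensor standing in for a workload's constant weights.
    TensorInfo info({ 2, 2 }, DataType::Float32);
    std::vector<float> weights = { 1.f, 2.f, 3.f, 4.f };
    ConstTensor constWeights(info, weights.data());

    ScopedTensorHandle handle(constWeights);  // deep copy owned by the handle

    // Map(true) requests a blocking, CPU-readable view -- the same pointer
    // the workload above hands to MakeDecoder<float>().
    const float* mapped = static_cast<const float*>(handle.Map(true));
    const bool ok = (mapped[0] == 1.f);
    handle.Unmap();

    return ok ? 0 : 1;
}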
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
index 7c18f10293..997ccbfe12 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
@@ -8,7 +8,7 @@
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
namespace armnn
@@ -26,8 +26,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
- std::unique_ptr<ScopedCpuTensorHandle> m_Weights;
- std::unique_ptr<ScopedCpuTensorHandle> m_Biases;
+ std::unique_ptr<ScopedTensorHandle> m_Weights;
+ std::unique_ptr<ScopedTensorHandle> m_Biases;
std::unique_ptr<Decoder<float>> m_WeightsDecoder;
std::unique_ptr<Decoder<float>> m_BiasesDecoder;
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index 0d839afc1c..3659617b7f 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
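This last hunk shows the whole migration for a typical includer: swap the renamed header and, where the file uses the handle types directly, drop the Cpu prefix at the point of use, as in the hunks above. For any remaining file the header-side change reduces to:

-#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/TensorHandle.hpp>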