diff options
author | James Conroy <james.conroy@arm.com> | 2021-04-27 17:13:27 +0100 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2021-05-06 14:40:40 +0000 |
commit | 1f58f03d82c482626b1b4673b6c0e25da4338fb5 (patch) | |
tree | e92451e00d459a2fc0d870694460f482aa4c77ae /src/backends/backendsCommon/TensorHandle.cpp | |
parent | a7a12f5c3654da554ad6197beff0f0fc54681c92 (diff) | |
download | armnn-1f58f03d82c482626b1b4673b6c0e25da4338fb5.tar.gz |
IVGCVSW-5815 Generalise ConstCpuTensorHandle
* Generalises ConstCpuTensorHandle and inherited
classes by removing 'Cpu' from aliases.
* New renamed classes: ConstTensorHandle, TensorHandle,
ScopedTensorHandle, PassthroughTensorHandle,
ConstPassthroughTensorHandle.
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I1824e0e134202735fb77051f20a7252f161dfe16
Diffstat (limited to 'src/backends/backendsCommon/TensorHandle.cpp')
-rw-r--r-- | src/backends/backendsCommon/TensorHandle.cpp | 141 |
1 file changed, 141 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/TensorHandle.cpp b/src/backends/backendsCommon/TensorHandle.cpp new file mode 100644 index 0000000000..d4660d6de3 --- /dev/null +++ b/src/backends/backendsCommon/TensorHandle.cpp @@ -0,0 +1,141 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include <armnn/Exceptions.hpp> +#include <armnn/utility/IgnoreUnused.hpp> + +#include <backendsCommon/TensorHandle.hpp> + +#include <cstring> + +namespace armnn +{ + +TensorShape GetUnpaddedTensorStrides(const TensorInfo& tensorInfo) +{ + TensorShape shape(tensorInfo.GetShape()); + auto size = GetDataTypeSize(tensorInfo.GetDataType()); + auto runningSize = size; + std::vector<unsigned int> strides(shape.GetNumDimensions()); + auto lastIdx = shape.GetNumDimensions()-1; + for (unsigned int i=0; i < lastIdx ; i++) + { + strides[lastIdx-i] = runningSize; + runningSize *= shape[lastIdx-i]; + } + strides[0] = runningSize; + return TensorShape(shape.GetNumDimensions(), strides.data()); +} + +ConstTensorHandle::ConstTensorHandle(const TensorInfo& tensorInfo) +: m_TensorInfo(tensorInfo) +, m_Memory(nullptr) +{ +} + +template <> +const void* ConstTensorHandle::GetConstTensor<void>() const +{ + return m_Memory; +} + +TensorHandle::TensorHandle(const TensorInfo& tensorInfo) +: ConstTensorHandle(tensorInfo) +, m_MutableMemory(nullptr) +{ +} + +template <> +void* TensorHandle::GetTensor<void>() const +{ + return m_MutableMemory; +} + +ScopedTensorHandle::ScopedTensorHandle(const TensorInfo& tensorInfo) +: TensorHandle(tensorInfo) +{ +} + +ScopedTensorHandle::ScopedTensorHandle(const ConstTensor& tensor) +: ScopedTensorHandle(tensor.GetInfo()) +{ + CopyFrom(tensor.GetMemoryArea(), tensor.GetNumBytes()); +} + +ScopedTensorHandle::ScopedTensorHandle(const ConstTensorHandle& tensorHandle) +: ScopedTensorHandle(tensorHandle.GetTensorInfo()) +{ + CopyFrom(tensorHandle.GetConstTensor<void>(), tensorHandle.GetTensorInfo().GetNumBytes()); +} + 
+ScopedTensorHandle::ScopedTensorHandle(const ScopedTensorHandle& other) +: TensorHandle(other.GetTensorInfo()) +{ + CopyFrom(other); +} + +ScopedTensorHandle& ScopedTensorHandle::operator=(const ScopedTensorHandle& other) +{ + ::operator delete(GetTensor<void>()); + SetMemory(nullptr); + CopyFrom(other); + return *this; +} + +ScopedTensorHandle::~ScopedTensorHandle() +{ + ::operator delete(GetTensor<void>()); +} + +void ScopedTensorHandle::Allocate() +{ + if (GetTensor<void>() == nullptr) + { + SetMemory(::operator new(GetTensorInfo().GetNumBytes())); + } + else + { + throw InvalidArgumentException("TensorHandle::Allocate Trying to allocate a TensorHandle" + "that already has allocated memory."); + } +} + +void ScopedTensorHandle::CopyOutTo(void* memory) const +{ + memcpy(memory, GetTensor<void>(), GetTensorInfo().GetNumBytes()); +} + +void ScopedTensorHandle::CopyInFrom(const void* memory) +{ + memcpy(GetTensor<void>(), memory, GetTensorInfo().GetNumBytes()); +} + +void ScopedTensorHandle::CopyFrom(const ScopedTensorHandle& other) +{ + CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes()); +} + +void ScopedTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes) +{ + ARMNN_ASSERT(GetTensor<void>() == nullptr); + ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes); + + if (srcMemory) + { + Allocate(); + memcpy(GetTensor<void>(), srcMemory, numBytes); + } +} + +void PassthroughTensorHandle::Allocate() +{ + throw InvalidArgumentException("PassthroughTensorHandle::Allocate() should never be called"); +} + +void ConstPassthroughTensorHandle::Allocate() +{ + throw InvalidArgumentException("ConstPassthroughTensorHandle::Allocate() should never be called"); +} + +} // namespace armnn |