diff options
author | Matthew Bentham <Matthew.Bentham@arm.com> | 2019-06-18 16:14:34 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-07-02 09:19:01 +0000 |
commit | 4cefc4135f7fbf5c2ba532a4f5b14d2811f9ed9e (patch) | |
tree | 1a7fbd6c636d83737b4e2319ed27255397921a32 /src/backends/reference/workloads/RefWorkloadUtils.hpp | |
parent | fe15eff9ed2007cd10800ec356ce2e8ea4b5f9d0 (diff) | |
download | armnn-4cefc4135f7fbf5c2ba532a4f5b14d2811f9ed9e.tar.gz |
IVGCVSW-3307 Introduce RefTensorHandle
Use it for intermediate tensors on reference backend.
Lays the groundwork for memory management in the reference backend.
Change-Id: I7d3ee132cac31bde70ae6e1b815f4f0b03d550a6
Signed-off-by: Matthew Bentham <Matthew.Bentham@arm.com>
Diffstat (limited to 'src/backends/reference/workloads/RefWorkloadUtils.hpp')
-rw-r--r-- | src/backends/reference/workloads/RefWorkloadUtils.hpp | 64 |
1 file changed, 10 insertions, 54 deletions
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index ce796160f2..c3260c8142 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -9,8 +9,10 @@
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
 
-#include <Half.hpp>
+#include <reference/RefTensorHandle.hpp>
+
+#include <Half.hpp>
 
 #include <boost/polymorphic_cast.hpp>
 
 namespace armnn
@@ -22,41 +24,24 @@ namespace armnn
 
 inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
 {
-    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
-    const ConstCpuTensorHandle* cpuTensorHandle =
-        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
-    return cpuTensorHandle->GetTensorInfo();
-}
-
-template <typename DataType>
-inline const DataType* GetConstCpuData(const ITensorHandle* tensorHandle)
-{
-    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
-    const ConstCpuTensorHandle* cpuTensorHandle =
-        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
-    return cpuTensorHandle->GetConstTensor<DataType>();
+    // We know that reference workloads use RefTensorHandles for inputs and outputs
+    const RefTensorHandle* refTensorHandle =
+        boost::polymorphic_downcast<const RefTensorHandle*>(tensorHandle);
+    return refTensorHandle->GetTensorInfo();
 }
 
-template <typename DataType>
-inline DataType* GetCpuData(const ITensorHandle* tensorHandle)
-{
-    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
-    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
-    return cpuTensorHandle->GetTensor<DataType>();
-};
-
 template <typename DataType, typename PayloadType>
 const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
 {
     const ITensorHandle* tensorHandle = data.m_Inputs[idx];
-    return GetConstCpuData<DataType>(tensorHandle);
+    return reinterpret_cast<const DataType*>(tensorHandle->Map());
 }
 
 template <typename DataType, typename PayloadType>
 DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
 {
-    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
-    return GetCpuData<DataType>(tensorHandle);
+    ITensorHandle* tensorHandle = data.m_Outputs[idx];
+    return reinterpret_cast<DataType*>(tensorHandle->Map());
 }
 
 template <typename PayloadType>
@@ -87,35 +72,6 @@ Half* GetOutputTensorDataHalf(unsigned int idx, const PayloadType& data)
 /// u8 helpers
 ////////////////////////////////////////////
 
-inline const uint8_t* GetConstCpuU8Data(const ITensorHandle* tensorHandle)
-{
-    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
-    const ConstCpuTensorHandle* cpuTensorHandle =
-        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
-    return cpuTensorHandle->GetConstTensor<uint8_t>();
-};
-
-inline uint8_t* GetCpuU8Data(const ITensorHandle* tensorHandle)
-{
-    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
-    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
-    return cpuTensorHandle->GetTensor<uint8_t>();
-};
-
-template <typename PayloadType>
-const uint8_t* GetInputTensorDataU8(unsigned int idx, const PayloadType& data)
-{
-    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
-    return GetConstCpuU8Data(tensorHandle);
-}
-
-template <typename PayloadType>
-uint8_t* GetOutputTensorDataU8(unsigned int idx, const PayloadType& data)
-{
-    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
-    return GetCpuU8Data(tensorHandle);
-}
-
 template<typename T>
 std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
 {