From 4fcda0101ec3d110c1d6d7bee5c83416b645528a Mon Sep 17 00:00:00 2001
From: telsoa01 <telmo.soares@arm.com>
Date: Fri, 9 Mar 2018 14:13:49 +0000
Subject: Release 18.02

Change-Id: Id3c11dc5ee94ef664374a988fcc6901e9a232fa6
---
 .../backends/RefWorkloads/RefWorkloadUtils.hpp | 125 +++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp

diff --git a/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp b/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp
new file mode 100644
index 0000000000..088fe819e5
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp
@@ -0,0 +1,125 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include "backends/CpuTensorHandle.hpp"
+
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <boost/polymorphic_cast.hpp>
+
+namespace armnn
+{
+
+////////////////////////////////////////////
+/// float32 helpers
+////////////////////////////////////////////
+
+inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
+{
+    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
+    const ConstCpuTensorHandle* cpuTensorHandle =
+        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
+    return cpuTensorHandle->GetTensorInfo();
+}
+
+template <typename DataType>
+inline const DataType* GetConstCpuData(const ITensorHandle* tensorHandle)
+{
+    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
+    const ConstCpuTensorHandle* cpuTensorHandle =
+        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
+    return cpuTensorHandle->GetConstTensor<DataType>();
+}
+
+template <typename DataType>
+inline DataType* GetCpuData(const ITensorHandle* tensorHandle)
+{
+    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
+    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
+    return cpuTensorHandle->GetTensor<DataType>();
+};
+
+template <typename DataType, typename PayloadType>
+const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
+    return GetConstCpuData<DataType>(tensorHandle);
+}
+
+template <typename DataType, typename PayloadType>
+DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
+    return GetCpuData<DataType>(tensorHandle);
+}
+
+template <typename PayloadType>
+const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
+{
+    return GetInputTensorData<float>(idx, data);
+}
+
+template <typename PayloadType>
+float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
+{
+    return GetOutputTensorData<float>(idx, data);
+}
+
+////////////////////////////////////////////
+/// u8 helpers
+////////////////////////////////////////////
+
+inline const uint8_t* GetConstCpuU8Data(const ITensorHandle* tensorHandle)
+{
+    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
+    const ConstCpuTensorHandle* cpuTensorHandle =
+        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
+    return cpuTensorHandle->GetConstTensor<uint8_t>();
+};
+
+inline uint8_t* GetCpuU8Data(const ITensorHandle* tensorHandle)
+{
+    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
+    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
+    return cpuTensorHandle->GetTensor<uint8_t>();
+};
+
+template <typename PayloadType>
+const uint8_t* GetInputTensorDataU8(unsigned int idx, const PayloadType& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
+    return GetConstCpuU8Data(tensorHandle);
+}
+
+template <typename PayloadType>
+uint8_t* GetOutputTensorDataU8(unsigned int idx, const PayloadType& data)
+{
+    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
+    return GetCpuU8Data(tensorHandle);
+}
+
+template<typename T>
+std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
+{
+    std::vector<float> ret(info.GetNumElements());
+    for (size_t i = 0; i < info.GetNumElements(); i++)
+    {
+        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
+    }
+    return ret;
+}
+
+inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
+{
+    for (size_t i = 0; i < info.GetNumElements(); i++)
+    {
+        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
+    }
+}
+
+} //namespace armnn
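
Usage note: the Get*TensorData helpers above are intended to be called from a
reference workload's Execute(), with the workload's queue descriptor passed as
PayloadType (anything exposing m_Inputs/m_Outputs vectors of ITensorHandle*
works). The following is a minimal sketch of the Dequantize/Quantize round
trip defined at the end of the header, not part of the patch itself; it
assumes the ArmNN 18.02 headers are on the include path, and the shape, scale,
and offset are arbitrary values chosen for illustration.

    #include "backends/RefWorkloads/RefWorkloadUtils.hpp"

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        using namespace armnn;

        // A 1x4 QuantisedAsymm8 tensor: real = (stored - offset) * scale.
        // Scale 0.5f and offset 10 are arbitrary example values.
        TensorInfo info(TensorShape({1, 4}), DataType::QuantisedAsymm8, 0.5f, 10);

        std::vector<uint8_t> quant = {10, 12, 14, 20}; // encodes 0.0, 1.0, 2.0, 5.0

        // Expand the uint8 data to float32 with the Dequantize helper...
        std::vector<float> dequant = Dequantize(quant.data(), info);
        assert(dequant[3] == 5.0f); // (20 - 10) * 0.5

        // ...and collapse it back to uint8 with the Quantize helper.
        std::vector<uint8_t> requant(info.GetNumElements());
        Quantize(requant.data(), dequant.data(), info);
        assert(requant == quant);
    }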