//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <backends/CpuTensorHandle.hpp>

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <boost/polymorphic_cast.hpp>

#include <Half.hpp>

#include <vector>

namespace armnn
{

////////////////////////////////////////////
/// float32 helpers
////////////////////////////////////////////

inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensorInfo();
}

template <typename DataType>
inline const DataType* GetConstCpuData(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetConstTensor<DataType>();
}

template <typename DataType>
inline DataType* GetCpuData(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensor<DataType>();
}

template <typename DataType, typename PayloadType>
const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return GetConstCpuData<DataType>(tensorHandle);
}

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return GetCpuData<DataType>(tensorHandle);
}

template <typename PayloadType>
const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<float>(idx, data);
}

template <typename PayloadType>
float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<float>(idx, data);
}

template <typename PayloadType>
const Half* GetInputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<Half>(idx, data);
}

template <typename PayloadType>
Half* GetOutputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<Half>(idx, data);
}
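// Usage sketch (illustrative only, not part of this header's API): a float32
// reference workload typically resolves its raw buffers through the helpers
// above inside Execute(). 'PayloadType' stands for any queue-descriptor data
// type exposing m_Inputs/m_Outputs; the identity kernel below is hypothetical.
//
//   template <typename PayloadType>
//   void CopyFloat32Workload(const PayloadType& data)
//   {
//       const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]);
//       const float* input          = GetInputTensorDataFloat(0, data);
//       float* output               = GetOutputTensorDataFloat(0, data);
//
//       for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
//       {
//           output[i] = input[i]; // identity kernel, purely for illustration
//       }
//   }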
////////////////////////////////////////////
/// u8 helpers
////////////////////////////////////////////

inline const uint8_t* GetConstCpuU8Data(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate.
    const ConstCpuTensorHandle* cpuTensorHandle =
        boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetConstTensor<uint8_t>();
}

inline uint8_t* GetCpuU8Data(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate.
    const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle);
    return cpuTensorHandle->GetTensor<uint8_t>();
}

template <typename PayloadType>
const uint8_t* GetInputTensorDataU8(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return GetConstCpuU8Data(tensorHandle);
}

template <typename PayloadType>
uint8_t* GetOutputTensorDataU8(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return GetCpuU8Data(tensorHandle);
}

// Expands a quantized buffer into float32 values, using the quantization
// scale/offset carried in the tensor's TensorInfo.
template <typename T>
std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
{
    std::vector<float> ret(info.GetNumElements());
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
    return ret;
}

// Quantizes a float32 buffer back to u8, using the scale/offset from the
// output tensor's TensorInfo.
inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

} //namespace armnn
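// Usage sketch (illustrative only, not part of this header's API): u8 reference
// workloads commonly dequantize to float32, run the float kernel, then
// re-quantize into the output buffer. 'data' stands for a workload's
// queue-descriptor payload and 'RunFloatKernel' is a hypothetical float32 kernel.
//
//   const armnn::TensorInfo& inputInfo  = armnn::GetTensorInfo(data.m_Inputs[0]);
//   const armnn::TensorInfo& outputInfo = armnn::GetTensorInfo(data.m_Outputs[0]);
//
//   // u8 -> float32, using the scale/offset stored in the input TensorInfo.
//   std::vector<float> results = armnn::Dequantize(armnn::GetInputTensorDataU8(0, data), inputInfo);
//
//   RunFloatKernel(results); // hypothetical: operates on the float32 values in place
//
//   // float32 -> u8, written straight into the output tensor's buffer.
//   armnn::Quantize(armnn::GetOutputTensorDataU8(0, data), results.data(), outputInfo);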