From ef38d5d0f071c53883e2b2f13c85bfb3df34bf88 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Mon, 25 Mar 2019 09:03:35 +0000
Subject: IVGCVSW-2861 Refactor the Reference Elementwise workloads

* Refactored the Elementwise workloads into a single workload.
* Execute() function will react based on the DataType.

Change-Id: I6d4d6a74cec150ed8cb252e70b629ed968e7093d
Signed-off-by: Sadik Armagan
---
 .../reference/workloads/RefElementwiseWorkload.cpp | 161 ++++++++++++++-------
 1 file changed, 105 insertions(+), 56 deletions(-)

diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index c9b93c8524..356d7a0c16 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -7,69 +7,118 @@
 #include "ElementwiseFunction.hpp"
 #include "RefWorkloadUtils.hpp"
 #include "Profiling.hpp"
+#include "StringMapping.hpp"
+#include "TypeUtils.hpp"
 #include <vector>
 
 namespace armnn
 {
 
-template <typename ParentDescriptor, typename Functor>
-void BaseFloat32ElementwiseWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+template <typename Functor, armnn::DataType DataType, typename ParentDescriptor, armnn::StringMapping::Id DebugString>
+void RefElementwiseWorkload<Functor, DataType, ParentDescriptor, DebugString>::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
-
-    auto data = Float32Workload<ParentDescriptor>::GetData();
-    const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape();
-    const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape();
-    const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape();
-
-    const float* inData0 = GetInputTensorDataFloat(0, data);
-    const float* inData1 = GetInputTensorDataFloat(1, data);
-    float* outData = GetOutputTensorDataFloat(0, data);
-
-    ElementwiseFunction<Functor, float, float>(inShape0, inShape1, outShape, inData0, inData1, outData);
-}
-
-template <typename ParentDescriptor, typename Functor>
-void BaseUint8ElementwiseWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
-
-    auto data = Uint8Workload<ParentDescriptor>::GetData();
-    const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]);
-    const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]);
-    const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
-
-    auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0);
-    auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1);
-
-    std::vector<float> results(outputInfo.GetNumElements());
-
-    ElementwiseFunction<Functor, float, float>(inputInfo0.GetShape(),
-                                               inputInfo1.GetShape(),
-                                               outputInfo.GetShape(),
-                                               dequant0.data(),
-                                               dequant1.data(),
-                                               results.data());
-
-    Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, StringMapping::Instance().Get(DebugString));
+
+    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const TensorShape& inShape0 = inputInfo0.GetShape();
+    const TensorShape& inShape1 = inputInfo1.GetShape();
+    const TensorShape& outShape = outputInfo.GetShape();
+
+    switch(DataType)
+    {
+        case armnn::DataType::QuantisedAsymm8:
+        {
+            std::vector<float> results(outputInfo.GetNumElements());
+            ElementwiseFunction<Functor, float, float>(inShape0,
+                                                       inShape1,
+                                                       outShape,
+                                                       Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0).data(),
+                                                       Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1).data(),
+                                                       results.data());
+            Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+            break;
+        }
+        case armnn::DataType::Float32:
+        {
+            ElementwiseFunction<Functor, float, float>(inShape0,
+                                                       inShape1,
+                                                       outShape,
+                                                       GetInputTensorDataFloat(0, m_Data),
+                                                       GetInputTensorDataFloat(1, m_Data),
+                                                       GetOutputTensorDataFloat(0, m_Data));
+            break;
+        }
+        default:
+            BOOST_ASSERT_MSG(false, "Unknown Data Type!");
+            break;
+    }
 }
 
 }
 
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
-
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
-
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
-
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
-
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
-
-template class armnn::BaseFloat32ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
-template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
+template class armnn::RefElementwiseWorkload<std::plus<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::AdditionQueueDescriptor,
+                                             armnn::StringMapping::RefAdditionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::plus<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::AdditionQueueDescriptor,
+                                             armnn::StringMapping::RefAdditionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::minus<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::SubtractionQueueDescriptor,
+                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::minus<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::SubtractionQueueDescriptor,
+                                             armnn::StringMapping::RefSubtractionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::multiplies<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::MultiplicationQueueDescriptor,
+                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::multiplies<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::MultiplicationQueueDescriptor,
+                                             armnn::StringMapping::RefMultiplicationWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::divides<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::DivisionQueueDescriptor,
+                                             armnn::StringMapping::RefDivisionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<std::divides<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::DivisionQueueDescriptor,
+                                             armnn::StringMapping::RefDivisionWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<armnn::maximum<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::MaximumQueueDescriptor,
+                                             armnn::StringMapping::RefMaximumWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<armnn::maximum<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::MaximumQueueDescriptor,
+                                             armnn::StringMapping::RefMaximumWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<armnn::minimum<float>,
+                                             armnn::DataType::Float32,
+                                             armnn::MinimumQueueDescriptor,
+                                             armnn::StringMapping::RefMinimumWorkload_Execute>;
+
+template class armnn::RefElementwiseWorkload<armnn::minimum<float>,
+                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::MinimumQueueDescriptor,
+                                             armnn::StringMapping::RefMinimumWorkload_Execute>;
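Editor's note: the patch only touches the .cpp, so the class template it implements lives in RefElementwiseWorkload.hpp, which is not shown here. The sketch below is a hypothetical reconstruction of the minimum that header must declare for the explicit instantiations above to link; the BaseWorkload parent, the inherited m_Data member, and the constructor signature are assumptions inferred from how Execute() uses m_Data, not code taken from the patch.

    // Hypothetical sketch of RefElementwiseWorkload.hpp -- NOT part of this patch.
    #include <functional> // std::plus, std::minus, std::multiplies, std::divides

    namespace armnn
    {

    template <typename Functor,                     // e.g. std::plus<float>
              armnn::DataType DataType,             // Float32 or QuantisedAsymm8
              typename ParentDescriptor,            // e.g. AdditionQueueDescriptor
              armnn::StringMapping::Id DebugString> // profiling event label
    class RefElementwiseWorkload : public BaseWorkload<ParentDescriptor> // parent class is an assumption
    {
    public:
        using BaseWorkload<ParentDescriptor>::m_Data; // descriptor accessed by Execute()

        RefElementwiseWorkload(const ParentDescriptor& descriptor, const WorkloadInfo& info)
            : BaseWorkload<ParentDescriptor>(descriptor, info) {}

        // Single entry point; dispatches on DataType as shown in the .cpp above.
        void Execute() const override;
    };

    } // namespace armnn

Folding the per-type BaseFloat32/BaseUint8 class pair into one template keyed on a DataType non-type parameter means each arithmetic operation now needs only one workload class with two instantiations, and the float/quantised split moves from the class hierarchy into a runtime switch inside Execute().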