From b4540bef0b0327683fe8e63f727c1212800dc2a9 Mon Sep 17 00:00:00 2001
From: David Beck
Date: Mon, 24 Sep 2018 13:18:27 +0100
Subject: IVGCVSW-1898 : Ref backend folder structure

* Reference backend is renamed to backends/reference as per
  https://confluence.arm.com/display/MLENG/Pluggable+backends

Change-Id: I27a13c274eb60995dfb459e3c49c0e2f60bcd32c
---
 .../reference/workloads/RefArithmeticWorkload.cpp  | 69 ++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 src/backends/reference/workloads/RefArithmeticWorkload.cpp

diff --git a/src/backends/reference/workloads/RefArithmeticWorkload.cpp b/src/backends/reference/workloads/RefArithmeticWorkload.cpp
new file mode 100644
index 0000000000..6c39fa1186
--- /dev/null
+++ b/src/backends/reference/workloads/RefArithmeticWorkload.cpp
@@ -0,0 +1,69 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefArithmeticWorkload.hpp"
+#include "ArithmeticFunction.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+#include <vector>
+
+namespace armnn
+{
+
+template <typename ParentDescriptor, typename Functor>
+void BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+    auto data = Float32Workload<ParentDescriptor>::GetData();
+    const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape();
+    const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape();
+    const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape();
+
+    const float* inData0 = GetInputTensorDataFloat(0, data);
+    const float* inData1 = GetInputTensorDataFloat(1, data);
+    float* outData = GetOutputTensorDataFloat(0, data);
+
+    ArithmeticFunction<Functor>(inShape0, inShape1, outShape, inData0, inData1, outData);
+}
+
+template <typename ParentDescriptor, typename Functor>
+void BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+    auto data = Uint8Workload<ParentDescriptor>::GetData();
+    const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+
+    auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0);
+    auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1);
+
+    std::vector<float> results(outputInfo.GetNumElements());
+
+    ArithmeticFunction<Functor>(inputInfo0.GetShape(),
+                                inputInfo1.GetShape(),
+                                outputInfo.GetShape(),
+                                dequant0.data(),
+                                dequant1.data(),
+                                results.data());
+
+    Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
+}
+
+}
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
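
Note on the quantized path: BaseUint8ArithmeticWorkload does no uint8 arithmetic directly. It dequantizes both inputs to float, applies the elementwise Functor via ArithmeticFunction, and requantizes the result into the output tensor. The standalone sketch below illustrates that round trip with std::plus<float> standing in for the Functor template parameter. It is not armnn code: the QuantizationParams struct and the DequantizeSketch/QuantizeSketch helpers are simplified stand-ins for the scale/offset carried by armnn::TensorInfo and for armnn's Dequantize/Quantize utilities, and broadcasting is omitted.

// build: g++ -std=c++14 quant_arithmetic_sketch.cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Simplified stand-in for the quantization scale/offset that a real
// armnn::TensorInfo carries alongside shape and data type.
struct QuantizationParams
{
    float   scale;
    int32_t offset;
};

// Map uint8 values back to float: real = scale * (quantized - offset).
std::vector<float> DequantizeSketch(const std::vector<uint8_t>& in, QuantizationParams q)
{
    std::vector<float> out(in.size());
    for (size_t i = 0; i < in.size(); ++i)
    {
        out[i] = q.scale * (static_cast<int32_t>(in[i]) - q.offset);
    }
    return out;
}

// Map floats to uint8 with round-to-nearest and saturation to [0, 255].
std::vector<uint8_t> QuantizeSketch(const std::vector<float>& in, QuantizationParams q)
{
    std::vector<uint8_t> out(in.size());
    for (size_t i = 0; i < in.size(); ++i)
    {
        int32_t v = static_cast<int32_t>(std::round(in[i] / q.scale)) + q.offset;
        out[i] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, v)));
    }
    return out;
}

int main()
{
    QuantizationParams q{0.5f, 0}; // scale 0.5, zero offset, shared by all tensors

    std::vector<uint8_t> input0{2, 4, 6}; // represents {1.0, 2.0, 3.0}
    std::vector<uint8_t> input1{2, 2, 2}; // represents {1.0, 1.0, 1.0}

    // Same three steps as BaseUint8ArithmeticWorkload::ExecuteImpl:
    // 1) dequantize both inputs,
    auto dequant0 = DequantizeSketch(input0, q);
    auto dequant1 = DequantizeSketch(input1, q);

    // 2) apply the elementwise functor in float,
    std::vector<float> results(dequant0.size());
    std::transform(dequant0.begin(), dequant0.end(), dequant1.begin(),
                   results.begin(), std::plus<float>());

    // 3) requantize into the output tensor.
    auto output = QuantizeSketch(results, q);

    for (auto v : output) { std::cout << static_cast<int>(v) << ' '; } // prints: 4 6 8
    std::cout << std::endl;
    return 0;
}

Keeping the arithmetic in float means one ArithmeticFunction implementation serves both the Float32 and Uint8 workloads for all four operations instantiated above; only the conversions at the tensor boundaries differ.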