diff options
author | David Beck <david.beck@arm.com> | 2018-09-12 13:50:03 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-01 14:56:47 +0100 |
commit | 279f8721824b104def48b426447fb1766d794e8e (patch) | |
tree | 4673d08bf595ceef305ce01df7eb0a4e662dbd16 /src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp | |
parent | 0a710c4c44be908a93a318e1fbd5c3535e849293 (diff) | |
download | armnn-279f8721824b104def48b426447fb1766d794e8e.tar.gz |
IVGCVSW-1843 : remove duplicate code for Ref Arithmetic workloads
Change-Id: If94d7b7b06a8c4e2c155b2ab470604a8d20d1027
Diffstat (limited to 'src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp')
-rw-r--r-- | src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp | 69 |
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp new file mode 100644 index 0000000000..6c39fa1186 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp @@ -0,0 +1,69 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RefArithmeticWorkload.hpp" +#include "ArithmeticFunction.hpp" +#include "RefWorkloadUtils.hpp" +#include "Profiling.hpp" +#include <vector> + +namespace armnn +{ + +template <typename ParentDescriptor, typename Functor> +void BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString); + + auto data = Float32Workload<ParentDescriptor>::GetData(); + const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape(); + const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape(); + const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape(); + + const float* inData0 = GetInputTensorDataFloat(0, data); + const float* inData1 = GetInputTensorDataFloat(1, data); + float* outData = GetOutputTensorDataFloat(0, data); + + ArithmeticFunction<Functor>(inShape0, inShape1, outShape, inData0, inData1, outData); +} + +template <typename ParentDescriptor, typename Functor> +void BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString); + + auto data = Uint8Workload<ParentDescriptor>::GetData(); + const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]); + const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]); + + auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0); + auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1); + + std::vector<float> 
results(outputInfo.GetNumElements()); + + ArithmeticFunction<Functor>(inputInfo0.GetShape(), + inputInfo1.GetShape(), + outputInfo.GetShape(), + dequant0.data(), + dequant1.data(), + results.data()); + + Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo); +} + +} + +template class armnn::BaseFloat32ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>; +template class armnn::BaseUint8ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>; + +template class armnn::BaseFloat32ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>; +template class armnn::BaseUint8ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>; + +template class armnn::BaseFloat32ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>; +template class armnn::BaseUint8ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>; + +template class armnn::BaseFloat32ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>; +template class armnn::BaseUint8ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>; |