diff options
author | Éanna Ó Catháin <eanna.ocathain@arm.com> | 2018-11-28 16:24:38 +0000 |
---|---|---|
committer | Éanna Ó Catháin <eanna.ocathain@arm.com> | 2018-11-28 16:24:38 +0000 |
commit | d57415d9a2117da9cc5c58f8b5e39ba7455417d1 (patch) | |
tree | 7781ce03a1c3373121c6dff9d4eeb81fd306ad44 /src/backends/reference/workloads/RefArithmeticWorkload.cpp | |
parent | 5a4304a09fcbfd5fab4c73e5fd0d4cc9f3170395 (diff) | |
download | armnn-d57415d9a2117da9cc5c58f8b5e39ba7455417d1.tar.gz |
IVGCVSW-2202 Refactoring Arithmetic* names to Elementwise* names for workloads and workload functions
Change-Id: I6f3fce12a55f7d38ceafcdfcd6b5181bf56e2c09
Diffstat (limited to 'src/backends/reference/workloads/RefArithmeticWorkload.cpp')
-rw-r--r-- | src/backends/reference/workloads/RefArithmeticWorkload.cpp | 69 |
1 file changed, 0 insertions, 69 deletions
diff --git a/src/backends/reference/workloads/RefArithmeticWorkload.cpp b/src/backends/reference/workloads/RefArithmeticWorkload.cpp deleted file mode 100644 index 6c39fa1186..0000000000 --- a/src/backends/reference/workloads/RefArithmeticWorkload.cpp +++ /dev/null @@ -1,69 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefArithmeticWorkload.hpp" -#include "ArithmeticFunction.hpp" -#include "RefWorkloadUtils.hpp" -#include "Profiling.hpp" -#include <vector> - -namespace armnn -{ - -template <typename ParentDescriptor, typename Functor> -void BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString); - - auto data = Float32Workload<ParentDescriptor>::GetData(); - const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape(); - const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape(); - const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape(); - - const float* inData0 = GetInputTensorDataFloat(0, data); - const float* inData1 = GetInputTensorDataFloat(1, data); - float* outData = GetOutputTensorDataFloat(0, data); - - ArithmeticFunction<Functor>(inShape0, inShape1, outShape, inData0, inData1, outData); -} - -template <typename ParentDescriptor, typename Functor> -void BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString); - - auto data = Uint8Workload<ParentDescriptor>::GetData(); - const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]); - const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]); - const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]); - - auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0); - auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1); - - std::vector<float> 
results(outputInfo.GetNumElements()); - - ArithmeticFunction<Functor>(inputInfo0.GetShape(), - inputInfo1.GetShape(), - outputInfo.GetShape(), - dequant0.data(), - dequant1.data(), - results.data()); - - Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo); -} - -} - -template class armnn::BaseFloat32ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>; -template class armnn::BaseUint8ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>; - -template class armnn::BaseFloat32ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>; -template class armnn::BaseUint8ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>; - -template class armnn::BaseFloat32ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>; -template class armnn::BaseUint8ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>; - -template class armnn::BaseFloat32ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>; -template class armnn::BaseUint8ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>; |