author     Éanna Ó Catháin <eanna.ocathain@arm.com>    2018-11-28 16:24:38 +0000
committer  Éanna Ó Catháin <eanna.ocathain@arm.com>    2018-11-28 16:24:38 +0000
commit     d57415d9a2117da9cc5c58f8b5e39ba7455417d1 (patch)
tree       7781ce03a1c3373121c6dff9d4eeb81fd306ad44 /src/backends/reference/workloads/RefElementwiseWorkload.cpp
parent     5a4304a09fcbfd5fab4c73e5fd0d4cc9f3170395 (diff)
IVGCVSW-2202 Refactoring Arithmetic* names to Elementwise* names for workloads and workload functions
Change-Id: I6f3fce12a55f7d38ceafcdfcd6b5181bf56e2c09
Diffstat (limited to 'src/backends/reference/workloads/RefElementwiseWorkload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefElementwiseWorkload.cpp  |  69
1 file changed, 69 insertions(+), 0 deletions(-)
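
In practice this change renames the Arithmetic* workload family to Elementwise*. The old names do not appear in this diff; inferred from the commit message and the includes below, the mapping is of the form:

    ArithmeticFunction<Functor>    -> ElementwiseFunction<Functor>
    RefArithmeticWorkload.cpp      -> RefElementwiseWorkload.cpp
    BaseFloat32ArithmeticWorkload  -> BaseFloat32ElementwiseWorkload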
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
new file mode 100644
index 0000000000..8e312a7dd1
--- /dev/null
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -0,0 +1,69 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefElementwiseWorkload.hpp"
+#include "ElementwiseFunction.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+#include <vector>
+
+namespace armnn
+{
+
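+// Float32 path: fetch the input/output shapes and raw float buffers, then
+// apply the element-wise Functor across them via ElementwiseFunction.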
+template <typename ParentDescriptor, typename Functor>
+void BaseFloat32ElementwiseWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+ auto data = Float32Workload<ParentDescriptor>::GetData();
+ const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape();
+ const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape();
+ const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape();
+
+ const float* inData0 = GetInputTensorDataFloat(0, data);
+ const float* inData1 = GetInputTensorDataFloat(1, data);
+ float* outData = GetOutputTensorDataFloat(0, data);
+
+ ElementwiseFunction<Functor>(inShape0, inShape1, outShape, inData0, inData1, outData);
+}
+
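+// Uint8 path: dequantize both inputs to float, run the same element-wise
+// function, then quantize the float results back into the uint8 output.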
+template <typename ParentDescriptor, typename Functor>
+void BaseUint8ElementwiseWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+ auto data = Uint8Workload<ParentDescriptor>::GetData();
+ const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+
+ auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0);
+ auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1);
+
+ std::vector<float> results(outputInfo.GetNumElements());
+
+ ElementwiseFunction<Functor>(inputInfo0.GetShape(),
+ inputInfo1.GetShape(),
+ outputInfo.GetShape(),
+ dequant0.data(),
+ dequant1.data(),
+ results.data());
+
+ Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
+}
+
+} // namespace armnn
+
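+// Explicit instantiations: one Float32 and one Uint8 workload per supported
+// element-wise operation, each paired with the matching std:: functor.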
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
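
For readers unfamiliar with the pattern, below is a simplified, self-contained sketch of the two execution paths in this file. It is not armnn code: it assumes both inputs share one shape (the shape arguments to ElementwiseFunction suggest broadcast support, which the sketch omits) and a single linear scale/offset quantization shared by inputs and output, with the armnn tensor types replaced by plain std::vector.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <vector>

// Float path: apply the functor element by element, as the Float32
// workload does for same-shape inputs.
template <typename Functor>
std::vector<float> ElementwiseFloat(const std::vector<float>& in0,
                                    const std::vector<float>& in1)
{
    std::vector<float> out(in0.size());
    std::transform(in0.begin(), in0.end(), in1.begin(), out.begin(), Functor());
    return out;
}

// Uint8 path: dequantize, compute in float, quantize the results back,
// mirroring the Dequantize / ElementwiseFunction / Quantize sequence above.
std::vector<uint8_t> ElementwiseAddU8(const std::vector<uint8_t>& in0,
                                      const std::vector<uint8_t>& in1,
                                      float scale, int32_t offset)
{
    std::vector<float> f0(in0.size());
    std::vector<float> f1(in1.size());
    for (std::size_t i = 0; i < in0.size(); ++i)
    {
        f0[i] = scale * (static_cast<int32_t>(in0[i]) - offset); // dequantize
        f1[i] = scale * (static_cast<int32_t>(in1[i]) - offset);
    }

    std::vector<float> results = ElementwiseFloat<std::plus<float>>(f0, f1);

    std::vector<uint8_t> out(results.size());
    for (std::size_t i = 0; i < results.size(); ++i)
    {
        // Quantize with rounding and clamp to the uint8 range.
        int32_t q = static_cast<int32_t>(std::lround(results[i] / scale)) + offset;
        out[i] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }
    return out;
}

Swapping std::plus<float> for std::minus, std::multiplies, or std::divides yields the other operations instantiated above, which is why the workloads are parameterized on a Functor instead of duplicating the loop per operation.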