author     David Beck <david.beck@arm.com>            2018-09-12 13:50:03 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2018-10-01 14:56:47 +0100
commit     279f8721824b104def48b426447fb1766d794e8e (patch)
tree       4673d08bf595ceef305ce01df7eb0a4e662dbd16
parent     0a710c4c44be908a93a318e1fbd5c3535e849293 (diff)
download   armnn-279f8721824b104def48b426447fb1766d794e8e.tar.gz

IVGCVSW-1843 : remove duplicate code for Ref Arithmetic workloads

Change-Id: If94d7b7b06a8c4e2c155b2ab470604a8d20d1027
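The eight workloads deleted below were near-identical: each pairs an element type (Float32 or Uint8) with a std:: functor and a profiling string. A minimal sketch of the consolidation idea, using simplified stand-in types rather than armnn's actual Workload hierarchy:

    #include <cstddef>
    #include <functional>

    // Sketch only: one templated workload replaces a class per operation and
    // per element type; the Functor (std::plus, std::minus, ...) supplies the
    // operation, so a single Execute body serves add/sub/mul/div.
    template <typename Functor, typename DataType>
    struct ArithmeticWorkload
    {
        void Execute(const DataType* in0, const DataType* in1,
                     DataType* out, std::size_t n) const
        {
            Functor op;
            for (std::size_t i = 0; i < n; ++i)
            {
                out[i] = op(in0[i], in1[i]);
            }
        }
    };

    using AdditionFloat32 = ArithmeticWorkload<std::plus<float>, float>;
    using DivisionFloat32 = ArithmeticWorkload<std::divides<float>, float>;

The real RefArithmeticWorkload introduced below additionally takes the queue descriptor and a StringMapping id as template parameters, so the profiling string also stops being a reason to duplicate code.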
-rw-r--r--  Android.mk  10
-rw-r--r--  CMakeLists.txt  20
-rw-r--r--  src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp  3
-rw-r--r--  src/armnn/backends/RefWorkloads.hpp  9
-rw-r--r--  src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp  31
-rw-r--r--  src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp  41
-rw-r--r--  src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp  69
-rw-r--r--  src/armnn/backends/RefWorkloads/RefArithmeticWorkload.hpp  122
-rw-r--r--  src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp  31
-rw-r--r--  src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp  41
-rw-r--r--  src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp  31
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp  41
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp  31
-rw-r--r--  src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp  21
-rw-r--r--  src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp  41
-rw-r--r--  src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp  21
-rw-r--r--  src/armnn/backends/StringMapping.cpp  17
-rw-r--r--  src/armnn/backends/StringMapping.hpp  49

24 files changed, 264 insertions(+), 491 deletions(-)
diff --git a/Android.mk b/Android.mk
index 6f7771c73c..9272aef4c4 100644
--- a/Android.mk
+++ b/Android.mk
@@ -124,16 +124,12 @@ LOCAL_SRC_FILES := \
src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp \
- src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp \
src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp \
src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/Broadcast.cpp \
src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp \
- src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp \
- src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp \
- src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp \
- src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp \
+ src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp \
src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/ResizeBilinear.cpp \
src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp \
@@ -151,7 +147,6 @@ LOCAL_SRC_FILES := \
src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp \
src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp \
src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp \
- src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp \
src/armnn/backends/RefWorkloads/Softmax.cpp \
@@ -164,8 +159,6 @@ LOCAL_SRC_FILES := \
src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp \
src/armnn/backends/RefWorkloads/RefConvertFp16ToFp32Workload.cpp \
src/armnn/backends/RefWorkloads/RefConvertFp32ToFp16Workload.cpp \
- src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp \
- src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp \
src/armnn/backends/MemCopyWorkload.cpp \
src/armnn/backends/WorkloadData.cpp \
src/armnn/backends/WorkloadFactory.cpp \
@@ -219,6 +212,7 @@ LOCAL_SRC_FILES := \
src/armnn/Utils.cpp \
src/armnn/LayerSupport.cpp \
src/armnn/Observable.cpp \
+ src/armnn/backends/StringMapping.cpp \
src/armnn/backends/RefLayerSupport.cpp \
src/armnn/backends/ClLayerSupport.cpp \
src/armnn/backends/NeonLayerSupport.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d166a718fc..4453a85cd2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -182,23 +182,18 @@ list(APPEND armnn_sources
src/armnn/backends/WorkloadUtils.hpp
src/armnn/backends/MemCopyWorkload.cpp
src/armnn/backends/MemCopyWorkload.hpp
+ src/armnn/backends/StringMapping.cpp
+ src/armnn/backends/StringMapping.hpp
src/armnn/backends/RefWorkloads/Broadcast.hpp
src/armnn/backends/RefWorkloads/Broadcast.cpp
src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp
src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp
src/armnn/backends/RefWorkloads/ArithmeticFunction.cpp
src/armnn/backends/RefWorkloads/ArithmeticFunction.hpp
- src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
- src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp
- src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
- src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp
- src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
- src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
- src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
- src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
+ src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp
+ src/armnn/backends/RefWorkloads/RefArithmeticWorkload.hpp
src/armnn/backends/RefWorkloads/ConvImpl.hpp
src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp
- src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp
src/armnn/backends/RefWorkloads/FullyConnected.cpp
src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp
@@ -208,7 +203,6 @@ list(APPEND armnn_sources
src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp
src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp
- src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp
src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp
src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp
@@ -241,7 +235,6 @@ list(APPEND armnn_sources
src/armnn/backends/RefWorkloads/Activation.cpp
src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp
src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp
- src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp
src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp
@@ -260,7 +253,6 @@ list(APPEND armnn_sources
src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.hpp
src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.hpp
src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.hpp
- src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp
src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp
src/armnn/backends/RefWorkloads/Softmax.cpp
@@ -281,10 +273,6 @@ list(APPEND armnn_sources
src/armnn/backends/RefWorkloads/RefConvertFp16ToFp32Workload.hpp
src/armnn/backends/RefWorkloads/RefConvertFp32ToFp16Workload.cpp
src/armnn/backends/RefWorkloads/RefConvertFp32ToFp16Workload.hpp
- src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
- src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp
- src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
- src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp
src/armnn/layers/LayerCloneBase.hpp
src/armnn/layers/LayerWithParameters.hpp
src/armnn/layers/ActivationLayer.hpp
diff --git a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp b/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp
index 5858ebd6eb..ad9a1aee68 100644
--- a/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp
+++ b/src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp
@@ -18,9 +18,6 @@ arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
- // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
- // ignored for F32 tensors.
return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput);
}
diff --git a/src/armnn/backends/RefWorkloads.hpp b/src/armnn/backends/RefWorkloads.hpp
index e58d4accbb..746a59e71f 100644
--- a/src/armnn/backends/RefWorkloads.hpp
+++ b/src/armnn/backends/RefWorkloads.hpp
@@ -7,8 +7,8 @@
#include "backends/RefWorkloads/RefConstantUint8Workload.hpp"
#include "backends/RefWorkloads/ArithmeticFunction.hpp"
+#include "backends/RefWorkloads/RefArithmeticWorkload.hpp"
#include "backends/RefWorkloads/ConvImpl.hpp"
-#include "backends/RefWorkloads/RefMultiplicationUint8Workload.hpp"
#include "backends/RefWorkloads/RefBaseConstantWorkload.hpp"
#include "backends/RefWorkloads/RefConvolution2dUint8Workload.hpp"
#include "backends/RefWorkloads/RefSplitterUint8Workload.hpp"
@@ -33,8 +33,6 @@
#include "backends/RefWorkloads/RefSoftmaxUint8Workload.hpp"
#include "backends/RefWorkloads/RefReshapeUint8Workload.hpp"
#include "backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp"
-#include "backends/RefWorkloads/RefAdditionUint8Workload.hpp"
-#include "backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp"
#include "backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp"
#include "backends/RefWorkloads/ResizeBilinear.hpp"
#include "backends/RefWorkloads/RefNormalizationFloat32Workload.hpp"
@@ -47,14 +45,9 @@
#include "backends/RefWorkloads/RefConstantFloat32Workload.hpp"
#include "backends/RefWorkloads/RefActivationFloat32Workload.hpp"
#include "backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp"
-#include "backends/RefWorkloads/RefAdditionFloat32Workload.hpp"
#include "backends/RefWorkloads/Pooling2d.hpp"
#include "backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp"
#include "backends/RefWorkloads/RefPermuteWorkload.hpp"
#include "backends/RefWorkloads/RefLstmFloat32Workload.hpp"
#include "backends/RefWorkloads/RefConvertFp16ToFp32Workload.hpp"
#include "backends/RefWorkloads/RefConvertFp32ToFp16Workload.hpp"
-#include "backends/RefWorkloads/RefDivisionFloat32Workload.hpp"
-#include "backends/RefWorkloads/RefDivisionUint8Workload.hpp"
-#include "backends/RefWorkloads/RefSubtractionFloat32Workload.hpp"
-#include "backends/RefWorkloads/RefSubtractionUint8Workload.hpp"
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
deleted file mode 100644
index 21c7533c0f..0000000000
--- a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefAdditionFloat32Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefAdditionFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAdditionFloat32Workload_Execute");
-
- const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
- const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
- const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
-
- const float* inData0 = GetInputTensorDataFloat(0, m_Data);
- const float* inData1 = GetInputTensorDataFloat(1, m_Data);
- float* outData = GetOutputTensorDataFloat(0, m_Data);
-
- ArithmeticFunction<std::plus<float>>(inShape0, inShape1, outShape, inData0, inData1, outData);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp
deleted file mode 100644
index 6250bb64aa..0000000000
--- a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefAdditionFloat32Workload : public Float32Workload<AdditionQueueDescriptor>
-{
-public:
- using Float32Workload<AdditionQueueDescriptor>::Float32Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
deleted file mode 100644
index 116a5f14cb..0000000000
--- a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefAdditionUint8Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefAdditionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAdditionUint8Workload_Execute");
-
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
- auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
- auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
-
- std::vector<float> results(outputInfo.GetNumElements());
-
- ArithmeticFunction<std::plus<float>>(inputInfo0.GetShape(),
- inputInfo1.GetShape(),
- outputInfo.GetShape(),
- dequant0.data(),
- dequant1.data(),
- results.data());
-
- Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp
deleted file mode 100644
index 0701681e63..0000000000
--- a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefAdditionUint8Workload : public Uint8Workload<AdditionQueueDescriptor>
-{
-public:
- using Uint8Workload<AdditionQueueDescriptor>::Uint8Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp
new file mode 100644
index 0000000000..6c39fa1186
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.cpp
@@ -0,0 +1,69 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefArithmeticWorkload.hpp"
+#include "ArithmeticFunction.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+#include <vector>
+
+namespace armnn
+{
+
+template <typename ParentDescriptor, typename Functor>
+void BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+ auto data = Float32Workload<ParentDescriptor>::GetData();
+ const TensorShape& inShape0 = GetTensorInfo(data.m_Inputs[0]).GetShape();
+ const TensorShape& inShape1 = GetTensorInfo(data.m_Inputs[1]).GetShape();
+ const TensorShape& outShape = GetTensorInfo(data.m_Outputs[0]).GetShape();
+
+ const float* inData0 = GetInputTensorDataFloat(0, data);
+ const float* inData1 = GetInputTensorDataFloat(1, data);
+ float* outData = GetOutputTensorDataFloat(0, data);
+
+ ArithmeticFunction<Functor>(inShape0, inShape1, outShape, inData0, inData1, outData);
+}
+
+template <typename ParentDescriptor, typename Functor>
+void BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>::ExecuteImpl(const char * debugString) const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, debugString);
+
+ auto data = Uint8Workload<ParentDescriptor>::GetData();
+ const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
+
+ auto dequant0 = Dequantize(GetInputTensorDataU8(0, data), inputInfo0);
+ auto dequant1 = Dequantize(GetInputTensorDataU8(1, data), inputInfo1);
+
+ std::vector<float> results(outputInfo.GetNumElements());
+
+ ArithmeticFunction<Functor>(inputInfo0.GetShape(),
+ inputInfo1.GetShape(),
+ outputInfo.GetShape(),
+ dequant0.data(),
+ dequant1.data(),
+ results.data());
+
+ Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
+}
+
+}
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::AdditionQueueDescriptor, std::plus<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::SubtractionQueueDescriptor, std::minus<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::MultiplicationQueueDescriptor, std::multiplies<float>>;
+
+template class armnn::BaseFloat32ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
+template class armnn::BaseUint8ArithmeticWorkload<armnn::DivisionQueueDescriptor, std::divides<float>>;
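The ExecuteImpl definitions live in this .cpp rather than the header, so the explicit instantiations above are what make the eight required combinations available to the linker. A minimal sketch of that technique, with hypothetical names:

    #include <cstddef>

    // accumulator.hpp -- declaration only; the definition stays out of the header
    template <typename T>
    struct Accumulator
    {
        T Sum(const T* data, std::size_t n) const;
    };

    // accumulator.cpp -- out-of-line definition plus explicit instantiations,
    // mirroring how this file instantiates BaseFloat32/BaseUint8 workloads
    template <typename T>
    T Accumulator<T>::Sum(const T* data, std::size_t n) const
    {
        T total{};
        for (std::size_t i = 0; i < n; ++i)
        {
            total += data[i];
        }
        return total;
    }

    template struct Accumulator<float>;   // code generated here, linked elsewhere
    template struct Accumulator<double>;

The trade-off is that only the instantiated combinations exist; adding a new arithmetic operation means adding a pair of instantiation lines here.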
diff --git a/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.hpp b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.hpp
new file mode 100644
index 0000000000..7197b7a883
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefArithmeticWorkload.hpp
@@ -0,0 +1,122 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Types.hpp>
+#include "backends/StringMapping.hpp"
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+template <typename Functor,
+ typename armnn::DataType DataType,
+ typename ParentDescriptor,
+ typename armnn::StringMapping::Id DebugString>
+class RefArithmeticWorkload
+{
+ // Needs specialization. The default is empty on purpose.
+};
+
+template <typename ParentDescriptor, typename Functor>
+class BaseFloat32ArithmeticWorkload : public Float32Workload<ParentDescriptor>
+{
+public:
+ using Float32Workload<ParentDescriptor>::Float32Workload;
+ void ExecuteImpl(const char * debugString) const;
+};
+
+template <typename Functor,
+ typename ParentDescriptor,
+ typename armnn::StringMapping::Id DebugString>
+class RefArithmeticWorkload<Functor, armnn::DataType::Float32, ParentDescriptor, DebugString>
+ : public BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>
+{
+public:
+ using BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>::BaseFloat32ArithmeticWorkload;
+
+ virtual void Execute() const override
+ {
+ using Parent = BaseFloat32ArithmeticWorkload<ParentDescriptor, Functor>;
+ Parent::ExecuteImpl(StringMapping::Instance().Get(DebugString));
+ }
+};
+
+template <typename ParentDescriptor, typename Functor>
+class BaseUint8ArithmeticWorkload : public Uint8Workload<ParentDescriptor>
+{
+public:
+ using Uint8Workload<ParentDescriptor>::Uint8Workload;
+ void ExecuteImpl(const char * debugString) const;
+};
+
+template <typename Functor,
+ typename ParentDescriptor,
+ typename armnn::StringMapping::Id DebugString>
+class RefArithmeticWorkload<Functor, armnn::DataType::QuantisedAsymm8, ParentDescriptor, DebugString>
+ : public BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>
+{
+public:
+ using BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>::BaseUint8ArithmeticWorkload;
+
+ virtual void Execute() const override
+ {
+ using Parent = BaseUint8ArithmeticWorkload<ParentDescriptor, Functor>;
+ Parent::ExecuteImpl(StringMapping::Instance().Get(DebugString));
+ }
+};
+
+using RefAdditionFloat32Workload =
+ RefArithmeticWorkload<std::plus<float>,
+ DataType::Float32,
+ AdditionQueueDescriptor,
+ StringMapping::RefAdditionWorkload_Execute>;
+
+using RefAdditionUint8Workload =
+ RefArithmeticWorkload<std::plus<float>,
+ DataType::QuantisedAsymm8,
+ AdditionQueueDescriptor,
+ StringMapping::RefAdditionWorkload_Execute>;
+
+
+using RefSubtractionFloat32Workload =
+ RefArithmeticWorkload<std::minus<float>,
+ DataType::Float32,
+ SubtractionQueueDescriptor,
+ StringMapping::RefSubtractionWorkload_Execute>;
+
+using RefSubtractionUint8Workload =
+ RefArithmeticWorkload<std::minus<float>,
+ DataType::QuantisedAsymm8,
+ SubtractionQueueDescriptor,
+ StringMapping::RefSubtractionWorkload_Execute>;
+
+using RefMultiplicationFloat32Workload =
+ RefArithmeticWorkload<std::multiplies<float>,
+ DataType::Float32,
+ MultiplicationQueueDescriptor,
+ StringMapping::RefMultiplicationWorkload_Execute>;
+
+using RefMultiplicationUint8Workload =
+ RefArithmeticWorkload<std::multiplies<float>,
+ DataType::QuantisedAsymm8,
+ MultiplicationQueueDescriptor,
+ StringMapping::RefMultiplicationWorkload_Execute>;
+
+using RefDivisionFloat32Workload =
+ RefArithmeticWorkload<std::divides<float>,
+ DataType::Float32,
+ DivisionQueueDescriptor,
+ StringMapping::RefDivisionWorkload_Execute>;
+
+using RefDivisionUint8Workload =
+ RefArithmeticWorkload<std::divides<float>,
+ DataType::QuantisedAsymm8,
+ DivisionQueueDescriptor,
+ StringMapping::RefDivisionWorkload_Execute>;
+
+} // armnn
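Because the old class names survive as aliases, existing call sites compile unchanged. A hedged sketch of a factory-style call site; CreateRefAddition and its surrounding signature are illustrative, not the actual RefWorkloadFactory code:

    #include <memory>

    // Hypothetical helper: the alias resolves to the specialized template above,
    // and its inherited constructor takes the usual (descriptor, info) pair.
    std::unique_ptr<armnn::IWorkload> CreateRefAddition(
        const armnn::AdditionQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info)
    {
        // Float32 variant shown; a real factory would select the Float32 or
        // Uint8 alias from the tensor data type.
        return std::make_unique<armnn::RefAdditionFloat32Workload>(descriptor, info);
    }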
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
deleted file mode 100644
index 28c90610de..0000000000
--- a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefDivisionFloat32Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefDivisionFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDivisionFloat32Workload_Execute");
-
- const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
- const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
- const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
-
- float* outputData = GetOutputTensorDataFloat(0, m_Data);
- const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
- const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
-
- ArithmeticFunction<std::divides<float>>(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp
deleted file mode 100644
index 4af0b619a0..0000000000
--- a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefDivisionFloat32Workload : public Float32Workload<DivisionQueueDescriptor>
-{
-public:
- using Float32Workload<DivisionQueueDescriptor>::Float32Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
deleted file mode 100644
index d10d874137..0000000000
--- a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefDivisionUint8Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefDivisionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDivisionUint8Workload_Execute");
-
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
- auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
- auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
-
- std::vector<float> results(outputInfo.GetNumElements());
-
- ArithmeticFunction<std::divides<float>>(inputInfo0.GetShape(),
- inputInfo1.GetShape(),
- outputInfo.GetShape(),
- dequant0.data(),
- dequant1.data(),
- results.data());
-
- Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp
deleted file mode 100644
index 9c9f764551..0000000000
--- a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefDivisionUint8Workload : public Uint8Workload<DivisionQueueDescriptor>
-{
-public:
- using Uint8Workload<DivisionQueueDescriptor>::Uint8Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
deleted file mode 100644
index 0b36f0ff00..0000000000
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefMultiplicationFloat32Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefMultiplicationFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMultiplicationFloat32Workload_Execute");
-
- const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
- const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
- const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
-
- float* outputData = GetOutputTensorDataFloat(0, m_Data);
- const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
- const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
-
- ArithmeticFunction<std::multiplies<float>>(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp
deleted file mode 100644
index ba38ee04a9..0000000000
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefMultiplicationFloat32Workload : public Float32Workload<MultiplicationQueueDescriptor>
-{
-public:
- using Float32Workload<MultiplicationQueueDescriptor>::Float32Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
deleted file mode 100644
index b929a53808..0000000000
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefMultiplicationUint8Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefMultiplicationUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMultiplicationUint8Workload_Execute");
-
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
- auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
- auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
-
- std::vector<float> results(outputInfo.GetNumElements());
-
- ArithmeticFunction<std::multiplies<float>>(inputInfo0.GetShape(),
- inputInfo1.GetShape(),
- outputInfo.GetShape(),
- dequant0.data(),
- dequant1.data(),
- results.data());
-
- Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp
deleted file mode 100644
index 2a37128d57..0000000000
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefMultiplicationUint8Workload : public Uint8Workload<MultiplicationQueueDescriptor>
-{
-public:
- using Uint8Workload<MultiplicationQueueDescriptor>::Uint8Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
deleted file mode 100644
index f1840c347b..0000000000
--- a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSubtractionFloat32Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-namespace armnn
-{
-
-void RefSubtractionFloat32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSubtractionFloat32Workload_Execute");
-
- const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
- const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
- const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
-
- const float* inData0 = GetInputTensorDataFloat(0, m_Data);
- const float* inData1 = GetInputTensorDataFloat(1, m_Data);
- float* outData = GetOutputTensorDataFloat(0, m_Data);
-
- ArithmeticFunction<std::minus<float>>(inShape0, inShape1, outShape, inData0, inData1, outData);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
deleted file mode 100644
index b3f5ed9474..0000000000
--- a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefSubtractionFloat32Workload : public Float32Workload<SubtractionQueueDescriptor>
-{
-public:
- using Float32Workload<SubtractionQueueDescriptor>::Float32Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
deleted file mode 100644
index 1affbdd8b1..0000000000
--- a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefSubtractionUint8Workload.hpp"
-
-#include "ArithmeticFunction.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include "Profiling.hpp"
-
-#include <vector>
-
-namespace armnn
-{
-
-void RefSubtractionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSubtractionUint8Workload_Execute");
-
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
- auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
- auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
-
- std::vector<float> results(outputInfo.GetNumElements());
-
- ArithmeticFunction<std::minus<float>>(inputInfo0.GetShape(),
- inputInfo1.GetShape(),
- outputInfo.GetShape(),
- dequant0.data(),
- dequant1.data(),
- results.data());
-
- Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
-}
-
-} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
deleted file mode 100644
index 582533253b..0000000000
--- a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/Workload.hpp"
-#include "backends/WorkloadData.hpp"
-
-namespace armnn
-{
-
-class RefSubtractionUint8Workload : public Uint8Workload<SubtractionQueueDescriptor>
-{
-public:
- using Uint8Workload<SubtractionQueueDescriptor>::Uint8Workload;
- virtual void Execute() const override;
-};
-
-} //namespace armnn
diff --git a/src/armnn/backends/StringMapping.cpp b/src/armnn/backends/StringMapping.cpp
new file mode 100644
index 0000000000..3ca8843812
--- /dev/null
+++ b/src/armnn/backends/StringMapping.cpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StringMapping.hpp"
+
+namespace armnn
+{
+
+const StringMapping& StringMapping::Instance()
+{
+ static StringMapping instance;
+ return instance;
+}
+
+} // armnn
diff --git a/src/armnn/backends/StringMapping.hpp b/src/armnn/backends/StringMapping.hpp
new file mode 100644
index 0000000000..6312e68945
--- /dev/null
+++ b/src/armnn/backends/StringMapping.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnn
+{
+
+///
+/// StringMapping is a helper class that makes it possible to use strings as
+/// template parameters, which simplifies code that differs only in a string,
+/// such as a debug string literal.
+///
+struct StringMapping
+{
+public:
+ enum Id {
+ RefAdditionWorkload_Execute,
+ RefSubtractionWorkload_Execute,
+ RefMultiplicationWorkload_Execute,
+ RefDivisionWorkload_Execute,
+ MAX_STRING_ID
+ };
+
+ const char * Get(Id id) const
+ {
+ return m_Strings[id];
+ }
+
+ static const StringMapping& Instance();
+
+private:
+ StringMapping()
+ {
+ m_Strings[RefAdditionWorkload_Execute] = "RefAdditionWorkload_Execute";
+ m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
+ m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
+ m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
+ }
+
+ StringMapping(const StringMapping &) = delete;
+ StringMapping& operator=(const StringMapping &) = delete;
+
+ const char * m_Strings[MAX_STRING_ID];
+};
+
+} //namespace armnn
\ No newline at end of file
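The enum indirection exists because C++14 does not allow string literals as non-type template parameters: the Id enumerator is a legal template argument, and the literal is recovered at run time via Get(). Instance() in StringMapping.cpp is a Meyers singleton, which has thread-safe initialization since C++11. A minimal standalone sketch of the same pattern, with hypothetical names:

    #include <cstdio>

    // Same idea as StringMapping: pass an enum Id through the template,
    // look the actual string up at run time.
    enum class Msg { Hello, Goodbye, MAX };

    inline const char* Lookup(Msg id)
    {
        static const char* const table[] = { "Hello", "Goodbye" };
        return table[static_cast<int>(id)];
    }

    template <Msg DebugString>
    struct Worker
    {
        void Run() const { std::printf("%s\n", Lookup(DebugString)); }
    };

    int main()
    {
        Worker<Msg::Hello>{}.Run();    // prints "Hello"
        Worker<Msg::Goodbye>{}.Run();  // prints "Goodbye"
    }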