aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/backends/RefWorkloads
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2018-08-29 12:42:10 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-09-17 17:21:23 +0100
commite7a86a4a3363993fb41b1ea62f23b3643b8b0c78 (patch)
tree6d054cae92a13412129525e4f9ea441e7d8c6b73 /src/armnn/backends/RefWorkloads
parenta68241066c3e797dab70f515d2c55aaa74abf564 (diff)
downloadarmnn-e7a86a4a3363993fb41b1ea62f23b3643b8b0c78.tar.gz
IVGCVSW-1200 Division layer
* IVGCVSW-1772 Create QueueDescriptors
* IVGCVSW-1773 Add a CL implementation of the DivisionWorkload
* IVGCVSW-1774 Add Neon implementation of the DivisionWorkload
* IVGCVSW-1775 Add a Ref implementation of the DivisionWorkload
* IVGCVSW-1776 Add a Division Layer
* Added simple division unit tests with broadcasting

Change-Id: I05751fb7f868789f6c06f91e8d25e52b4f12ab5e
Diffstat (limited to 'src/armnn/backends/RefWorkloads')
-rw-r--r--src/armnn/backends/RefWorkloads/Division.cpp52
-rw-r--r--src/armnn/backends/RefWorkloads/Division.hpp20
-rw-r--r--src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp31
-rw-r--r--src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp21
-rw-r--r--src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp37
-rw-r--r--src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp21
6 files changed, 182 insertions, 0 deletions
diff --git a/src/armnn/backends/RefWorkloads/Division.cpp b/src/armnn/backends/RefWorkloads/Division.cpp
new file mode 100644
index 0000000000..9837fea6b4
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/Division.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "Division.hpp"
+#include "Broadcast.hpp"
+
+#include <functional>
+
namespace
{

// Divides each element of inData0 by the corresponding element of inData1,
// writing the quotients to outData. All three buffers must hold at least
// numElements floats.
//
// NOTE(review): IEEE-754 float division by zero is well defined (it yields
// +/-inf, or NaN for 0/0), so no explicit guard is added here; the original
// TODO about divide-by-zero handling is left as a policy question.
void ElementwiseDivision(unsigned int numElements,
                         const float* inData0,
                         const float* inData1,
                         float* outData)
{
    const float* const end = inData0 + numElements;
    while (inData0 != end)
    {
        *outData++ = *inData0++ / *inData1++;
    }
}

} // namespace
+
+namespace armnn
+{
+
+void Division(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ const float* inData0,
+ const float* inData1,
+ float* outData)
+{
+ if (inShape0 == inShape1)
+ {
+ ElementwiseDivision(inShape0.GetNumElements(), inData0, inData1, outData);
+ }
+ else
+ {
+ BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::divides<float>(),
+ 0,
+ inData0,
+ inData1,
+ outData);
+ }
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Division.hpp b/src/armnn/backends/RefWorkloads/Division.hpp
new file mode 100644
index 0000000000..d4c7e8dc8f
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/Division.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+ void Division(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ const float* inData0,
+ const float* inData1,
+ float* outData);
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
new file mode 100644
index 0000000000..7cbd1fae5b
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.cpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "RefDivisionFloat32Workload.hpp"
+
+#include "Division.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+void RefDivisionFloat32Workload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDivisionFloat32Workload_Execute");
+
+ const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
+ const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
+ const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
+
+ float* outputData = GetOutputTensorDataFloat(0, m_Data);
+ const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
+ const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
+
+ Division(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp
new file mode 100644
index 0000000000..e31c255cff
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefDivisionFloat32Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+class RefDivisionFloat32Workload : public Float32Workload<DivisionQueueDescriptor>
+{
+public:
+ using Float32Workload<DivisionQueueDescriptor>::Float32Workload;
+ virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
new file mode 100644
index 0000000000..4354e70271
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "RefDivisionUint8Workload.hpp"
+
+#include "Division.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+void RefDivisionUint8Workload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDivisionUint8Workload_Execute");
+
+ const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
+ auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
+
+ std::vector<float> results(outputInfo.GetNumElements());
+ Division(
+ inputInfo0.GetShape(), inputInfo1.GetShape(), outputInfo.GetShape(),
+ dequant0.data(), dequant1.data(),results.data());
+
+ Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp
new file mode 100644
index 0000000000..d9e26ce3dd
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+class RefDivisionUint8Workload : public Uint8Workload<DivisionQueueDescriptor>
+{
+public:
+ using Uint8Workload<DivisionQueueDescriptor>::Uint8Workload;
+ virtual void Execute() const override;
+};
+
+} //namespace armnn