aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2018-08-29 12:42:10 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-09-17 17:21:23 +0100
commite7a86a4a3363993fb41b1ea62f23b3643b8b0c78 (patch)
tree6d054cae92a13412129525e4f9ea441e7d8c6b73 /src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
parenta68241066c3e797dab70f515d2c55aaa74abf564 (diff)
downloadarmnn-e7a86a4a3363993fb41b1ea62f23b3643b8b0c78.tar.gz
IVGCVSW-1200 Division layer
* IVGCVSW-1772 Create QueueDescriptors
* IVGCVSW-1773 Add a CL implementation of the DivisionWorkload
* IVGCVSW-1774 Add Neon implementation of the DivisionWorkload
* IVGCVSW-1775 Add a Ref implementation of the DivisionWorkload
* IVGCVSW-1776 Add a Division Layer
* Added simple division unit tests with broadcasting

Change-Id: I05751fb7f868789f6c06f91e8d25e52b4f12ab5e
Diffstat (limited to 'src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp')
-rw-r--r--src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp37
1 file changed, 37 insertions, 0 deletions
diff --git a/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
new file mode 100644
index 0000000000..4354e70271
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefDivisionUint8Workload.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "RefDivisionUint8Workload.hpp"
+
+#include "Division.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+void RefDivisionUint8Workload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDivisionUint8Workload_Execute");
+
+ const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
+ auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
+
+ std::vector<float> results(outputInfo.GetNumElements());
+ Division(
+ inputInfo0.GetShape(), inputInfo1.GetShape(), outputInfo.GetShape(),
+ dequant0.data(), dequant1.data(),results.data());
+
+ Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn