aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/RefMeanUint8Workload.cpp
diff options
context:
space:
mode:
authornarpra01 <narumol.prangnawarat@arm.com>2018-09-28 11:07:51 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-10 16:16:57 +0100
commit1e4c31dafb1c8984a126fa1d211ed8f9eedaf7cc (patch)
tree006e40b3bbfdc4a202cdada8fa9afec0dd8fffae /src/backends/reference/workloads/RefMeanUint8Workload.cpp
parent33cea4db0b2729c5dbd50f9c0985578c60baffdd (diff)
downloadarmnn-1e4c31dafb1c8984a126fa1d211ed8f9eedaf7cc.tar.gz
IVGCVSW-1812 Adding Ref implementation and tests of MeanWorkloads
Change-Id: I6fb15c407024e3b91d5abf4513f8090be5821760
Diffstat (limited to 'src/backends/reference/workloads/RefMeanUint8Workload.cpp')
-rw-r--r--src/backends/reference/workloads/RefMeanUint8Workload.cpp39
1 file changed, 39 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/RefMeanUint8Workload.cpp b/src/backends/reference/workloads/RefMeanUint8Workload.cpp
new file mode 100644
index 0000000000..4ebffcfd70
--- /dev/null
+++ b/src/backends/reference/workloads/RefMeanUint8Workload.cpp
@@ -0,0 +1,39 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefMeanUint8Workload.hpp"
+
+#include "Mean.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+// Constructs the reference (CpuRef) uint8 Mean workload. All state is held by
+// the Uint8Workload<MeanQueueDescriptor> base; this ctor only forwards the
+// queue descriptor and workload info to it.
+RefMeanUint8Workload::RefMeanUint8Workload(const MeanQueueDescriptor& descriptor, const WorkloadInfo& info)
+  :Uint8Workload<MeanQueueDescriptor>(descriptor, info) {}
+
+
+// Runs the Mean reduction on a uint8 tensor: dequantize input to float,
+// reduce with the reference Mean implementation over the axes given in
+// m_Parameters.m_Axis, then quantize the float results into the uint8 output.
+void RefMeanUint8Workload::Execute() const
+{
+    // Emits a profiling event scoped to this function for the CpuRef backend.
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanUint8Workload_Execute");
+
+    // Single input / single output; tensor infos come from the queue descriptor.
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    // Convert the quantized input to a float buffer — presumably using the
+    // quantization parameters carried by inputInfo (see RefWorkloadUtils).
+    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    // Scratch buffer holding the float-domain mean results, one per output element.
+    std::vector<float> results(outputInfo.GetNumElements());
+
+    Mean(inputInfo, outputInfo, m_Data.m_Parameters.m_Axis, dequant.data(), results.data());
+
+    // Requantize the float results back to uint8 in the output tensor —
+    // presumably using outputInfo's quantization parameters.
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn
+