aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp
diff options
context:
space:
mode:
authorDavid Beck <david.beck@arm.com>2018-09-24 13:18:27 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-10 16:16:57 +0100
commitb4540bef0b0327683fe8e63f727c1212800dc2a9 (patch)
treee1ea8bb6ee981640a1c469ceb556ed648ffde411 /src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp
parent2d9dd36fb6bc20b370701ab15463359b9db35f18 (diff)
downloadarmnn-b4540bef0b0327683fe8e63f727c1212800dc2a9.tar.gz
IVGCVSW-1898 : Ref backend folder structure
* Reference backend is renamed to backends/reference as per https://confluence.arm.com/display/MLENG/Pluggable+backends Change-Id: I27a13c274eb60995dfb459e3c49c0e2f60bcd32c
Diffstat (limited to 'src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp')
-rw-r--r--src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp66
1 files changed, 66 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp b/src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp
new file mode 100644
index 0000000000..cd785d786c
--- /dev/null
+++ b/src/backends/reference/workloads/RefFullyConnectedUint8Workload.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefFullyConnectedUint8Workload.hpp"
+
+#include "FullyConnected.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+// Constructs the workload, taking owning handles to the constant weight and
+// (when m_BiasEnabled) bias tensors from the queue descriptor.
+// NOTE(review): ScopedCpuTensorHandle presumably deep-copies the constant
+// tensor data so the workload does not dangle if the descriptor's handles are
+// released — confirm against ScopedCpuTensorHandle's definition.
+RefFullyConnectedUint8Workload::RefFullyConnectedUint8Workload(
+    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
+     : Uint8Workload<FullyConnectedQueueDescriptor>(descriptor, info),
+        m_Weight(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight))),
+        // m_Bias stays null when the descriptor has no bias enabled.
+        m_Bias(descriptor.m_Parameters.m_BiasEnabled
+                   ? std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias)) : nullptr) {}
+
+// Executes the uint8 fully-connected layer on the reference (CPU) backend.
+// Strategy: dequantize input/weights/bias to float, run the float
+// FullyConnected kernel, then requantize the float results into the uint8
+// output tensor.
+void RefFullyConnectedUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedUint8Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const uint8_t* weightData = m_Weight->GetConstTensor<uint8_t>();
+
+    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    auto weight = Dequantize(weightData, m_Weight->GetTensorInfo());
+
+    // Float scratch buffer for the kernel's output, requantized at the end.
+    std::vector<float> results(outputInfo.GetNumElements());
+
+    // Dequantize the optional bias once up front so a single kernel call
+    // serves both the bias and no-bias cases (avoids duplicating the
+    // FullyConnected invocation across two branches).
+    std::vector<float> bias;
+    if (m_Data.m_Parameters.m_BiasEnabled)
+    {
+        // Quantized biases are stored as int32 accumulator values.
+        const int32_t* biasData = m_Bias->GetConstTensor<int32_t>();
+        bias = Dequantize(biasData, m_Bias->GetTensorInfo());
+    }
+
+    FullyConnected(dequant.data(),
+                   results.data(),
+                   inputInfo,
+                   outputInfo,
+                   weight.data(),
+                   m_Data.m_Parameters.m_BiasEnabled ? bias.data() : nullptr,
+                   m_Data.m_Parameters.m_TransposeWeightMatrix);
+
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn