aboutsummaryrefslogtreecommitdiff
path: root/src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp
diff options
context:
space:
mode:
author: David Beck <david.beck@arm.com> 2018-09-19 12:03:20 +0100
committer: Matthew Bentham <matthew.bentham@arm.com> 2018-10-10 16:16:56 +0100
commit10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab (patch)
tree1ac5b4f415531e2ef759439ab8e113f177bea7c5 /src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp
parenta3f165624b2cdfbced674af5a6e11856b1e746d9 (diff)
download: armnn-10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab.tar.gz
IVGCVSW-1897 : build infrastructure for the src/backends folder
Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
Diffstat (limited to 'src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp')
-rw-r--r-- src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp (66 lines)
1 file changed, 66 insertions, 0 deletions
diff --git a/src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp b/src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp
new file mode 100644
index 0000000000..cd785d786c
--- /dev/null
+++ b/src/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefFullyConnectedUint8Workload.hpp"
+
+#include "FullyConnected.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+// Constructor: snapshots the constant weight (and optional bias) tensors into
+// workload-owned ScopedCpuTensorHandles so they outlive the descriptor.
+RefFullyConnectedUint8Workload::RefFullyConnectedUint8Workload(
+    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : Uint8Workload<FullyConnectedQueueDescriptor>(descriptor, info),
+      m_Weight(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight))),
+      // m_Bias stays nullptr when the layer has no bias; Execute() checks
+      // m_BiasEnabled again before dereferencing it.
+      m_Bias(descriptor.m_Parameters.m_BiasEnabled
+                 ? std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias)) : nullptr) {}
+
+// Runs the uint8 fully-connected layer on the reference (CPU) backend.
+// Strategy: dequantize input/weights(/bias) to float, run the float
+// FullyConnected reference kernel, then quantize the float results back
+// into the uint8 output tensor.
+void RefFullyConnectedUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedUint8Workload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    const uint8_t* weightData = m_Weight->GetConstTensor<uint8_t>();
+
+    // Dequantize the uint8 input and weights to float vectors using each
+    // tensor's own quantization scale/offset (carried in its TensorInfo).
+    auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo);
+
+    auto weight = Dequantize(weightData, m_Weight->GetTensorInfo());
+
+    // Float scratch buffer for the kernel output, one element per output value.
+    std::vector<float> results(outputInfo.GetNumElements());
+
+    if (m_Data.m_Parameters.m_BiasEnabled)
+    {
+        // Bias for quantized layers is stored as int32 (see m_Bias handle
+        // created in the constructor); dequantize it like the other tensors.
+        const int32_t* biasData = m_Bias->GetConstTensor<int32_t>();
+        auto bias = Dequantize(biasData, m_Bias->GetTensorInfo());
+
+        FullyConnected(dequant.data(),
+                       results.data(),
+                       inputInfo,
+                       outputInfo,
+                       weight.data(),
+                       bias.data(),
+                       m_Data.m_Parameters.m_TransposeWeightMatrix);
+    }
+    else
+    {
+        // No bias: pass nullptr so the kernel skips the bias addition.
+        FullyConnected(dequant.data(),
+                       results.data(),
+                       inputInfo,
+                       outputInfo,
+                       weight.data(),
+                       nullptr,
+                       m_Data.m_Parameters.m_TransposeWeightMatrix);
+    }
+
+    // Quantize the float results back to uint8 into the bound output tensor.
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn