aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/RefQuantizeWorkload.cpp
diff options
context:
space:
mode:
authorDerek Lamberti <derek.lamberti@arm.com>2019-03-25 15:41:58 +0000
committerDerek Lamberti <derek.lamberti@arm.com>2019-04-01 13:47:10 +0100
commit5f400d6d23b463ca810180b45dd84c3f99b24690 (patch)
treebbdc67d931cfc989ac4bf51eb1b81aa4e18aafc1 /src/backends/reference/workloads/RefQuantizeWorkload.cpp
parent8a54ac09dc4b9be1c1826bc63d4baf5101dafd53 (diff)
downloadarmnn-5f400d6d23b463ca810180b45dd84c3f99b24690.tar.gz
IVGCVSW-2871 Ref QuantizeLayer workload
Change-Id: If048b2a053c542b31ae344fe0af04d9b4f40eb6d Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Diffstat (limited to 'src/backends/reference/workloads/RefQuantizeWorkload.cpp')
-rw-r--r--src/backends/reference/workloads/RefQuantizeWorkload.cpp66
1 file changed, 66 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
new file mode 100644
index 0000000000..b7ace32e14
--- /dev/null
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefQuantizeWorkload.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+
+namespace armnn
+{
+
+namespace
+{
+
+template<typename T>
+void QuantizeImpl(const void *input, void *output, size_t numValues, float scale, int offset)
+{
+ auto in = static_cast<const float *>(input);
+ auto out = static_cast<T *>(output);
+ for (size_t i = 0; i < numValues; i++, in++, out++)
+ {
+ *out = armnn::Quantize<T>(*in, scale, offset);
+ }
+}
+
+} //namespace
+
/// Caches the per-tensor data needed by Execute(): the element count of the
/// input tensor and the target data type / quantization parameters of the
/// output tensor. The quantization scale and offset are read from the output
/// TensorInfo, since they describe the quantized result.
RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info)
    : BaseWorkload(descriptor, info)
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
    , m_TargetType(info.m_OutputTensorInfos[0].GetDataType())
    , m_Scale(info.m_OutputTensorInfos[0].GetQuantizationScale())
    , m_Offset(info.m_OutputTensorInfos[0].GetQuantizationOffset())
{
}
+
+void RefQuantizeWorkload::Execute() const
+{
+ const void* input = m_Data.m_Inputs[0]->Map(true);
+ void* output = m_Data.m_Outputs[0]->Map(true);
+
+ switch(m_TargetType)
+ {
+ case DataType::QuantisedAsymm8:
+ {
+ QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+ break;
+ }
+ case DataType::QuantisedSymm16:
+ {
+ QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
+ break;
+ }
+ default:
+ {
+ BOOST_ASSERT_MSG(false, "RefQuantizeWorkload: Non quantized output type encountered");
+ }
+ }
+
+ m_Data.m_Inputs[0]->Unmap();
+ m_Data.m_Outputs[0]->Unmap();
+}
+
+} //namespace armnn \ No newline at end of file