From 5e51cd803c7d926b2a9bd7c680c8d59fe100c2f2 Mon Sep 17 00:00:00 2001
From: Keith Davis
Date: Wed, 29 Jan 2020 16:52:59 +0000
Subject: IVGCVSW-4390 Refactor QUANTIZE to make use of Decoder/Encoder types

* Add no-ops for CL/NEON Uint8
* Refactor Quantize workload to Decoder/Encoder types

Signed-off-by: Keith Davis
Change-Id: I80b09de528299b925e2ac38acd9a5019b8d3e4ac
---
 .../reference/workloads/RefQuantizeWorkload.cpp | 55 ++++++++--------------
 .../reference/workloads/RefQuantizeWorkload.hpp | 10 ++--
 2 files changed, 26 insertions(+), 39 deletions(-)

(limited to 'src/backends/reference/workloads')

diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index ab2ee7fc4e..2eef5f33db 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -5,6 +5,8 @@
 
 #include "RefQuantizeWorkload.hpp"
 
+#include "RefWorkloadUtils.hpp"
+
 #include <armnn/TypesUtils.hpp>
 
 
@@ -14,14 +16,13 @@ namespace armnn
 namespace
 {
 
-template<typename T>
-void QuantizeImpl(const void *input, void *output, size_t numValues, float scale, int offset)
+void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
 {
-    auto in = static_cast<const float*>(input);
-    auto out = static_cast<T*>(output);
-    for (size_t i = 0; i < numValues; i++, in++, out++)
+    for (unsigned int i = 0; i < numValues; i++)
     {
-        *out = armnn::Quantize<T>(*in, scale, offset);
+        in[i];
+        out[i];
+        out.Set(in.Get());
     }
 }
 
@@ -30,42 +31,24 @@ void QuantizeImpl(const void *input, void *output, size_t numValues, float scale
 RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info)
     : BaseWorkload(descriptor, info)
     , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
-    , m_TargetType(info.m_OutputTensorInfos[0].GetDataType())
-    , m_Scale(info.m_OutputTensorInfos[0].GetQuantizationScale())
-    , m_Offset(info.m_OutputTensorInfos[0].GetQuantizationOffset())
 {
 }
 
-void RefQuantizeWorkload::Execute() const
+void RefQuantizeWorkload::PostAllocationConfigure()
 {
-    const void* input = m_Data.m_Inputs[0]->Map(true);
-    void* output = m_Data.m_Outputs[0]->Map(true);
+    const TensorInfo& inputInfo = armnn::GetTensorInfo(m_Data.m_Inputs[0]);
+    m_InputDecoder = MakeDecoder<float>(inputInfo);
 
-    switch(m_TargetType)
-    {
-        case DataType::QAsymmU8:
-        {
-            QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
-            break;
-        }
-        case DataType::QSymmS8:
-        {
-            QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, 0);
-            break;
-        }
-        case DataType::QSymmS16:
-        {
-            QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
-            break;
-        }
-        default:
-        {
-            BOOST_ASSERT_MSG(false, "RefQuantizeWorkload: Non quantized output type encountered");
-        }
-    }
+    const TensorInfo& outputInfo = armnn::GetTensorInfo(m_Data.m_Outputs[0]);
+    m_OutputEncoder = MakeEncoder<float>(outputInfo);
+}
+
+void RefQuantizeWorkload::Execute() const
+{
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
 
-    m_Data.m_Inputs[0]->Unmap();
-    m_Data.m_Outputs[0]->Unmap();
+    QuantizeImpl(*m_InputDecoder, *m_OutputEncoder, m_NumElements);
 }
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.hpp b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
index 6a43b8471d..9ae107607b 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.hpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
@@ -7,6 +7,8 @@
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 
+#include "Decoders.hpp"
+#include "Encoders.hpp"
 namespace armnn
 {
 
@@ -14,13 +16,15 @@ class RefQuantizeWorkload : public BaseWorkload<QuantizeQueueDescriptor>
 {
 public:
     RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info);
+    void PostAllocationConfigure() override;
     void Execute() const override;
 
 private:
+
+    std::unique_ptr<Decoder<float>> m_InputDecoder;
+    std::unique_ptr<Encoder<float>> m_OutputEncoder;
+
    size_t m_NumElements;
-    armnn::DataType m_TargetType;
-    float m_Scale;
-    int m_Offset;
 };
 
 } //namespace armnn
\ No newline at end of file