From 5f400d6d23b463ca810180b45dd84c3f99b24690 Mon Sep 17 00:00:00 2001
From: Derek Lamberti <derek.lamberti@arm.com>
Date: Mon, 25 Mar 2019 15:41:58 +0000
Subject: IVGCVSW-2871 Ref QuantizeLayer workload

Change-Id: If048b2a053c542b31ae344fe0af04d9b4f40eb6d
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
---
 src/armnn/layers/QuantizeLayer.cpp                |  4 +-
 src/backends/reference/RefLayerSupport.cpp        | 36 ++++++++++++
 src/backends/reference/RefLayerSupport.hpp        |  4 ++
 src/backends/reference/RefWorkloadFactory.cpp     |  7 ++-
 src/backends/reference/RefWorkloadFactory.hpp     |  3 +
 src/backends/reference/backend.mk                 |  1 +
 src/backends/reference/workloads/CMakeLists.txt   |  2 +
 .../reference/workloads/RefQuantizeWorkload.cpp   | 66 ++++++++++++++++++++++
 .../reference/workloads/RefQuantizeWorkload.hpp   | 26 +++++++++
 src/backends/reference/workloads/RefWorkloads.hpp |  2 +
 10 files changed, 149 insertions(+), 2 deletions(-)
 create mode 100644 src/backends/reference/workloads/RefQuantizeWorkload.cpp
 create mode 100644 src/backends/reference/workloads/RefQuantizeWorkload.hpp

(limited to 'src')

diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index fbf8b322ab..d5d76e2585 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -19,7 +19,9 @@ QuantizeLayer::QuantizeLayer(const char* name)
 
 std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
 {
-    return nullptr;
+    QuantizeQueueDescriptor descriptor;
+    WorkloadInfo info = PrepInfoAndDesc(descriptor, graph);
+    return factory.CreateQuantize(descriptor, info);
 }
 
 Layer* QuantizeLayer::Clone(Graph& graph) const
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 532c8eaf98..4d164d58a2 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -122,6 +122,14 @@ struct ShapesAreSameRank : public Rule
     }
 };
 
+struct ShapesAreSameTotalSize : public Rule
+{
+    ShapesAreSameTotalSize(const TensorInfo& info0, const TensorInfo& info1)
+    {
+        m_Res = info0.GetNumElements() == info1.GetNumElements();
+    }
+};
+
 struct ShapesAreBroadcastCompatible : public Rule
 {
     unsigned int CalcInputSize(const TensorShape& in, const TensorShape& out, unsigned int idx)
@@ -719,6 +727,34 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                       &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    bool supported = true;
+
+    // Define supported input types.
+    std::array<DataType,1> supportedInputTypes = {
+        DataType::Float32,
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
+                                  "Reference quantize: input type not supported.");
+
+    // Define supported output types.
+    std::array<DataType,2> supportedOutputTypes = {
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
+                                  "Reference quantize: output type not supported.");
+
+    supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
+                                  "Reference quantize: input and output shapes have different num total elements.");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 42a5a4455e..53a1abf150 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -196,6 +196,10 @@ public:
                               const Pooling2dDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsQuantizeSupported(const TensorInfo& input,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsReshapeSupported(const TensorInfo& input,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index dda1819adf..7fbd359282 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -19,7 +19,6 @@ namespace
 {
 static const BackendId s_Id{RefBackendId()};
 }
-
 template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
 std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                             const WorkloadInfo& info) const
@@ -348,6 +347,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompil
     return nullptr;
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 14d3178e74..86f1ec3c14 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -180,6 +180,9 @@ public:
     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 90aa63a62c..f2b1153a71 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -59,6 +59,7 @@ BACKEND_SOURCES := \
        workloads/RefPermuteWorkload.cpp \
        workloads/RefPooling2dFloat32Workload.cpp \
        workloads/RefPooling2dUint8Workload.cpp \
+       workloads/RefQuantizeWorkload.cpp \
        workloads/RefReshapeFloat32Workload.cpp \
        workloads/RefReshapeUint8Workload.cpp \
        workloads/RefResizeBilinearFloat32Workload.cpp \
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index c4fc202250..4f5fbb554e 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -97,6 +97,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefPooling2dFloat32Workload.hpp
     RefPooling2dUint8Workload.cpp
     RefPooling2dUint8Workload.hpp
+    RefQuantizeWorkload.cpp
+    RefQuantizeWorkload.hpp
     RefReshapeFloat32Workload.cpp
     RefReshapeFloat32Workload.hpp
     RefReshapeUint8Workload.cpp
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
new file mode 100644
index 0000000000..b7ace32e14
--- /dev/null
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefQuantizeWorkload.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+
+namespace armnn
+{
+
+namespace
+{
+
+template<typename T>
+void QuantizeImpl(const void *input, void *output, size_t numValues, float scale, int offset)
+{
+    auto in = static_cast<const float *>(input);
+    auto out = static_cast<T *>(output);
+    for (size_t i = 0; i < numValues; i++, in++, out++)
+    {
+        *out = armnn::Quantize<T>(*in, scale, offset);
+    }
+}
+
+} //namespace
+
+RefQuantizeWorkload::RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info)
+    : BaseWorkload<QuantizeQueueDescriptor>(descriptor, info)
+    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
+    , m_TargetType(info.m_OutputTensorInfos[0].GetDataType())
+    , m_Scale(info.m_OutputTensorInfos[0].GetQuantizationScale())
+    , m_Offset(info.m_OutputTensorInfos[0].GetQuantizationOffset())
+{
+}
+
+void RefQuantizeWorkload::Execute() const
+{
+    const void* input = m_Data.m_Inputs[0]->Map(true);
+    void* output = m_Data.m_Outputs[0]->Map(true);
+
+    switch(m_TargetType)
+    {
+        case DataType::QuantisedAsymm8:
+        {
+            QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+            break;
+        }
+        case DataType::QuantisedSymm16:
+        {
+            QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
+            break;
+        }
+        default:
+        {
+            BOOST_ASSERT_MSG(false, "RefQuantizeWorkload: Non quantized output type encountered");
+        }
+    }
+
+    m_Data.m_Inputs[0]->Unmap();
+    m_Data.m_Outputs[0]->Unmap();
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.hpp b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
new file mode 100644
index 0000000000..6a43b8471d
--- /dev/null
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn {
+
+class RefQuantizeWorkload : public BaseWorkload<QuantizeQueueDescriptor>
+{
+public:
+    RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info);
+    void Execute() const override;
+
+private:
+    size_t m_NumElements;
+    armnn::DataType m_TargetType;
+    float m_Scale;
+    int m_Offset;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 7d2e813f6b..77aa56fcc6 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -64,3 +64,5 @@
 #include "RefRsqrtFloat32Workload.hpp"
 #include "RefComparisonWorkload.hpp"
 #include "RefDequantizeWorkload.hpp"
+
+#include "RefQuantizeWorkload.hpp"
\ No newline at end of file
-- 
cgit v1.2.1