From f92dfced4498f12b9315c0fa377ba7be8998b607 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Thu, 2 May 2019 11:33:25 +0100
Subject: IVGCVSW-2833 Add Dynamic Quantization

Change-Id: Iba91e3f3625639f01d66f81a9f3e419e0e285d66
Signed-off-by: Jim Flynn
---
 include/armnn/ILayerVisitor.hpp              |  2 ++
 include/armnn/INetworkQuantizer.hpp          | 51 ----------------------------
 include/armnn/TypesUtils.hpp                 |  4 +--
 include/armnnQuantizer/INetworkQuantizer.hpp | 51 ++++++++++++++++++++++++++++
 4 files changed, 55 insertions(+), 53 deletions(-)
 delete mode 100644 include/armnn/INetworkQuantizer.hpp
 create mode 100644 include/armnnQuantizer/INetworkQuantizer.hpp
(limited to 'include')

diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index eabad58366..ab793bc587 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -347,6 +347,8 @@ public:
     virtual void VisitSwitchLayer(const IConnectableLayer* layer,
                                   const char* name = nullptr) = 0;
 
+    virtual void StartVisit() {}
+    virtual void FinishVisit() {}
 };
 
 } // namespace armnn
diff --git a/include/armnn/INetworkQuantizer.hpp b/include/armnn/INetworkQuantizer.hpp
deleted file mode 100644
index 89548d1057..0000000000
--- a/include/armnn/INetworkQuantizer.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/INetwork.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-namespace armnn
-{
-
-struct QuantizerOptions
-{
-    QuantizerOptions() : m_ActivationFormat(DataType::QuantisedAsymm8) {}
-    QuantizerOptions(DataType activationFormat) : m_ActivationFormat(activationFormat) {}
-
-    DataType m_ActivationFormat;
-};
-
-using INetworkQuantizerPtr = std::unique_ptr<class INetworkQuantizer, void(*)(INetworkQuantizer* quantizer)>;
-
-/// Quantizer class Quantizes a float32 InputNetwork
-class INetworkQuantizer
-{
-public:
-    /// Create Quantizer object and return raw pointer
-    static INetworkQuantizer* CreateRaw(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions());
-
-    /// Create Quantizer object wrapped in unique_ptr
-    static INetworkQuantizerPtr Create(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions());
-
-    /// Destroy Quantizer object
-    static void Destroy(INetworkQuantizer* quantizer);
-
-    /// Overrides the default quantization values for the input layer with the given id
-    virtual void OverrideInputRange(LayerBindingId layerId, float min, float max) = 0;
-
-    /// Refine input network with a set of refinement data for specified LayerBindingId
-    virtual void Refine(const InputTensors& inputTensors) = 0;
-
-    /// Extract final quantized network
-    virtual INetworkPtr ExportNetwork() = 0;
-
-protected:
-    virtual ~INetworkQuantizer() {}
-};
-
-} //namespace armnn
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 837490d258..cb52471cd5 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -4,8 +4,8 @@
 //
 #pragma once
 
-#include "Tensor.hpp"
-#include "Types.hpp"
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
 
 #include <cmath>
 #include <ostream>
diff --git a/include/armnnQuantizer/INetworkQuantizer.hpp b/include/armnnQuantizer/INetworkQuantizer.hpp
new file mode 100644
index 0000000000..89548d1057
--- /dev/null
+++ b/include/armnnQuantizer/INetworkQuantizer.hpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/INetwork.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+namespace armnn
+{
+
+struct QuantizerOptions
+{
+    QuantizerOptions() : m_ActivationFormat(DataType::QuantisedAsymm8) {}
+    QuantizerOptions(DataType activationFormat) : m_ActivationFormat(activationFormat) {}
+
+    DataType m_ActivationFormat;
+};
+
+using INetworkQuantizerPtr = std::unique_ptr<class INetworkQuantizer, void(*)(INetworkQuantizer* quantizer)>;
+
+/// Quantizer class Quantizes a float32 InputNetwork
+class INetworkQuantizer
+{
+public:
+    /// Create Quantizer object and return raw pointer
+    static INetworkQuantizer* CreateRaw(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions());
+
+    /// Create Quantizer object wrapped in unique_ptr
+    static INetworkQuantizerPtr Create(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions());
+
+    /// Destroy Quantizer object
+    static void Destroy(INetworkQuantizer* quantizer);
+
+    /// Overrides the default quantization values for the input layer with the given id
+    virtual void OverrideInputRange(LayerBindingId layerId, float min, float max) = 0;
+
+    /// Refine input network with a set of refinement data for specified LayerBindingId
+    virtual void Refine(const InputTensors& inputTensors) = 0;
+
+    /// Extract final quantized network
+    virtual INetworkPtr ExportNetwork() = 0;
+
+protected:
+    virtual ~INetworkQuantizer() {}
+};
+
+} //namespace armnn
--
cgit v1.2.1
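Usage note (not part of the patch): a minimal sketch of the dynamic quantization flow the relocated header exposes. The helper name QuantizeDynamically and the way the refinement data is supplied are illustrative assumptions; only INetworkQuantizer::Create, Refine and ExportNetwork are taken from the header above.

    #include <vector>

    #include <armnn/INetwork.hpp>
    #include <armnnQuantizer/INetworkQuantizer.hpp>

    // Hypothetical helper: quantize a float32 network using dynamic range
    // refinement. The caller supplies representative input tensors.
    armnn::INetworkPtr QuantizeDynamically(armnn::INetwork& floatNetwork,
                                           const std::vector<armnn::InputTensors>& refinementData)
    {
        // Default QuantizerOptions selects DataType::QuantisedAsymm8 activations.
        armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(&floatNetwork);

        // Dynamic quantization: feed representative inputs so per-tensor ranges
        // are refined from real data before the network is exported.
        for (const armnn::InputTensors& inputTensors : refinementData)
        {
            quantizer->Refine(inputTensors);
        }

        // Extract the final quantized network.
        return quantizer->ExportNetwork();
    }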