diff options
author | Jim Flynn <jim.flynn@arm.com> | 2019-05-02 11:33:25 +0100 |
---|---|---|
committer | Ruomei Yan <ruomei.yan@arm.com> | 2019-05-08 16:10:59 +0000 |
commit | f92dfced4498f12b9315c0fa377ba7be8998b607 (patch) | |
tree | 4015208a5493ea414babba76c18bc72b6dbef875 /include/armnnQuantizer | |
parent | a4247d5a50502811a6956dffd990c0254622b7e1 (diff) | |
download | armnn-f92dfced4498f12b9315c0fa377ba7be8998b607.tar.gz |
IVGCVSW-2833 Add Dynamic Quantization
Change-Id: Iba91e3f3625639f01d66f81a9f3e419e0e285d66
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'include/armnnQuantizer')
-rw-r--r-- | include/armnnQuantizer/INetworkQuantizer.hpp | 51 |
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/include/armnnQuantizer/INetworkQuantizer.hpp b/include/armnnQuantizer/INetworkQuantizer.hpp new file mode 100644 index 0000000000..89548d1057 --- /dev/null +++ b/include/armnnQuantizer/INetworkQuantizer.hpp @@ -0,0 +1,51 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <armnn/INetwork.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +struct QuantizerOptions +{ + QuantizerOptions() : m_ActivationFormat(DataType::QuantisedAsymm8) {} + QuantizerOptions(DataType activationFormat) : m_ActivationFormat(activationFormat) {} + + DataType m_ActivationFormat; +}; + +using INetworkQuantizerPtr = std::unique_ptr<class INetworkQuantizer, void(*)(INetworkQuantizer* quantizer)>; + +/// Quantizer class Quantizes a float32 InputNetwork +class INetworkQuantizer +{ +public: + /// Create Quantizer object and return raw pointer + static INetworkQuantizer* CreateRaw(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions()); + + /// Create Quantizer object wrapped in unique_ptr + static INetworkQuantizerPtr Create(INetwork* inputNetwork, const QuantizerOptions& options = QuantizerOptions()); + + /// Destroy Quantizer object + static void Destroy(INetworkQuantizer* quantizer); + + /// Overrides the default quantization values for the input layer with the given id + virtual void OverrideInputRange(LayerBindingId layerId, float min, float max) = 0; + + /// Refine input network with a set of refinement data for specified LayerBindingId + virtual void Refine(const InputTensors& inputTensors) = 0; + + /// Extract final quantized network + virtual INetworkPtr ExportNetwork() = 0; + +protected: + virtual ~INetworkQuantizer() {} +}; + +} //namespace armnn |