diff options
author | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-02-07 17:51:09 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-02-08 12:23:05 +0000 |
commit | a8d572dc48f47e66cd7abd6ad9b2d3a0f40ea94b (patch) | |
tree | 5de7809a8fbc19d6d2a940a51a982bd633156945 /src/armnn/NetworkQuantizerUtils.cpp | |
parent | e0a4ad8a8e6ef271883e8029985eeab16d838972 (diff) | |
download | armnn-a8d572dc48f47e66cd7abd6ad9b2d3a0f40ea94b.tar.gz |
IVGCVSW-2607 Implement Input range override mechanism
* Added the OverrideInputRange method to the Quantizer API
* Created OverrideInputRangeVisitor to implement the override mechanism
* Moved the quantizer utility functions to the new NetworkQuantizerUtils files
* Moved the map of quantization ranges out of the StaticRangeVisitor
and into the NetworkQuantizer
* Added unit tests
* Code refactoring and cleanup
Change-Id: I9c1d006c1b6a35fbc04584a832fbe489f8f9276d
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Diffstat (limited to 'src/armnn/NetworkQuantizerUtils.cpp')
-rw-r--r-- | src/armnn/NetworkQuantizerUtils.cpp | 61 |
1 file changed, 61 insertions, 0 deletions
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp new file mode 100644 index 0000000000..1bec63b58c --- /dev/null +++ b/src/armnn/NetworkQuantizerUtils.cpp @@ -0,0 +1,61 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "NetworkQuantizerUtils.hpp" + +#include <algorithm> +#include <cmath> +#include <stdint.h> + +namespace armnn +{ + +std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max) +{ + BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization."); + double highest = (1 << numBits) - 1; + + min = std::min(0.0, min); // min <= 0.0 + max = std::max(0.0, max); // max >= 0.0 + + // Assumes quantization range [0-highest] + double scale = (max-min) / highest; + double offset = -min / scale; + + // Clamp offset [0-highest] + offset = std::max(0.0, std::min(highest, offset)); + + return std::make_pair(static_cast<int>(std::round(offset)), static_cast<float>(scale)); +} + +ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing) +{ + float scale = 0.0f; + int offset = 0; + + // Reserve the backing memory + backing.resize(tensor.GetInfo().GetNumElements()); + + DataType type = tensor.GetInfo().GetDataType(); + switch(type) + { + case DataType::Float32: + { + Quantize(static_cast<const float*>(tensor.GetMemoryArea()), + backing.data(), + backing.size(), + scale, + offset); + } + break; + default: + BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type"); + } + + TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QuantisedAsymm8, scale, offset); + return ConstTensor(qInfo, backing); +} + +} // namespace armnn |