aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/NetworkQuantizerUtils.hpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/NetworkQuantizerUtils.hpp')
-rw-r--r--src/armnn/NetworkQuantizerUtils.hpp56
1 files changed, 56 insertions, 0 deletions
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
new file mode 100644
index 0000000000..458d21a974
--- /dev/null
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
#include <armnn/ILayerVisitor.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <algorithm>
#include <limits>
#include <utility>

#include <boost/assert.hpp>
+
+namespace armnn
+{
+
+std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max);
+
+template<typename srcType>
+void Quantize(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
+{
+ BOOST_ASSERT(src);
+ BOOST_ASSERT(dst);
+
+ float min = std::numeric_limits<srcType>::max();
+ float max = std::numeric_limits<srcType>::lowest();
+ for (size_t i = 0; i < numElements; ++i)
+ {
+ min = std::min(min, src[i]);
+ max = std::max(max, src[i]);
+ }
+
+ auto qParams = ComputeQAsymmParams(8, min, max);
+ offset = qParams.first;
+ scale = qParams.second;
+ for (size_t i = 0; i < numElements; ++i)
+ {
+ dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
+ }
+}
+
+ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing);
+
+template <typename LayerContainer>
+void VisitLayers(const LayerContainer& layerContainer, ILayerVisitor& visitor)
+{
+ for (auto layer : layerContainer)
+ {
+ layer->Accept(visitor);
+ }
+}
+
+} // namespace armnn