author     Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-20 11:51:14 +0000
committer  nattapat.chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-20 14:49:03 +0000
commit     7ac07f355f4cb75a54ec423670b7078bd0ecb44d (patch)
tree       5f28c73decbfe0221c2ecedc204f48a7c00884f0 /src/armnn/NetworkQuantizerUtils.cpp
parent     2a434a8a23d75fb62ac0cb3ecb83ba7aab89b8c6 (diff)
IVGCVSW-2858 Add support for QSymm16 quantization
Change-Id: Ia7c305c30c39ec0e9db447a461479be17fde250c
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
Diffstat (limited to 'src/armnn/NetworkQuantizerUtils.cpp')
-rw-r--r--  src/armnn/NetworkQuantizerUtils.cpp  28
1 file changed, 5 insertions(+), 23 deletions(-)
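
For context, this commit removes the asymmetric parameter helper ComputeQAsymmParams (visible in the diff below). A symmetric scheme such as QSymm16 needs only a scale, because the zero point is fixed at 0. A minimal sketch of how symmetric 16-bit parameters could be derived; the function name and details are assumptions for illustration, not ArmNN's actual implementation:

#include <algorithm>
#include <cmath>
#include <utility>

// Hypothetical sketch: symmetric 16-bit (QSymm16-style) parameters.
// The zero point is always 0, so only a scale needs to be derived.
std::pair<float, int> ComputeQSymm16Params(double min, double max)
{
    // The scale must cover the largest magnitude in the data range.
    double absMax  = std::max(std::abs(min), std::abs(max));
    double highest = (1 << 15) - 1; // 32767, the symmetric int16 bound

    double scale  = absMax / highest;
    int    offset = 0; // symmetric: zero point is fixed at 0

    return std::make_pair(static_cast<float>(scale), offset);
}
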
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index 551760f362..a6f9ebdc42 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -12,24 +12,6 @@
namespace armnn
{
-std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max)
-{
- BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization.");
- double highest = (1 << numBits) - 1;
-
- min = std::min(0.0, min); // min <= 0.0
- max = std::max(0.0, max); // max >= 0.0
-
- // Assumes quantization range [0-highest]
- double scale = (max-min) / highest;
- double offset = -min / scale;
-
- // Clamp offset [0-highest]
- offset = std::max(0.0, std::min(highest, offset));
-
- return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
-}
-
ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing)
{
float scale = 0.0f;
@@ -43,11 +25,11 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
{
case DataType::Float32:
{
- Quantize(static_cast<const float*>(tensor.GetMemoryArea()),
- backing.data(),
- backing.size(),
- scale,
- offset);
+ QuantizeConstant(static_cast<const float*>(tensor.GetMemoryArea()),
+ backing.data(),
+ backing.size(),
+ scale,
+ offset);
}
break;
default:
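
The hunk above renames the Quantize call to QuantizeConstant. A rough sketch of the per-element loop such a helper might perform for the Float32 case (the signature is inferred from the call site; whether scale and offset are inputs or derived inside the helper is an assumption):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// Hypothetical per-element quantization loop matching the call site above:
// round each float to the nearest quantization step, then clamp to uint8.
void QuantizeConstant(const float* src, uint8_t* dst, std::size_t numElements,
                      float scale, int offset)
{
    for (std::size_t i = 0; i < numElements; ++i)
    {
        int q = static_cast<int>(std::round(src[i] / scale)) + offset;
        dst[i] = static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }
}
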