about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author	Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>	2019-10-29 17:58:36 +0000
committer	Áron Virginás-Tar <aron.virginas-tar@arm.com>	2019-10-30 13:41:10 +0000
commitc0a87c14bd2bc8a02f6c5c9f919abca27ca4dde0 (patch)
tree3bdd47db9baec3b1566eee83145866946913f8bf /src
parentc04019985db1ee44c71834892ad17365185a3f8d (diff)
downloadarmnn-c0a87c14bd2bc8a02f6c5c9f919abca27ca4dde0.tar.gz
IVGCVSW-3831 Add support of per-axis quantization to TensorInfo
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iea09539c92d51e546fbad8b2903b59fc08d66618
Diffstat (limited to 'src')
-rw-r--r--	src/armnn/Tensor.cpp	116
-rw-r--r--	src/armnn/test/TensorTest.cpp	34
2 files changed, 135 insertions(+), 15 deletions(-)
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index 614abc77f5..f4b8b509b6 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "armnn/Tensor.hpp"
#include "armnn/Utils.hpp"
#include "armnn/Exceptions.hpp"
@@ -138,30 +139,57 @@ TensorInfo::TensorInfo()
{
}
-TensorInfo::TensorInfo(const TensorShape& shape, DataType dataType,
- float quantizationScale, int32_t quantizationOffset)
- : m_Shape(shape)
- , m_DataType(dataType)
+TensorInfo::TensorInfo(const TensorShape& shape,
+ DataType dataType,
+ float quantizationScale,
+ int32_t quantizationOffset)
+ : m_Shape(shape)
+ , m_DataType(dataType)
+{
+ SetQuantizationScale(quantizationScale);
+ SetQuantizationOffset(quantizationOffset);
+}
+
+TensorInfo::TensorInfo(unsigned int numDimensions,
+ const unsigned int* dimensionSizes,
+ DataType dataType,
+ float quantizationScale,
+ int32_t quantizationOffset)
+ : m_Shape(numDimensions, dimensionSizes)
+ , m_DataType(dataType)
{
- m_Quantization.m_Scale = quantizationScale;
- m_Quantization.m_Offset = quantizationOffset;
+ SetQuantizationScale(quantizationScale);
+ SetQuantizationOffset(quantizationOffset);
}
-TensorInfo::TensorInfo(unsigned int numDimensions, const unsigned int* dimensionSizes, DataType dataType,
- float quantizationScale, int32_t quantizationOffset)
- : m_Shape(numDimensions, dimensionSizes)
- , m_DataType(dataType)
+TensorInfo::TensorInfo(const TensorShape& shape,
+ DataType dataType,
+ const std::vector<float>& quantizationScales,
+ unsigned int quantizationDim)
+ : m_Shape(shape)
+ , m_DataType(dataType)
{
- m_Quantization.m_Scale = quantizationScale;
- m_Quantization.m_Offset = quantizationOffset;
+ SetQuantizationScales(quantizationScales);
+ SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
+}
+
+TensorInfo::TensorInfo(unsigned int numDimensions,
+ const unsigned int* dimensionSizes,
+ DataType dataType,
+ const std::vector<float>& quantizationScales,
+ unsigned int quantizationDim)
+ : m_Shape(numDimensions, dimensionSizes)
+ , m_DataType(dataType)
+{
+ SetQuantizationScales(quantizationScales);
+ SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
}
TensorInfo::TensorInfo(const TensorInfo& other)
: m_Shape(other.m_Shape)
, m_DataType(other.m_DataType)
, m_Quantization(other.m_Quantization)
-{
-}
+{}
TensorInfo& TensorInfo::operator=(const TensorInfo& other)
{
@@ -194,7 +222,7 @@ bool TensorInfo::IsTypeSpaceMatch(const TensorInfo& other) const
match &= m_DataType == other.m_DataType;
- if (IsQuantized())
+ if (IsQuantized() && !HasMultipleQuantizationScales())
{
match &= GetQuantizationScale() == other.GetQuantizationScale() &&
GetQuantizationOffset() == other.GetQuantizationOffset();
@@ -202,6 +230,64 @@ bool TensorInfo::IsTypeSpaceMatch(const TensorInfo& other) const
return match;
}
+std::vector<float> TensorInfo::GetQuantizationScales() const
+{
+ return m_Quantization.m_Scales;
+}
+
+void TensorInfo::SetQuantizationScales(const std::vector<float>& scales)
+{
+ m_Quantization.m_Scales = scales;
+}
+
+float TensorInfo::GetQuantizationScale() const
+{
+ if (m_Quantization.m_Scales.empty())
+ {
+ // NOTE: old default for backward compatibility
+ return 1.0f;
+ }
+
+ BOOST_ASSERT(!HasMultipleQuantizationScales());
+ return m_Quantization.m_Scales[0];
+}
+
+void TensorInfo::SetQuantizationScale(float scale)
+{
+ m_Quantization.m_Scales = { scale };
+}
+
+int32_t TensorInfo::GetQuantizationOffset() const
+{
+ if (!m_Quantization.m_Offset.has_value())
+ {
+ // NOTE: old default for backward compatibility
+ return 0;
+ }
+
+ return m_Quantization.m_Offset.value();
+}
+
+void TensorInfo::SetQuantizationOffset(int32_t offset)
+{
+ m_Quantization.m_Offset = MakeOptional<int32_t>(offset);
+}
+
+Optional<unsigned int> TensorInfo::GetQuantizationDim() const
+{
+ return m_Quantization.m_QuantizationDim;
+}
+
+void TensorInfo::SetQuantizationDim(const Optional<unsigned int>& quantizationDim)
+{
+ m_Quantization.m_QuantizationDim = quantizationDim;
+}
+
+bool TensorInfo::IsQuantized() const
+{
+ return m_DataType == DataType::QuantisedAsymm8 || m_DataType == DataType::QuantisedSymm16;
+}
+
// ---
// --- BaseTensor
// ---
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index a0a6c7e91f..154a0bca04 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -143,4 +143,38 @@ BOOST_AUTO_TEST_CASE(TensorShapeOperatorBrackets)
BOOST_TEST(shape[2] == 20);
}
+BOOST_AUTO_TEST_CASE(TensorInfoPerAxisQuantization)
+{
+ // Old constructor
+ TensorInfo tensorInfo0({ 1, 1 }, DataType::Float32, 2.0f, 1);
+ BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+ BOOST_CHECK(tensorInfo0.GetQuantizationScale() == 2.0f);
+ BOOST_CHECK(tensorInfo0.GetQuantizationOffset() == 1);
+ BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 2.0f);
+ BOOST_CHECK(!tensorInfo0.GetQuantizationDim().has_value());
+
+ // Set per-axis quantization scales
+ std::vector<float> perAxisScales{ 3.0f, 4.0f };
+ tensorInfo0.SetQuantizationScales(perAxisScales);
+ BOOST_CHECK(tensorInfo0.HasMultipleQuantizationScales());
+ BOOST_CHECK(tensorInfo0.GetQuantizationScales() == perAxisScales);
+
+ // Set per-tensor quantization scale
+ tensorInfo0.SetQuantizationScale(5.0f);
+ BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+ BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 5.0f);
+
+ // Set quantization dimension
+ tensorInfo0.SetQuantizationDim(Optional<unsigned int>(1));
+ BOOST_CHECK(tensorInfo0.GetQuantizationDim().value() == 1);
+
+ // New constructor
+ perAxisScales = { 6.0f, 7.0f };
+ TensorInfo tensorInfo1({ 1, 1 }, DataType::Float32, perAxisScales, 1);
+ BOOST_CHECK(tensorInfo1.HasMultipleQuantizationScales());
+ BOOST_CHECK(tensorInfo1.GetQuantizationOffset() == 0);
+ BOOST_CHECK(tensorInfo1.GetQuantizationScales() == perAxisScales);
+ BOOST_CHECK(tensorInfo1.GetQuantizationDim().value() == 1);
+}
+
BOOST_AUTO_TEST_SUITE_END()