aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKeith Davis <keith.davis@arm.com>2019-11-04 08:58:33 +0000
committerKeith Davis <keith.davis@arm.com>2019-11-04 16:46:35 +0000
commit5236e1d6bcff6ebec7ec10d7d416cc6ead5482dd (patch)
tree4152c5fcd6b9c11848a02dfa4ff8705a2cfae0a5
parentf71079328ae72a65c91e410b2bd35eabb67cb6d1 (diff)
downloadarmnn-5236e1d6bcff6ebec7ec10d7d416cc6ead5482dd.tar.gz
IVGCVSW-3835 Create Encoder and Decoder for QSymm8PerAxis
* Add QuantizedSymm8PerAxis to armnn DataType (types.hpp) and * Add Quantize and Dequantize template for int8 in TypeUtils to be able to compute QSymm8 of the weight * Create PerAxisIterator for per-axis quantization * Create QSymm8PerAxisDecoder * Create QSymm8PerAxisEncoder Signed-off-by: Keith Davis <keith.davis@arm.com> Change-Id: Ibcfe0288a197b7ee50b543bdbd77b7edb8a547c2
-rw-r--r--include/armnn/Types.hpp3
-rw-r--r--src/armnn/TypesUtils.cpp8
-rw-r--r--src/armnnUtils/TensorUtils.hpp30
-rw-r--r--src/backends/reference/workloads/BaseIterator.hpp114
-rw-r--r--src/backends/reference/workloads/Decoders.hpp11
-rw-r--r--src/backends/reference/workloads/Encoders.hpp11
6 files changed, 173 insertions, 4 deletions
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 16a148c9c2..51162e6cf3 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -31,7 +31,8 @@ enum class DataType
QuantisedAsymm8 = 2,
Signed32 = 3,
Boolean = 4,
- QuantisedSymm16 = 5
+ QuantisedSymm16 = 5,
+ QuantizedSymm8PerAxis = 6
};
enum class DataLayout
diff --git a/src/armnn/TypesUtils.cpp b/src/armnn/TypesUtils.cpp
index cdc30da8ca..83c56c491c 100644
--- a/src/armnn/TypesUtils.cpp
+++ b/src/armnn/TypesUtils.cpp
@@ -33,6 +33,10 @@ float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
return dequantized;
}
+/// Explicit specialization of Quantize for int8_t
+template
+int8_t armnn::Quantize<int8_t>(float value, float scale, int32_t offset);
+
/// Explicit specialization of Quantize for uint8_t
template
uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);
@@ -45,6 +49,10 @@ int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);
template
int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);
+/// Explicit specialization of Dequantize for int8_t
+template
+float armnn::Dequantize<int8_t>(int8_t value, float scale, int32_t offset);
+
/// Explicit specialization of Dequantize for uint8_t
template
float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);
diff --git a/src/armnnUtils/TensorUtils.hpp b/src/armnnUtils/TensorUtils.hpp
index 2b1f6a24f3..32af179bdc 100644
--- a/src/armnnUtils/TensorUtils.hpp
+++ b/src/armnnUtils/TensorUtils.hpp
@@ -7,6 +7,8 @@
#include <armnn/TypesUtils.hpp>
+#include <boost/assert.hpp>
+
namespace armnnUtils
{
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
@@ -32,4 +34,32 @@ unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis);
+inline unsigned int GetNumElementsAfter(const armnn::TensorShape& shape,
+ unsigned int axis)
+{
+ unsigned int numDim = shape.GetNumDimensions();
+    BOOST_ASSERT(axis < numDim);
+ BOOST_ASSERT(axis < numDim - 1);
+ unsigned int count = 1;
+ for (unsigned int i = axis; i < numDim; i++)
+ {
+ count *= shape[i];
+ }
+ return count;
+}
+
+inline std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info)
+{
+ const std::vector<float>& scales = info.GetQuantizationScales();
+ armnn::Optional<unsigned int> quantizationDim = info.GetQuantizationDim();
+ if (scales.size() < 1 || !quantizationDim.has_value())
+ {
+ throw armnn::InvalidArgumentException(
+ "We currently support only per-axis symmetric quantization for QuantizedSymm8.");
+ }
+ unsigned int axisFactor = GetNumElementsAfter(info.GetShape(), quantizationDim.value());
+
+ return {axisFactor, scales};
+}
+
} // namespace armnnUtils
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 18270faf46..9fe3f15f9b 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -339,4 +339,116 @@ public:
}
};
-} //namespace armnn
+// PerAxisIterator for per-axis quantization
+template<typename T, typename Base>
+class PerAxisIterator : public Base
+{
+public:
+ // axisFactor is used to calculate axisIndex
+ PerAxisIterator(T* data = nullptr, unsigned int axisFactor = 0)
+ : m_Iterator(data), m_Start(data), m_AxisIndex(0), m_AxisFactor(axisFactor)
+ {}
+
+ // This should be called to set index for per-axis Encoder/Decoder
+ PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex)
+ {
+ BOOST_ASSERT(m_Iterator);
+ m_Iterator = m_Start + index;
+ m_AxisIndex = axisIndex;
+ return *this;
+ }
+
+ void Reset(void* data) override
+ {
+ m_Iterator = reinterpret_cast<T*>(data);
+ m_Start = m_Iterator;
+ m_AxisIndex = 0;
+ }
+
+ PerAxisIterator& operator++() override
+ {
+ BOOST_ASSERT(m_Iterator);
+ ++m_Iterator;
+        m_AxisIndex = static_cast<unsigned int>(m_Iterator - m_Start) % m_AxisFactor;
+ return *this;
+ }
+
+ PerAxisIterator& operator+=(const unsigned int increment) override
+ {
+ BOOST_ASSERT(m_Iterator);
+ m_Iterator += increment;
+        m_AxisIndex = static_cast<unsigned int>(m_Iterator - m_Start) % m_AxisFactor;
+ return *this;
+ }
+
+ PerAxisIterator& operator-=(const unsigned int decrement) override
+ {
+ BOOST_ASSERT(m_Iterator);
+ m_Iterator -= decrement;
+        m_AxisIndex = static_cast<unsigned int>(m_Iterator - m_Start) % m_AxisFactor;
+ return *this;
+ }
+
+ PerAxisIterator& operator[](const unsigned int index) override
+ {
+ BOOST_ASSERT(m_Iterator);
+ m_Iterator = m_Start + index;
+        m_AxisIndex = static_cast<unsigned int>(m_Iterator - m_Start) % m_AxisFactor;
+ return *this;
+ }
+
+ protected:
+ T* m_Iterator;
+ T* m_Start;
+ unsigned int m_AxisIndex;
+ unsigned int m_AxisFactor;
+};
+
+class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
+{
+public:
+ QSymm8PerAxisDecoder(const int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
+ : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+ }
+
+ // Get scale of the current value
+ float GetScale() const
+ {
+ return m_Scale[m_AxisIndex];
+ }
+
+private:
+ std::vector<float> m_Scale;
+};
+
+class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
+{
+public:
+ QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
+ : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
+
+ void Set(float right)
+ {
+ *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
+ }
+
+ float Get() const
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+ }
+
+ // Get scale of the current value
+ float GetScale() const
+ {
+ return m_Scale[m_AxisIndex];
+ }
+
+private:
+ std::vector<float> m_Scale;
+};
+
+} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 328a5eb0f7..dd2b28a50f 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -7,6 +7,7 @@
#include "BaseIterator.hpp"
#include "FloatingPointConverter.hpp"
+#include "TensorUtils.hpp"
#include <boost/assert.hpp>
@@ -21,6 +22,14 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
{
switch(info.GetDataType())
{
+ case armnn::DataType::QuantizedSymm8PerAxis:
+ {
+ std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+ return std::make_unique<QSymm8PerAxisDecoder>(
+ static_cast<const int8_t*>(data),
+ params.second,
+ params.first);
+ }
case DataType::QuantisedAsymm8:
{
return std::make_unique<QASymm8Decoder>(
@@ -55,7 +64,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- BOOST_ASSERT_MSG(false, "Not supported Data Type!");
+ BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 2b3a11af06..5c0cffa7ca 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -6,6 +6,7 @@
#pragma once
#include "BaseIterator.hpp"
+#include "TensorUtils.hpp"
#include <boost/assert.hpp>
@@ -20,6 +21,14 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
{
switch(info.GetDataType())
{
+ case armnn::DataType::QuantizedSymm8PerAxis:
+ {
+ std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+ return std::make_unique<QSymm8PerAxisEncoder>(
+ static_cast<int8_t*>(data),
+ params.second,
+ params.first);
+ }
case armnn::DataType::QuantisedAsymm8:
{
return std::make_unique<QASymm8Encoder>(
@@ -48,7 +57,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Cannot encode from float. Not supported target Data Type!");
+ BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
break;
}
}