diff options
author | Finn Williams <Finn.Williams@arm.com> | 2019-12-04 14:27:27 +0000 |
---|---|---|
committer | Jim Flynn Arm <jim.flynn@arm.com> | 2019-12-09 15:39:16 +0000 |
commit | fd2710651ada27fc82f28c07fb1e09effc3bda2d (patch) | |
tree | 7c2200489c7a3f845b91362c2c8d66ab9c6101e8 /src/backends/reference/workloads | |
parent | 6a5e5e8b7e56f927d70ced3203d6e16df3fdd189 (diff) | |
download | armnn-fd2710651ada27fc82f28c07fb1e09effc3bda2d.tar.gz |
IVGCVSW-4211 Add Signed 8 bit Quantisation support into the Reference backend
!android-nn-driver:2435
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I10ecd4a8937725953396805f33a3562a5384c4d4
Diffstat (limited to 'src/backends/reference/workloads')
4 files changed, 62 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp index ca5110c2fd..ca6d3cbc60 100644 --- a/src/backends/reference/workloads/BaseIterator.hpp +++ b/src/backends/reference/workloads/BaseIterator.hpp @@ -137,6 +137,25 @@ private: const int32_t m_Offset; }; +class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>> +{ +public: + QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset) + : TypedIterator(data), m_Scale(scale), m_Offset(offset) {} + + QSymmS8Decoder(const float scale, const int32_t offset) + : QSymmS8Decoder(nullptr, scale, offset) {} + + float Get() const override + { + return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset); + } + +private: + const float m_Scale; + const int32_t m_Offset; +}; + class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>> { public: @@ -245,6 +264,30 @@ private: const int32_t m_Offset; }; +class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>> +{ +public: + QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset) + : TypedIterator(data), m_Scale(scale), m_Offset(offset) {} + + QSymmS8Encoder(const float scale, const int32_t offset) + : QSymmS8Encoder(nullptr, scale, offset) {} + + void Set(float right) override + { + *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset); + } + + float Get() const override + { + return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset); + } + +private: + const float m_Scale; + const int32_t m_Offset; +}; + class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>> { public:
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp index b9cd7f9573..9d41c9e9e7 100644 --- a/src/backends/reference/workloads/Decoders.hpp +++ b/src/backends/reference/workloads/Decoders.hpp @@ -105,6 +105,13 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data = nullptr) { return MakeSigned32Decoder(info, data); } + case DataType::QSymmS8: + { + return std::make_unique<QSymmS8Decoder>( + static_cast<const int8_t*>(data), + info.GetQuantizationScale(), + info.GetQuantizationOffset()); + } default: { BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp index 0d578d68de..92493ed641 100644 --- a/src/backends/reference/workloads/Encoders.hpp +++ b/src/backends/reference/workloads/Encoders.hpp @@ -37,6 +37,13 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data = nullptr) info.GetQuantizationScale(), info.GetQuantizationOffset()); } + case DataType::QSymmS8: + { + return std::make_unique<QSymmS8Encoder>( + static_cast<int8_t*>(data), + info.GetQuantizationScale(), + info.GetQuantizationOffset()); + } case armnn::DataType::QuantisedSymm16: { return std::make_unique<QSymm16Encoder>(
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp index b7ace32e14..a78804b709 100644 --- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp +++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp @@ -48,6 +48,11 @@ void RefQuantizeWorkload::Execute() const QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset); break; } + case DataType::QSymmS8: + { + QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset); + break; + } case DataType::QuantisedSymm16: { QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);