about summary refs log tree commit diff
path: root/src/backends/reference
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference')
-rw-r--r--src/backends/reference/RefLayerSupport.cpp9
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp3
-rw-r--r--src/backends/reference/workloads/BaseIterator.hpp43
-rw-r--r--src/backends/reference/workloads/Decoders.hpp7
-rw-r--r--src/backends/reference/workloads/Encoders.hpp7
5 files changed, 66 insertions, 3 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 0ca19bbb06..c60348e529 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -610,7 +610,8 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType,3> supportedInputTypes = {
+ std::array<DataType,4> supportedInputTypes = {
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QSymmS16
@@ -1439,9 +1440,10 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
bool supported = true;
// Define supported input types.
- std::array<DataType,5> supportedInputTypes = {
+ std::array<DataType,6> supportedInputTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QSymmS16
@@ -1451,8 +1453,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
"Reference quantize: input type not supported.");
// Define supported output types.
- std::array<DataType,3> supportedOutputTypes = {
+ std::array<DataType,4> supportedOutputTypes = {
DataType::QAsymmU8,
+ DataType::QAsymmS8,
DataType::QSymmS8,
DataType::QSymmS16
};
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index b0d8db802e..99468e0006 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1419,6 +1419,8 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
// Dequantize
ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleAsymmInt8, DequantizeSimpleAsymmInt8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeOffsetAsymmInt8, DequantizeOffsetAsymmInt8Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt8, DequantizeSimpleInt8Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
@@ -1428,6 +1430,7 @@ ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Tes
// Quantize
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
+ARMNN_AUTO_TEST_CASE(QuantizeClampAsymmInt8, QuantizeClampAsymmInt8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampInt8, QuantizeClampInt8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 581aabfcd2..c48201837b 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -137,6 +137,25 @@ private:
const int32_t m_Offset;
};
+class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
+{
+public:
+ QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ QASymmS8Decoder(const float scale, const int32_t offset)
+ : QASymmS8Decoder(nullptr, scale, offset) {}
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
{
public:
@@ -264,6 +283,30 @@ private:
const int32_t m_Offset;
};
+class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
+{
+public:
+ QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ QASymmS8Encoder(const float scale, const int32_t offset)
+ : QASymmS8Encoder(nullptr, scale, offset) {}
+
+ void Set(float right) override
+ {
+ *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
+ }
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
{
public:
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 6f309787bd..6a8c756048 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -81,6 +81,13 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
params.first);
}
ARMNN_NO_DEPRECATE_WARN_END
+ case DataType::QAsymmS8:
+ {
+ return std::make_unique<QASymmS8Decoder>(
+ static_cast<const int8_t*>(data),
+ info.GetQuantizationScale(),
+ info.GetQuantizationOffset());
+ }
case DataType::QAsymmU8:
{
return std::make_unique<QASymm8Decoder>(
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 8ddd559448..f52297602f 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -32,6 +32,13 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
params.first);
}
ARMNN_NO_DEPRECATE_WARN_END
+ case armnn::DataType::QAsymmS8:
+ {
+ return std::make_unique<QASymmS8Encoder>(
+ static_cast<int8_t*>(data),
+ info.GetQuantizationScale(),
+ info.GetQuantizationOffset());
+ }
case armnn::DataType::QAsymmU8:
{
return std::make_unique<QASymm8Encoder>(