Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp               |  8
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp            |  3
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp        | 43
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp            |  7
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp            |  7
-rw-r--r--  src/backends/reference/workloads/RefQuantizeWorkload.cpp |  5
6 files changed, 70 insertions(+), 3 deletions(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 299503ddc6..19b76152f3 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -615,8 +615,9 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType,2> supportedInputTypes = {
+ std::array<DataType,3> supportedInputTypes = {
DataType::QuantisedAsymm8,
+ DataType::QSymmS8,
DataType::QuantisedSymm16
};
@@ -1398,7 +1399,7 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
{
bool supported = true;
- // Define supported output types.
+ // Define supported input types.
std::array<DataType,1> supportedInputTypes = {
DataType::Float32,
};
@@ -1407,8 +1408,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
"Reference quantize: input type not supported.");
// Define supported output types.
- std::array<DataType,2> supportedOutputTypes = {
+ std::array<DataType,3> supportedOutputTypes = {
DataType::QuantisedAsymm8,
+ DataType::QSymmS8,
DataType::QuantisedSymm16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
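
The hunks above follow the reference backend's standard capability-check pattern: CheckSupportRule pairs a predicate such as TypeAnyOf with a reason string, and enabling QSymmS8 just grows the whitelist array, hence the std::array size bump from 2 to 3. A minimal standalone sketch of the idea, illustrative rather than the actual Arm NN implementation:

#include <algorithm>
#include <array>

enum class DataType { Float32, QuantisedAsymm8, QSymmS8, QuantisedSymm16 };

// Illustrative stand-in for the TypeAnyOf rule: the layer is supported
// only if the tensor's data type appears in the backend's whitelist.
template <typename Container>
bool TypeAnyOf(DataType actual, const Container& supported)
{
    return std::any_of(supported.begin(), supported.end(),
                       [actual](DataType t) { return t == actual; });
}

bool IsDequantizeInputSupported(DataType inputType)
{
    // Adding QSymmS8 grows the whitelist from 2 to 3 entries, which is
    // why the diff changes std::array<DataType,2> to std::array<DataType,3>.
    std::array<DataType, 3> supportedInputTypes = {
        DataType::QuantisedAsymm8,
        DataType::QSymmS8,
        DataType::QuantisedSymm16
    };
    return TypeAnyOf(inputType, supportedInputTypes);
}
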
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index a397e935c1..b88f432acf 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1419,13 +1419,16 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
// Dequantize
ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt8, DequantizeSimpleInt8Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt8ToFp16, DequantizeSimpleInt8ToFp16Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
// Quantize
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
+ARMNN_AUTO_TEST_CASE(QuantizeClampInt8, QuantizeClampInt8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
// PReLU
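
The new QuantizeClampInt8 case presumably mirrors the existing Uint8/Int16 clamp tests: feed in floats far outside the representable range and check that they saturate at the int8 extremes. A hypothetical reduction of what such a test asserts (the helper name and structure are assumptions; the real test body lives in the shared LayerTests sources):

#include <armnn/TypesUtils.hpp>
#include <cassert>
#include <cstdint>

// armnn::Quantize saturates out-of-range inputs rather than wrapping,
// which is the behaviour a clamp test pins down.
void CheckInt8Clamping()
{
    const float scale    = 1.0f;
    const int32_t offset = 0;

    assert(armnn::Quantize<int8_t>( 1000.0f, scale, offset) ==  127);
    assert(armnn::Quantize<int8_t>(-1000.0f, scale, offset) == -128);
    assert(armnn::Quantize<int8_t>(   42.0f, scale, offset) ==   42);
}
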
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index ca5110c2fd..ca6d3cbc60 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -137,6 +137,25 @@ private:
const int32_t m_Offset;
};
+class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
+{
+public:
+ QSymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ QSymmS8Decoder(const float scale, const int32_t offset)
+ : QSymmS8Decoder(nullptr, scale, offset) {}
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
class QSymm16Decoder : public TypedIterator<const int16_t, Decoder<float>>
{
public:
@@ -245,6 +264,30 @@ private:
const int32_t m_Offset;
};
+class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
+{
+public:
+ QSymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
+ : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+ QSymmS8Encoder(const float scale, const int32_t offset)
+ : QSymmS8Encoder(nullptr, scale, offset) {}
+
+ void Set(float right) override
+ {
+ *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
+ }
+
+ float Get() const override
+ {
+ return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+ }
+
+private:
+ const float m_Scale;
+ const int32_t m_Offset;
+};
+
class QSymm16Encoder : public TypedIterator<int16_t, Encoder<float>>
{
public:
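
The new QSymmS8 encoder/decoder pair delegates to armnn::Quantize and armnn::Dequantize per element, so a Set followed by a Get rounds the value to the nearest representable int8 step. A small round-trip sketch under an assumed scale of 0.5 and offset 0 (symmetric schemes normally keep the offset at zero):

#include <armnn/TypesUtils.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    const float scale    = 0.5f;  // assumed scale for the example
    const int32_t offset = 0;     // symmetric: offset stays at 0

    // Set() path: 3.2f / 0.5 = 6.4, which rounds to quantized value 6.
    int8_t q = armnn::Quantize<int8_t>(3.2f, scale, offset);

    // Get() path: (6 - 0) * 0.5 = 3.0f, the nearest representable value.
    float d = armnn::Dequantize(q, scale, offset);

    std::cout << static_cast<int>(q) << " -> " << d << std::endl;  // 6 -> 3
}
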
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index b9cd7f9573..9d41c9e9e7 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -105,6 +105,13 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
{
return MakeSigned32Decoder(info, data);
}
+ case DataType::QSymmS8:
+ {
+ return std::make_unique<QSymmS8Decoder>(
+ static_cast<const int8_t*>(data),
+ info.GetQuantizationScale(),
+ info.GetQuantizationOffset());
+ }
default:
{
BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
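
MakeDecoder dispatches on TensorInfo::GetDataType(), so callers stay type-agnostic. A hedged usage sketch of how a reference workload typically walks a tensor through the returned decoder (the summing helper is invented for illustration):

#include <armnn/Tensor.hpp>
#include <memory>
#include "Decoders.hpp"

// Invented helper: sums a tensor's elements without knowing its concrete
// element type; the decoder dequantizes each element on the fly.
float SumElements(const armnn::TensorInfo& info, const void* data)
{
    std::unique_ptr<armnn::Decoder<float>> decoder =
        armnn::MakeDecoder<float>(info, data);

    float sum = 0.0f;
    for (unsigned int i = 0; i < info.GetNumElements(); ++i)
    {
        sum += decoder->Get();  // e.g. QSymmS8Decoder::Get() dequantizes
        ++(*decoder);           // advance the underlying typed pointer
    }
    return sum;
}
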
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 0d578d68de..92493ed641 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -37,6 +37,13 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
+ case DataType::QSymmS8:
+ {
+ return std::make_unique<QSymmS8Encoder>(
+ static_cast<int8_t*>(data),
+ info.GetQuantizationScale(),
+ info.GetQuantizationOffset());
+ }
case armnn::DataType::QuantisedSymm16:
{
return std::make_unique<QSymm16Encoder>(
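
Together, MakeDecoder and MakeEncoder let a workload convert between any pair of supported element types, which is how the QSymmS8 additions light up across the reference workloads. An illustrative re-quantization helper built on the two factories (the function itself is hypothetical):

#include <armnn/Tensor.hpp>
#include "Decoders.hpp"
#include "Encoders.hpp"

// Hypothetical helper: copies a tensor while converting between any two
// supported element types, e.g. QuantisedAsymm8 in and QSymmS8 out.
void Requantize(const armnn::TensorInfo& inputInfo,  const void* inputData,
                const armnn::TensorInfo& outputInfo, void* outputData)
{
    auto decoder = armnn::MakeDecoder<float>(inputInfo, inputData);
    auto encoder = armnn::MakeEncoder<float>(outputInfo, outputData);

    for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
    {
        encoder->Set(decoder->Get());  // dequantize, then requantize
        ++(*decoder);
        ++(*encoder);
    }
}
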
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index b7ace32e14..a78804b709 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -48,6 +48,11 @@ void RefQuantizeWorkload::Execute() const
QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
break;
}
+ case DataType::QSymmS8:
+ {
+ QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+ break;
+ }
case DataType::QuantisedSymm16:
{
QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
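
QuantizeImpl is defined earlier in RefQuantizeWorkload.cpp; reconstructed from the call sites it plausibly looks like the sketch below (parameter names and types are assumptions, not a verbatim copy). Note that the existing QuantisedSymm16 case hard-codes a zero offset, while the new QSymmS8 case forwards m_Offset:

#include <armnn/TypesUtils.hpp>
#include <cstdint>

template <typename T>
void QuantizeImpl(const void* input, void* output,
                  unsigned int numElements, float scale, int32_t offset)
{
    const float* in = static_cast<const float*>(input);
    T* out          = static_cast<T*>(output);

    for (unsigned int i = 0; i < numElements; ++i)
    {
        // armnn::Quantize rounds and saturates to T's representable range.
        out[i] = armnn::Quantize<T>(in[i], scale, offset);
    }
}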