diff options
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r-- | src/backends/reference/workloads/BaseIterator.hpp          | 32 ++++++++++++++++++++++++++++++++
-rw-r--r-- | src/backends/reference/workloads/RefElementwiseWorkload.cpp | 22 ++++++++++++++++++++++
2 files changed, 54 insertions, 0 deletions
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp index cfa8ce7e91..95c75a576a 100644 --- a/src/backends/reference/workloads/BaseIterator.hpp +++ b/src/backends/reference/workloads/BaseIterator.hpp @@ -112,6 +112,22 @@ public: } }; +class QSymm16Decoder : public TypedIterator<const int16_t, Decoder> +{ +public: + QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset) + : TypedIterator(data), m_Scale(scale), m_Offset(offset) {} + + float Get() const override + { + return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset); + } + +private: + const float m_Scale; + const int32_t m_Offset; +}; + class FloatEncoder : public TypedIterator<float, Encoder> { public: @@ -152,4 +168,20 @@ public: } }; +class QSymm16Encoder : public TypedIterator<int16_t, Encoder> +{ +public: + QSymm16Encoder(int16_t* data, const float scale, const int32_t offset) + : TypedIterator(data), m_Scale(scale), m_Offset(offset) {} + + void Set(const float& right) override + { + *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset); + } + +private: + const float m_Scale; + const int32_t m_Offset; +}; + } //namespace armnn
\ No newline at end of file diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp index 6e6e1d5f21..1a30e7c9fb 100644 --- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp +++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp @@ -64,6 +64,28 @@ void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() c encodeIterator0); break; } + case armnn::DataType::QuantisedSymm16: + { + QSymm16Decoder decodeIterator0(GetInputTensorData<int16_t>(0, m_Data), + inputInfo0.GetQuantizationScale(), + inputInfo0.GetQuantizationOffset()); + + QSymm16Decoder decodeIterator1(GetInputTensorData<int16_t>(1, m_Data), + inputInfo1.GetQuantizationScale(), + inputInfo1.GetQuantizationOffset()); + + QSymm16Encoder encodeIterator0(GetOutputTensorData<int16_t>(0, m_Data), + outputInfo.GetQuantizationScale(), + outputInfo.GetQuantizationOffset()); + + ElementwiseFunction<Functor, Decoder, Encoder>(inShape0, + inShape1, + outShape, + decodeIterator0, + decodeIterator1, + encodeIterator0); + break; + } default: BOOST_ASSERT_MSG(false, "RefElementwiseWorkload: Not supported Data Type!"); break; |