about summary refs log tree commit diff
path: root/src/backends/reference/workloads/BaseIterator.hpp
diff options
context:
space:
mode:
author Ryan OShea <ryan.oshea3@arm.com> 2022-11-07 16:20:48 +0000
committer ryan.oshea3 <ryan.oshea3@arm.com> 2022-11-16 15:22:50 +0000
commit 31441595009182c985dacbedc70c41ee6664d070 (patch)
tree 248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/backends/reference/workloads/BaseIterator.hpp
parent bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer - Remove Fp32ToBf16 Conversion Layer - Remove B16 Conversion tests * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true * Provide comments to enable fast math in order to use bf16 * Update docs to inform users to enable fast math for bf16 Execute Network Changes * Require bf16_turbo_mode to also have fast_math_enabled set to true - Remove setting m_ReduceFp32ToBf16 optimizer option Signed-off-by: Ryan OShea <ryan.oshea3@arm.com> Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
Diffstat (limited to 'src/backends/reference/workloads/BaseIterator.hpp')
-rw-r--r-- src/backends/reference/workloads/BaseIterator.hpp | 60
1 file changed, 0 insertions(+), 60 deletions(-)
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index e09371fd96..2d27951b73 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -260,44 +260,6 @@ private:
};
-class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>>
-{
-public:
- BFloat16Decoder(const BFloat16* data)
- : TypedIterator(data) {}
-
- BFloat16Decoder()
- : BFloat16Decoder(nullptr) {}
-
- float Get() const override
- {
- float val = 0.f;
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
- return val;
- }
- std::vector<float> DecodeTensor (const TensorShape& tensorShape,
- const bool isDepthwise) override
- {
- IgnoreUnused(isDepthwise);
-
- const unsigned int size = tensorShape.GetNumElements();
- std::vector<float> decodedTensor;
- decodedTensor.reserve(size);
-
- for (uint32_t i = 0; i < size; ++i)
- {
- this->operator[](i);
-
- float val = 0.f;
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
- decodedTensor.emplace_back(val);
- }
-
- return decodedTensor;
- }
-
-};
-
class Float16Decoder : public TypedIterator<const Half, Decoder<float>>
{
public:
@@ -624,28 +586,6 @@ private:
const int32_t m_Offset;
};
-class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>>
-{
-public:
- BFloat16Encoder(armnn::BFloat16* data)
- : TypedIterator(data) {}
-
- BFloat16Encoder()
- : BFloat16Encoder(nullptr) {}
-
- void Set(float right) override
- {
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(&right, 1, m_Iterator);
- }
-
- float Get() const override
- {
- float val = 0.f;
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val);
- return val;
- }
-};
-
class Float16Encoder : public TypedIterator<Half, Encoder<float>>
{
public: