path: root/src/backends/neon
Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                          |  24
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                          |   8
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                       |  26
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp                       |  10
-rw-r--r--  src/backends/neon/backend.mk                                    |   2
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                       |  10
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                      |   4
-rw-r--r--  src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp  |  81
-rw-r--r--  src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp  |  31
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp  |  82
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp  |  31
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp                   |   2
12 files changed, 0 insertions(+), 311 deletions(-)
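Summary: this change drops BFloat16 support from the Neon backend. It removes the ConvertBf16ToFp32 and ConvertFp32ToBf16 layer-support checks, workload factory entries, build rules, workload implementations, and their tests. No replacement code is added.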
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 7f311d8684..4c97855668 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -220,12 +220,8 @@ bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
}
case LayerType::Constant:
return IsConstantSupported(infos[0], reasonIfUnsupported);
- case LayerType::ConvertBf16ToFp32:
- return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
case LayerType::ConvertFp16ToFp32:
return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
- case LayerType::ConvertFp32ToBf16:
- return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
case LayerType::ConvertFp32ToFp16:
return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
case LayerType::Convolution2d:
@@ -765,16 +761,6 @@ bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
output);
}
-bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- armnn::IgnoreUnused(input);
- armnn::IgnoreUnused(output);
- armnn::IgnoreUnused(reasonIfUnsupported);
- return true;
-}
-
bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -785,16 +771,6 @@ bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
return true;
}
-bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- armnn::IgnoreUnused(input);
- armnn::IgnoreUnused(output);
- armnn::IgnoreUnused(reasonIfUnsupported);
- return true;
-}
-
bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index e916162f93..374a9049c8 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -84,18 +84,10 @@ public:
bool IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index d5a7c684d3..dccd4a3a36 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -194,24 +194,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
}
- case LayerType::ConvertBf16ToFp32 :
- {
- auto convertBf16ToFp32QueueDescriptor
- = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
- return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
- }
case LayerType::ConvertFp16ToFp32 :
{
auto convertFp16ToFp32QueueDescriptor
= PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
}
- case LayerType::ConvertFp32ToBf16 :
- {
- auto convertFp32ToBf16QueueDescriptor
- = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
- return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
- }
case LayerType::ConvertFp32ToFp16 :
{
auto convertFp32ToFp16QueueDescriptor
@@ -655,13 +643,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQue
return std::make_unique<NeonConstantWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32(
- const ConvertBf16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const
@@ -669,13 +650,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16(
- const ConvertFp32ToBf16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 0c116086d4..e4f545900a 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -108,21 +108,11 @@ public:
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
"CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
- std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
- "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
"CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
- std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
- ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
- "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index b1c0103426..bbc55547a0 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -34,8 +34,6 @@ BACKEND_SOURCES := \
workloads/NeonComparisonWorkload.cpp \
workloads/NeonConcatWorkload.cpp \
workloads/NeonConstantWorkload.cpp \
- workloads/NeonConvertBf16ToFp32Workload.cpp \
- workloads/NeonConvertFp32ToBf16Workload.cpp \
workloads/NeonConvertFp16ToFp32Workload.cpp \
workloads/NeonConvertFp32ToFp16Workload.cpp \
workloads/NeonConvolution2dWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 88e513e62f..2512821a85 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -743,12 +743,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
-// Convert from BFloat16 to Float32
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
-
-// Convert from Float32 to BFloat16
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
-
// Fully Connected
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
@@ -798,7 +792,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::Q
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16, RankDimSize2Test<DataType::Float16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32, RankDimSize2Test<DataType::Float32>)
@@ -806,7 +799,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::Q
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16, RankDimSize3Test<DataType::Float16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32, RankDimSize3Test<DataType::Float32>)
@@ -814,7 +806,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::Q
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16, RankDimSize4Test<DataType::Float16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32, RankDimSize4Test<DataType::Float32>)
@@ -822,7 +813,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::Q
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>)
// InstanceNormalization
ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index dd09ecf015..a3eb883079 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -28,12 +28,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonConcatWorkload.hpp
NeonConstantWorkload.cpp
NeonConstantWorkload.hpp
- NeonConvertBf16ToFp32Workload.cpp
- NeonConvertBf16ToFp32Workload.hpp
NeonConvertFp16ToFp32Workload.cpp
NeonConvertFp16ToFp32Workload.hpp
- NeonConvertFp32ToBf16Workload.cpp
- NeonConvertFp32ToBf16Workload.hpp
NeonConvertFp32ToFp16Workload.cpp
NeonConvertFp32ToFp16Workload.hpp
NeonConvolution2dWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
deleted file mode 100644
index 7a2ff9ac1a..0000000000
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConvertBf16ToFp32Workload.hpp"
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <BFloat16.hpp>
-
-#include <backendsCommon/WorkloadUtils.hpp>
-
-namespace armnn
-{
-
-NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1);
- GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
-}
-
-void NeonConvertBf16ToFp32Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertBf16ToFp32Workload_Execute", this->GetGuid());
-
- auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
- {
- auto input = reinterpret_cast<const BFloat16*>(src);
- auto output = reinterpret_cast<float*>(dst);
- size_t numElements = size/2; // 2 bytes per Bf16
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
- };
-
- for (const auto& pair : m_TensorHandlePairs)
- {
- CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
- }
-}
-
-void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
- try
- {
- Reconfigure();
- }
- catch(armnn::UnimplementedException& e)
- {
- // Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
- throw e;
- }
-}
-
-// Replace output tensor handle with the given TensorHandle
-void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
- try
- {
- Reconfigure();
- }
- catch(armnn::UnimplementedException& e)
- {
- // Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
- throw e;
- }
-}
-
-void NeonConvertBf16ToFp32Workload::Reconfigure()
-{
- throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
-}
-
-} //namespace armnn
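For reference, the conversion the deleted workload delegated to armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32 is a pure widening: a BFloat16 value is exactly the high 16 bits of the corresponding IEEE-754 binary32 value. A minimal self-contained sketch, assuming bf16 values arrive as raw uint16_t (illustrative only, not the armnnUtils implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Widen bf16 -> fp32: shift each 16-bit value into the top half of a
    // 32-bit pattern and reinterpret it as a float. Exact for all inputs.
    void ConvertBFloat16ToFloat32(const uint16_t* src, size_t numElements, float* dst)
    {
        for (size_t i = 0; i < numElements; ++i)
        {
            const uint32_t bits = static_cast<uint32_t>(src[i]) << 16;
            std::memcpy(&dst[i], &bits, sizeof(float)); // memcpy avoids aliasing UB
        }
    }

This is consistent with the deleted lambda's numElements = size/2: two bytes per bf16 element.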
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
deleted file mode 100644
index 9d44ad2cac..0000000000
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/backends/Workload.hpp>
-#include <armnn/backends/WorkloadData.hpp>
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
-{
-public:
- NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
- virtual void Execute() const override;
- // Replace input tensor handle with the given TensorHandle
- void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-
- // Replace output tensor handle with the given TensorHandle
- void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-private:
- using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
- std::vector<TensorHandlePair> m_TensorHandlePairs;
- virtual void Reconfigure();
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
deleted file mode 100644
index acd1a1ea8f..0000000000
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonConvertFp32ToBf16Workload.hpp"
-
-#include <BFloat16.hpp>
-#include <Profiling.hpp>
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <backendsCommon/WorkloadUtils.hpp>
-
-namespace armnn
-{
-
-NeonConvertFp32ToBf16Workload::NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToBf16Workload", 1, 1);
- GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
-}
-
-void NeonConvertFp32ToBf16Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToBf16Workload_Execute", this->GetGuid());
-
- auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
- {
- auto input = reinterpret_cast<const float*>(src);
- auto output = reinterpret_cast<BFloat16*>(dst);
- size_t numElements = size/2; // 2 bytes per bf16
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output);
- };
-
- for (const auto& pair : m_TensorHandlePairs)
- {
- CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
- }
-}
-
-void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
- try
- {
- Reconfigure();
- }
- catch(armnn::UnimplementedException& e)
- {
- // Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
- throw e;
- }
-}
-
-// Replace output tensor handle with the given TensorHandle
-void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
-{
- ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
- this->m_Data.m_Inputs[slot] = tensorHandle;
- try
- {
- Reconfigure();
- }
- catch(armnn::UnimplementedException& e)
- {
- // Cannot reconfigure, revert the slot back and throw the exception.
- this->m_Data.m_Inputs[slot] = backupHandle;
- throw e;
- }
-}
-
-void NeonConvertFp32ToBf16Workload::Reconfigure()
-{
- throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
-}
-
-} //namespace armnn
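The opposite direction is lossy: narrowing fp32 to bf16 discards the low 16 bits of the mantissa, so a good converter rounds rather than truncates. A standalone sketch using round-to-nearest-even with the usual NaN guard (an assumption for illustration; this diff does not show how armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16 rounds):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Narrow fp32 -> bf16 with round-to-nearest-even on the discarded bits.
    void ConvertFloat32ToBFloat16(const float* src, size_t numElements, uint16_t* dst)
    {
        for (size_t i = 0; i < numElements; ++i)
        {
            uint32_t bits;
            std::memcpy(&bits, &src[i], sizeof(bits));
            if ((bits & 0x7F800000u) == 0x7F800000u && (bits & 0x007FFFFFu) != 0u)
            {
                // NaN: truncate, but force a mantissa bit so the result
                // cannot collapse to infinity.
                dst[i] = static_cast<uint16_t>((bits >> 16) | 0x0040u);
            }
            else
            {
                // Round half to even: add 0x7FFF plus the lowest kept bit.
                const uint32_t rounding = 0x7FFFu + ((bits >> 16) & 1u);
                dst[i] = static_cast<uint16_t>((bits + rounding) >> 16);
            }
        }
    }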
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
deleted file mode 100644
index 2304f8a1d4..0000000000
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/backends/Workload.hpp>
-#include <armnn/backends/WorkloadData.hpp>
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
-{
-public:
- NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info);
- virtual void Execute() const override;
- // Replace input tensor handle with the given TensorHandle
- void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-
- // Replace output tensor handle with the given TensorHandle
- void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
-private:
- using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
- std::vector<TensorHandlePair> m_TensorHandlePairs;
- virtual void Reconfigure();
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index c9c5421804..01fd2f7dba 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -16,9 +16,7 @@
#include "NeonComparisonWorkload.hpp"
#include "NeonConcatWorkload.hpp"
#include "NeonConstantWorkload.hpp"
-#include "NeonConvertBf16ToFp32Workload.hpp"
#include "NeonConvertFp16ToFp32Workload.hpp"
-#include "NeonConvertFp32ToBf16Workload.hpp"
#include "NeonConvertFp32ToFp16Workload.hpp"
#include "NeonConvolution2dWorkload.hpp"
#include "NeonConvolution3dWorkload.hpp"