From ec81999c4f41380b8181672cb73b5bf6bf08e5c3 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Thu, 10 Feb 2022 14:47:13 +0000
Subject: IVGCVSW-6700 Add override functions to all typed Neon/CL workloads

* Neon and CL workloads which inherit from TypedWorkload instead of the
  BaseWorkload for their backend do not contain the correct
  ReplaceInputTensorHandle/ReplaceOutputTensorHandle and Reconfigure
  functions, so these functions have been added to them directly.
* Removed the Profiling call from ClConvolution2dWorkload::Reconfigure()
  to avoid a segfault

Signed-off-by: David Monahan
Change-Id: I7b9d1b48fdb17db1662dc03c22acc746340ce73f
---
 .../workloads/NeonConvertBf16ToFp32Workload.cpp    | 38 ++++++++++++++++++++++
 .../workloads/NeonConvertBf16ToFp32Workload.hpp    |  5 +++
 .../workloads/NeonConvertFp16ToFp32Workload.cpp    | 38 ++++++++++++++++++++++
 .../workloads/NeonConvertFp16ToFp32Workload.hpp    |  5 +++
 .../workloads/NeonConvertFp32ToBf16Workload.cpp    | 38 ++++++++++++++++++++++
 .../workloads/NeonConvertFp32ToBf16Workload.hpp    |  5 +++
 .../workloads/NeonConvertFp32ToFp16Workload.cpp    | 38 ++++++++++++++++++++++
 .../workloads/NeonConvertFp32ToFp16Workload.hpp    |  5 +++
 .../neon/workloads/NeonFloorFloatWorkload.cpp      | 39 +++++++++++++++++++++++
 .../neon/workloads/NeonFloorFloatWorkload.hpp      |  5 +++
 .../workloads/NeonL2NormalizationFloatWorkload.cpp | 38 ++++++++++++++++++++++
 .../workloads/NeonL2NormalizationFloatWorkload.hpp |  5 +++
 .../neon/workloads/NeonLstmFloatWorkload.cpp       | 38 ++++++++++++++++++++++
 .../neon/workloads/NeonLstmFloatWorkload.hpp       |  5 +++
 .../workloads/NeonNormalizationFloatWorkload.cpp   | 38 ++++++++++++++++++++++
 .../workloads/NeonNormalizationFloatWorkload.hpp   |  5 +++
 16 files changed, 345 insertions(+)

(limited to 'src/backends/neon/workloads')
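Every file in this patch gains the same three member functions, so the mechanics are easier to see in one condensed sketch than in the repeated hunks below. The following self-contained approximation uses simplified stand-ins for armnn::ITensorHandle, armnn::UnimplementedException and the queue-descriptor data (SketchWorkload and its members are illustrative, not the real Arm NN declarations). The contract it demonstrates: the new handle is installed in the requested slot first, and if Reconfigure() cannot adapt the workload it throws, the original handle is restored and the exception propagates to the caller.

    #include <iostream>
    #include <stdexcept>
    #include <vector>

    struct ITensorHandle {};                           // stand-in for armnn::ITensorHandle

    struct UnimplementedException : std::runtime_error // stand-in for armnn::UnimplementedException
    {
        using std::runtime_error::runtime_error;
    };

    class SketchWorkload
    {
    public:
        std::vector<ITensorHandle*> m_Inputs{nullptr}; // one input slot, as m_Data.m_Inputs would hold

        void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
        {
            ITensorHandle* backupHandle = m_Inputs[slot];
            m_Inputs[slot] = tensorHandle;             // optimistically install the new handle
            try
            {
                Reconfigure();
            }
            catch (UnimplementedException&)
            {
                m_Inputs[slot] = backupHandle;         // cannot reconfigure: roll the slot back
                throw;                                 // and let the caller see the failure
            }
        }

    private:
        // None of the workloads in this patch can rewire their arm_compute layers yet,
        // so Reconfigure() always throws and every attempted replacement is rolled back.
        virtual void Reconfigure()
        {
            throw UnimplementedException("Reconfigure not implemented for this workload");
        }
    };

    int main()
    {
        SketchWorkload workload;
        ITensorHandle newHandle;
        try
        {
            workload.ReplaceInputTensorHandle(&newHandle, 0);
        }
        catch (UnimplementedException& e)
        {
            std::cout << "rolled back: " << e.what() << "\n";
        }
        return workload.m_Inputs[0] == nullptr ? 0 : 1; // 0: the original handle is back
    }

Because every Reconfigure() added here only throws, the new overrides are effectively transactional no-ops for now: the swap is attempted, rejected, and undone. What the patch guarantees is that calling ReplaceInputTensorHandle/ReplaceOutputTensorHandle on these typed workloads is well-defined rather than missing.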
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
index dcef025a3d..7a2ff9ac1a 100644
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
@@ -40,4 +40,42 @@ void NeonConvertBf16ToFp32Workload::Execute() const
     }
 }
 
+void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonConvertBf16ToFp32Workload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
index 9770fbdbb0..9d44ad2cac 100644
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>
 public:
     NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     using TensorHandlePair = std::pair<ConstTensor, ITensorHandle*>;
     std::vector<TensorHandlePair> m_TensorHandlePairs;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
index 1b9e1bcfb5..ce6c785329 100644
--- a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
@@ -40,4 +40,42 @@ void NeonConvertFp16ToFp32Workload::Execute() const
     }
 }
 
+void NeonConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonConvertFp16ToFp32Workload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
index 9159e51f8b..c0165eae78 100644
--- a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>
 public:
     NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     using TensorHandlePair = std::pair<ConstTensor, ITensorHandle*>;
     std::vector<TensorHandlePair> m_TensorHandlePairs;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
index ac6a69d21a..acd1a1ea8f 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
@@ -41,4 +41,42 @@ void NeonConvertFp32ToBf16Workload::Execute() const
     }
 }
 
+void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonConvertFp32ToBf16Workload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
index 6c0118712f..2304f8a1d4 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>
 public:
     NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     using TensorHandlePair = std::pair<ConstTensor, ITensorHandle*>;
     std::vector<TensorHandlePair> m_TensorHandlePairs;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
index d65cba046b..089716a4b4 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
@@ -41,4 +41,42 @@ void NeonConvertFp32ToFp16Workload::Execute() const
     }
 }
 
+void NeonConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonConvertFp32ToFp16Workload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
index 8e9f11b857..666f48794b 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp32ToFp16Workload : public Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>
 public:
     NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     using TensorHandlePair = std::pair<ConstTensor, ITensorHandle*>;
     std::vector<TensorHandlePair> m_TensorHandlePairs;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index b97e3cef75..1d53245c5f 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -32,6 +32,45 @@ void NeonFloorFloatWorkload::Execute() const
     ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFloorFloatWorkload_Execute", this->GetGuid());
     m_Layer->run();
 }
+
+void NeonFloorFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonFloorFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonFloorFloatWorkload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
index 7113931673..8ba6b4a5c5 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
@@ -20,9 +20,14 @@ class NeonFloorFloatWorkload : public FloatWorkload<FloorQueueDescriptor>
 public:
     NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     std::unique_ptr<arm_compute::IFunction> m_Layer;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 887f25a333..c0c6ed4982 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -60,4 +60,42 @@ void NeonL2NormalizationFloatWorkload::Execute() const
     m_Layer->run();
 }
 
+void NeonL2NormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonL2NormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonL2NormalizationFloatWorkload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
index 82f0639e9c..9c591fc7a7 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
@@ -26,9 +26,14 @@ public:
     NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                      std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     std::unique_ptr<arm_compute::NEL2NormalizeLayer> m_Layer;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index b8224e6ca1..2f14ab9022 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -464,4 +464,42 @@ void NeonLstmFloatWorkload::FreeUnusedTensors()
     FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
 }
 
+void NeonLstmFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonLstmFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonLstmFloatWorkload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
index ebbf180371..4bb3ff823e 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
@@ -21,7 +21,11 @@ class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
 public:
     NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     mutable arm_compute::NELSTMLayer m_LstmLayer;
@@ -51,6 +55,7 @@ private:
     std::unique_ptr<arm_compute::Tensor> m_OutputLayerNormWeightsTensor;
 
     void FreeUnusedTensors();
+    virtual void Reconfigure();
 };
 
 arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index f811a0457e..01ac5c1b64 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -110,4 +110,42 @@ void NeonNormalizationFloatWorkload::Execute() const
     m_NormalizationLayer->run();
 }
 
+void NeonNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+    this->m_Data.m_Inputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Inputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
+    try
+    {
+        Reconfigure();
+    }
+    catch(armnn::UnimplementedException& e)
+    {
+        // Cannot reconfigure, revert the slot back and throw the exception.
+        this->m_Data.m_Outputs[slot] = backupHandle;
+        throw e;
+    }
+}
+
+void NeonNormalizationFloatWorkload::Reconfigure()
+{
+    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
index ed5453619e..9605ed1543 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
@@ -26,9 +26,14 @@ public:
     NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
                                    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
     virtual void Execute() const override;
+    // Replace input tensor handle with the given TensorHandle
+    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+    // Replace output tensor handle with the given TensorHandle
+    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
 
 private:
     std::unique_ptr<arm_compute::NENormalizationLayer> m_NormalizationLayer;
+    virtual void Reconfigure();
 };
 
 } //namespace armnn
--
cgit v1.2.1