author     David Monahan <David.Monahan@arm.com>    2022-02-10 14:47:13 +0000
committer  David Monahan <david.monahan@arm.com>    2022-02-10 14:52:37 +0000
commit     ec81999c4f41380b8181672cb73b5bf6bf08e5c3 (patch)
tree       37d1dec4b6a4c7c2e90b91d107de767dcda1f0e1
parent     c92bbd79eb0c3187031e33c8d6d70773a7d81737 (diff)
download   armnn-ec81999c4f41380b8181672cb73b5bf6bf08e5c3.tar.gz
IVGCVSW-6700 Add override functions to all typed Neon/CL workloads
* Neon and CL workloads which inherit from TypedWorkload instead of the
  BaseWorkload for their backend do not contain the correct
  ReplaceInputTensorHandle/ReplaceOutputTensorHandle and Reconfigure
  functions, so these have been added directly.
* Removed the Profiling call from ClConvolution2dWorkload::Reconfigure()
  to avoid a segfault.

Signed-off-by: David Monahan <David.Monahan@arm.com>
Change-Id: I7b9d1b48fdb17db1662dc03c22acc746340ce73f
-rw-r--r--  src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp   | 38
-rw-r--r--  src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp   |  7
-rw-r--r--  src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp         | 37
-rw-r--r--  src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp         |  6
-rw-r--r--  src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp         | 37
-rw-r--r--  src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp         |  5
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp             |  2
-rw-r--r--  src/backends/cl/workloads/ClFloorFloatWorkload.cpp                | 39
-rw-r--r--  src/backends/cl/workloads/ClFloorFloatWorkload.hpp                |  5
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp      | 38
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp      |  6
-rw-r--r--  src/backends/cl/workloads/ClLstmFloatWorkload.cpp                 | 38
-rw-r--r--  src/backends/cl/workloads/ClLstmFloatWorkload.hpp                 |  5
-rw-r--r--  src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp        | 38
-rw-r--r--  src/backends/cl/workloads/ClNormalizationFloatWorkload.hpp        |  5
-rw-r--r--  src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp     | 38
-rw-r--r--  src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp     |  5
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp     | 38
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp     |  5
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp     | 38
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp     |  5
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp     | 38
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp     |  5
-rw-r--r--  src/backends/neon/workloads/NeonFloorFloatWorkload.cpp            | 39
-rw-r--r--  src/backends/neon/workloads/NeonFloorFloatWorkload.hpp            |  5
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp  | 38
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp  |  5
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.cpp             | 38
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.hpp             |  5
-rw-r--r--  src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp    | 38
-rw-r--r--  src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp    |  5
31 files changed, 649 insertions, 2 deletions
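
Every file in the list above receives the same boilerplate. A condensed sketch of the repeated pattern (ClExampleWorkload is a placeholder name, not a class in this commit; the real hunks follow below):

void ClExampleWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    // Swap in the new handle, then try to rebuild the underlying ACL function around it.
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch (armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

void ClExampleWorkload::Reconfigure()
{
    // None of the typed workloads in this patch support rebuilding their layer yet.
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

ReplaceOutputTensorHandle follows the same shape, and each header gains the two overrides plus a private virtual Reconfigure().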
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 992abc2f56..389605f17d 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -124,4 +124,42 @@ void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
FreeTensorIfUnused(m_Beta);
}
+void ClBatchNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClBatchNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClBatchNormalizationFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
index dc76703382..d47663671e 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.hpp
@@ -32,6 +32,12 @@ public:
using FloatWorkload<BatchNormalizationQueueDescriptor>::FloatWorkload;
void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+
private:
mutable arm_compute::CLBatchNormalizationLayer m_Layer;
@@ -41,6 +47,7 @@ private:
std::unique_ptr<arm_compute::CLTensor> m_Beta;
void FreeUnusedTensors();
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 867770a112..d4110df0ae 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -57,5 +57,42 @@ arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
return aclStatus;
}
+void ClConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClConvertFp16ToFp32Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
index b392c0be2e..e78ffa4aff 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
@@ -21,8 +21,14 @@ public:
const arm_compute::CLCompileContext& clCompileContext);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::CLDepthConvertLayer m_Layer;
+ virtual void Reconfigure();
};
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 017fcaf454..d4ae925d9f 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -57,5 +57,42 @@ arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
return aclStatus;
}
+void ClConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClConvertFp32ToFp16Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
index 1d777b5256..a4baef8555 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
@@ -20,9 +20,14 @@ public:
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::CLDepthConvertLayer m_Layer;
+ virtual void Reconfigure();
};
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index cdfa885f67..bf82fbf255 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -180,9 +180,9 @@ void ClConvolution2dWorkload::FreeUnusedTensors()
void ClConvolution2dWorkload::Reconfigure()
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload_Reconfigure");
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
m_InputProxy->set(&input);
m_OutputProxy->set(&output);
}
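
The hunk above drops the ARMNN_SCOPED_PROFILING_EVENT call from ClConvolution2dWorkload::Reconfigure(), which the commit message notes was causing a segfault; Convolution2d is also the one workload here whose Reconfigure() actually re-points the ACL tensor proxies rather than throwing. Either way, the caller-visible contract of the new overrides is the same: the replacement takes effect via Reconfigure(), or the slot is restored and the exception propagates. A hypothetical caller-side sketch (not part of this commit; the helper name and header path are assumptions):

#include <armnn/Exceptions.hpp>           // armnn::UnimplementedException (header path assumed)

namespace armnn { class ITensorHandle; }  // pointer-only use, a forward declaration suffices

// WorkloadT stands for any of the workloads touched by this patch.
template <typename WorkloadT>
bool TryReplaceInput(WorkloadT& workload, armnn::ITensorHandle* newHandle, unsigned int slot)
{
    try
    {
        workload.ReplaceInputTensorHandle(newHandle, slot);
        return true;   // the workload reconfigured itself around the new handle
    }
    catch (const armnn::UnimplementedException&)
    {
        // The override restored the previous handle before rethrowing,
        // so the workload can still run with its old bindings.
        return false;
    }
}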
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 679e225c63..0aae1a30e3 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -29,7 +29,6 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descripto
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFloorFloatWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output);
@@ -42,4 +41,42 @@ void ClFloorFloatWorkload::Execute() const
RunClFunction(m_Layer, CHECK_LOCATION());
}
+void ClFloorFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClFloorFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClFloorFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.hpp b/src/backends/cl/workloads/ClFloorFloatWorkload.hpp
index 5740c6887a..dbe5f6f163 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.hpp
@@ -23,9 +23,14 @@ public:
const arm_compute::CLCompileContext& clCompileContext);
void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::CLFloor m_Layer;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index b34153fff0..d120fb28f6 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -60,4 +60,42 @@ void ClL2NormalizationFloatWorkload::Execute() const
RunClFunction(m_Layer, CHECK_LOCATION());
}
+void ClL2NormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClL2NormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClL2NormalizationFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
index cfa1a97eec..67e7b8b7b1 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
@@ -24,10 +24,16 @@ public:
const arm_compute::CLCompileContext& clCompileContext);
void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
// Purposely not a CLL2Normalize function. See constructor.
mutable arm_compute::CLL2NormalizeLayer m_Layer;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index d8d95f5c74..37dfab6a5f 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -446,4 +446,42 @@ void ClLstmFloatWorkload::FreeUnusedTensors()
FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
}
+void ClLstmFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClLstmFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClLstmFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.hpp b/src/backends/cl/workloads/ClLstmFloatWorkload.hpp
index b9faca8b54..54c5c600dc 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.hpp
@@ -22,9 +22,14 @@ public:
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext);
void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::CLLSTMLayer m_LstmLayer;
+ virtual void Reconfigure();
std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index d98532d7d1..8de8dd5c3b 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -62,4 +62,42 @@ void ClNormalizationFloatWorkload::Execute() const
RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
}
+void ClNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void ClNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void ClNormalizationFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.hpp
index 40b2693cd4..d9db0f2de3 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.hpp
@@ -23,9 +23,14 @@ public:
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext);
void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::CLNormalizationLayer m_NormalizationLayer;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
index dcef025a3d..7a2ff9ac1a 100644
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp
@@ -40,4 +40,42 @@ void NeonConvertBf16ToFp32Workload::Execute() const
}
}
+void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonConvertBf16ToFp32Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
index 9770fbdbb0..9d44ad2cac 100644
--- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf
public:
NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
std::vector<TensorHandlePair> m_TensorHandlePairs;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
index 1b9e1bcfb5..ce6c785329 100644
--- a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
@@ -40,4 +40,42 @@ void NeonConvertFp16ToFp32Workload::Execute() const
}
}
+void NeonConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonConvertFp16ToFp32Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
index 9159e51f8b..c0165eae78 100644
--- a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp1
public:
NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
std::vector<TensorHandlePair> m_TensorHandlePairs;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
index ac6a69d21a..acd1a1ea8f 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp
@@ -41,4 +41,42 @@ void NeonConvertFp32ToBf16Workload::Execute() const
}
}
+void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonConvertFp32ToBf16Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
index 6c0118712f..2304f8a1d4 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp
public:
NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
std::vector<TensorHandlePair> m_TensorHandlePairs;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
index d65cba046b..089716a4b4 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
@@ -41,4 +41,42 @@ void NeonConvertFp32ToFp16Workload::Execute() const
}
}
+void NeonConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonConvertFp32ToFp16Workload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
index 8e9f11b857..666f48794b 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.hpp
@@ -17,10 +17,15 @@ class NeonConvertFp32ToFp16Workload : public Float32ToFloat16Workload<ConvertFp3
public:
NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
std::vector<TensorHandlePair> m_TensorHandlePairs;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index b97e3cef75..1d53245c5f 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -32,6 +32,45 @@ void NeonFloorFloatWorkload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFloorFloatWorkload_Execute", this->GetGuid());
m_Layer->run();
}
+
+void NeonFloorFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonFloorFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonFloorFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
index 7113931673..8ba6b4a5c5 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.hpp
@@ -20,9 +20,14 @@ class NeonFloorFloatWorkload : public FloatWorkload<FloorQueueDescriptor>
public:
NeonFloorFloatWorkload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
std::unique_ptr<arm_compute::IFunction> m_Layer;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 887f25a333..c0c6ed4982 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -60,4 +60,42 @@ void NeonL2NormalizationFloatWorkload::Execute() const
m_Layer->run();
}
+void NeonL2NormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonL2NormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonL2NormalizationFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
index 82f0639e9c..9c591fc7a7 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
@@ -26,9 +26,14 @@ public:
NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
std::unique_ptr<arm_compute::IFunction> m_Layer;
+ virtual void Reconfigure();
};
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index b8224e6ca1..2f14ab9022 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -464,4 +464,42 @@ void NeonLstmFloatWorkload::FreeUnusedTensors()
FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
}
+void NeonLstmFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonLstmFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonLstmFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
index ebbf180371..4bb3ff823e 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.hpp
@@ -21,7 +21,11 @@ class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
public:
NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
mutable arm_compute::NELSTMLayer m_LstmLayer;
@@ -51,6 +55,7 @@ private:
std::unique_ptr<arm_compute::Tensor> m_OutputLayerNormWeightsTensor;
void FreeUnusedTensors();
+ virtual void Reconfigure();
};
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index f811a0457e..01ac5c1b64 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -110,4 +110,42 @@ void NeonNormalizationFloatWorkload::Execute() const
m_NormalizationLayer->run();
}
+void NeonNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+// Replace output tensor handle with the given TensorHandle
+void NeonNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
+{
+ ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
+ this->m_Data.m_Inputs[slot] = tensorHandle;
+ try
+ {
+ Reconfigure();
+ }
+ catch(armnn::UnimplementedException& e)
+ {
+ // Cannot reconfigure, revert the slot back and throw the exception.
+ this->m_Data.m_Inputs[slot] = backupHandle;
+ throw e;
+ }
+}
+
+void NeonNormalizationFloatWorkload::Reconfigure()
+{
+ throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
+}
+
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
index ed5453619e..9605ed1543 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.hpp
@@ -26,9 +26,14 @@ public:
NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
+ // Replace input tensor handle with the given TensorHandle
+ void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
+ // Replace output tensor handle with the given TensorHandle
+ void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
private:
std::unique_ptr<arm_compute::IFunction> m_NormalizationLayer;
+ virtual void Reconfigure();
};
} //namespace armnn