diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-03-21 09:27:08 +0000 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-01 12:36:11 +0100 |
commit | a3f4fbaf9ce6e30b3d1337bdfbb47b7301f97d1d (patch) | |
tree | ce8d29eefd17a49fe431eba26e75d23dbde5aa81 /src/backends/cl/workloads | |
parent | ebbf1d4853afd2f96bca83bfb4c225a78f94ea93 (diff) | |
download | armnn-a3f4fbaf9ce6e30b3d1337bdfbb47b7301f97d1d.tar.gz |
IVGCVSW-6732 Tests surrounded in '#if defined(ARMNNREF_ENABLED)' in android-nn-driver do not execute.
* Change to src/backends/cl/workloads/ClLstmFloatWorkload.cpp fixes LstmTests_GpuAcc tests.
* Change to src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp & ClConvertFp32ToFp16Workload.hpp
fixes MeanTests_GpuAcc and Convolution2DTests_1.1 tests.
* Added UnitTests to src/backends/cl/test/ClImportTensorHandleTests.cpp to test import on Convert Layers.
!android-nn-driver:7264
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I0c46dc4b9c54eca8771ab12ed0302b6224606957
Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r-- | src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp | 6 | ||||
-rw-r--r-- | src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp | 6 | ||||
-rw-r--r-- | src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 2 |
3 files changed, 7 insertions, 7 deletions
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 8ccf157aca..4ac1274130 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -80,8 +80,8 @@ void ClConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensor
 // Replace output tensor handle with the given TensorHandle
 void ClConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
 {
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
     try
     {
         Reconfigure();
@@ -89,7 +89,7 @@ void ClConvertFp16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tenso
     catch(armnn::UnimplementedException& e)
     {
         // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
+        this->m_Data.m_Outputs[slot] = backupHandle;
         throw e;
     }
 }
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index a44a80c997..307314d784 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -80,8 +80,8 @@ void ClConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensor
 // Replace output tensor handle with the given TensorHandle
 void ClConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
 {
-    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
-    this->m_Data.m_Inputs[slot] = tensorHandle;
+    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
+    this->m_Data.m_Outputs[slot] = tensorHandle;
     try
     {
         Reconfigure();
@@ -89,7 +89,7 @@ void ClConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tenso
     catch(armnn::UnimplementedException& e)
     {
         // Cannot reconfigure, revert the slot back and throw the exception.
-        this->m_Data.m_Inputs[slot] = backupHandle;
+        this->m_Data.m_Outputs[slot] = backupHandle;
         throw e;
     }
 }
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index e190f33bbc..d20c6fc7b5 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -310,7 +310,7 @@ arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const T

     if (paramsInfo.m_ProjectionBias != nullptr)
     {
-        aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());
+        aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
     }
     lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
                                            paramsInfo.m_ProjectionBias != nullptr ?