diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2020-08-07 16:00:38 +0100 |
---|---|---|
committer | TeresaARM <teresa.charlinreyes@arm.com> | 2020-08-10 10:49:31 +0000 |
commit | 9cf7f88f5923dd1ba5d159ade85fcc20185c323f (patch) | |
tree | 245b89961e45a3ee4309fafef681639d1635e2c7 /src/backends | |
parent | 76615a5edd55b890acdd5fb078d9242e1e719a45 (diff) | |
download | armnn-9cf7f88f5923dd1ba5d159ade85fcc20185c323f.tar.gz |
Update ACL pin to 4aed4aafa2ddb0b6f4b76aef5008c8bb45599ea4
* Use axis=-1 in all softmax tests, as ACL only accepts the right-most
dimension as the softmax axis (in ACL this is axis 0; in ArmNN it is -1).
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I4be83f60aa9505b4bf2367c4489e6f12d644c6d4
Diffstat (limited to 'src/backends')
-rw-r--r-- | src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp | 4 | ||||
-rw-r--r-- | src/backends/cl/test/ClLayerTests.cpp | 6 | ||||
-rw-r--r-- | src/backends/neon/test/NeonLayerTests.cpp | 6 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp | 13 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonSoftmaxWorkload.cpp | 13 |
5 files changed, 20 insertions, 22 deletions
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 6251507e51..0452919e05 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -134,10 +134,10 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
     IConnectableLayer* input = net->AddInputLayer(0, "input");
     SoftmaxDescriptor softmaxDescriptor;
-    // Set Axis to 0 if CL or Neon until further Axes are supported.
+    // Set Axis to -1 if CL or Neon until further Axes are supported.
     if ( backends.front() == armnn::Compute::CpuAcc || backends.front() == armnn::Compute::GpuAcc)
     {
-        softmaxDescriptor.m_Axis = 0;
+        softmaxDescriptor.m_Axis = -1;
     }
     IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4a3bd44429..cdd3942d65 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -662,12 +662,6 @@ ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
 ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
 ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmax, Simple3dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint8, Simple3dSoftmaxUint8Test, 1.0f)
-
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmax, Simple4dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint8, Simple4dSoftmaxUint8Test, 1.0f)
-
 // LogSoftmax
 ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 345538acfe..09832dc391 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -500,12 +500,6 @@ ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
 ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
 ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxBeta1, Simple3dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxBeta1Uint8, Simple3dSoftmaxUint8Test, 1.0f)
-
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxBeta1, Simple4dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxBeta1Uint8, Simple4dSoftmaxUint8Test, 1.0f)
-
 // LogSoftmax
 ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
diff --git a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
index 9c8ab072c5..39ff279728 100644
--- a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
@@ -23,8 +23,12 @@ arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
-    return arm_compute::NELogSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+    int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
+    unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+    return arm_compute::NELogSoftmaxLayer::validate(&aclInputInfo,
+                                                    &aclOutputInfo,
+                                                    descriptor.m_Beta,
+                                                    static_cast<int>(aclAxis));
 }
 
 NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& descriptor,
@@ -38,8 +42,9 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NELogSoftmaxLayer>(memoryManager);
-    int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
-    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+    int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+    unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
 
     m_LogSoftmaxLayer.reset(layer.release());
 }
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index 37e7bfbac5..7e1501fa3e 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -23,8 +23,12 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
-    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+    int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
+    unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo,
+                                                 &aclOutputInfo,
+                                                 descriptor.m_Beta,
+                                                 static_cast<int>(aclAxis));
 }
 
 NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
@@ -38,8 +42,9 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
-    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+    int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+    unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
 
     m_SoftmaxLayer.reset(layer.release());
 }