aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2020-08-07 16:00:38 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2020-08-10 10:49:31 +0000
commit9cf7f88f5923dd1ba5d159ade85fcc20185c323f (patch)
tree245b89961e45a3ee4309fafef681639d1635e2c7
parent76615a5edd55b890acdd5fb078d9242e1e719a45 (diff)
downloadarmnn-9cf7f88f5923dd1ba5d159ade85fcc20185c323f.tar.gz
Update ACL pin to 4aed4aafa2ddb0b6f4b76aef5008c8bb45599ea4
* Use axis=-1 in all softmax tests as ACL only accepts this dimension (the right-most dimension, which in ACL is 0 and in ArmNN is -1). Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I4be83f60aa9505b4bf2367c4489e6f12d644c6d4
-rwxr-xr-xscripts/get_compute_library.sh2
-rw-r--r--src/armnn/test/CreateWorkload.hpp8
-rw-r--r--src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp4
-rw-r--r--src/backends/cl/test/ClLayerTests.cpp6
-rw-r--r--src/backends/neon/test/NeonLayerTests.cpp6
-rw-r--r--src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp13
-rw-r--r--src/backends/neon/workloads/NeonSoftmaxWorkload.cpp13
7 files changed, 25 insertions, 27 deletions
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 08c6b689d3..92ee2f6d67 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_20_05" # Release 20.05
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="b972ae62dd877eb53e6ad56ee124cfbc89441e2d" #COMPMID-3652 Fix CLFullyConnectedLayer failure on S10
+DEFAULT_CLFRAMEWORKREVISION="4aed4aafa2ddb0b6f4b76aef5008c8bb45599ea4" #COMPMID-3683: Fix performance regression on Mali-G76 (Fully connected) COMPMID-3682: Fix performance regression on Mali-G76 (Convolution)
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index aad6244c4b..fe73550bbc 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -947,10 +947,10 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
{
// Create the layer we're testing.
SoftmaxDescriptor softmaxDescriptor;
- // Set Axis to 1 if CL or Neon until further Axes are supported.
+ // Set Axis to -1 if CL or Neon until further Axes are supported.
if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
{
- softmaxDescriptor.m_Axis = 1;
+ softmaxDescriptor.m_Axis = -1;
}
Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
@@ -1268,10 +1268,10 @@ std::unique_ptr<LogSoftmaxWorkload> CreateLogSoftmaxWorkloadTest(armnn::IWorkloa
{
// Create the layer we're testing.
LogSoftmaxDescriptor logSoftmaxDescriptor;
- // Set Axis to 1 if CL or Neon until further Axes are supported.
+ // Set Axis to -1 if CL or Neon until further Axes are supported.
if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
{
- logSoftmaxDescriptor.m_Axis = 0;
+ logSoftmaxDescriptor.m_Axis = -1;
}
Layer* const layer = graph.AddLayer<LogSoftmaxLayer>(logSoftmaxDescriptor, "layer");
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 6251507e51..0452919e05 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -134,10 +134,10 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
IConnectableLayer* input = net->AddInputLayer(0, "input");
SoftmaxDescriptor softmaxDescriptor;
- // Set Axis to 0 if CL or Neon until further Axes are supported.
+ // Set Axis to -1 if CL or Neon until further Axes are supported.
if ( backends.front() == armnn::Compute::CpuAcc || backends.front() == armnn::Compute::GpuAcc)
{
- softmaxDescriptor.m_Axis = 0;
+ softmaxDescriptor.m_Axis = -1;
}
IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax");
IConnectableLayer* output = net->AddOutputLayer(0, "output");
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4a3bd44429..cdd3942d65 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -662,12 +662,6 @@ ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmax, Simple3dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint8, Simple3dSoftmaxUint8Test, 1.0f)
-
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmax, Simple4dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint8, Simple4dSoftmaxUint8Test, 1.0f)
-
// LogSoftmax
ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 345538acfe..09832dc391 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -500,12 +500,6 @@ ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxBeta1, Simple3dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxBeta1Uint8, Simple3dSoftmaxUint8Test, 1.0f)
-
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxBeta1, Simple4dSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxBeta1Uint8, Simple4dSoftmaxUint8Test, 1.0f)
-
// LogSoftmax
ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
diff --git a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
index 9c8ab072c5..39ff279728 100644
--- a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
@@ -23,8 +23,12 @@ arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
- return arm_compute::NELogSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+ int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
+ unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ return arm_compute::NELogSoftmaxLayer::validate(&aclInputInfo,
+ &aclOutputInfo,
+ descriptor.m_Beta,
+ static_cast<int>(aclAxis));
}
NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& descriptor,
@@ -38,8 +42,9 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NELogSoftmaxLayer>(memoryManager);
- int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+ int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+ unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
m_LogSoftmaxLayer.reset(layer.release());
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index 37e7bfbac5..7e1501fa3e 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -23,8 +23,12 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
- return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+ int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
+ unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ return arm_compute::NESoftmaxLayer::validate(&aclInputInfo,
+ &aclOutputInfo,
+ descriptor.m_Beta,
+ static_cast<int>(aclAxis));
}
NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
@@ -38,8 +42,9 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
- int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+ int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+ unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
m_SoftmaxLayer.reset(layer.release());
}