author    Francis Murtagh <francis.murtagh@arm.com>    2019-07-26 15:44:17 +0100
committer Francis Murtagh <francis.murtagh@arm.com>    2019-07-26 15:44:17 +0100
commit    3b9383548309a8f9121668826a628ec250c57a1c (patch)
tree      01cc4e162008aea23bfe9ae167ded0e8f9abc39c
parent    82b15eda6f87a20bc31256f5e85eb4082d3d0591 (diff)
download  armnn-3b9383548309a8f9121668826a628ec250c57a1c.tar.gz
IVGCVSW-3554 Update workloads to pass Softmax Axis Parameter to Backends
* Add check in CL and Neon to ensure axis is 1, otherwise return unsupported.
* Edit CreateWorkload test and JsonPrinter test to ensure axis of 1.

Change-Id: I499b405532e26fefc2dd1c18b6dc6005813b5604
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                         | 7
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp                      | 1
-rw-r--r--  src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp  | 8
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                        | 8
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp       | 7
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp       | 4
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp      | 2
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                    | 6
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp   | 4
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp  | 2
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp  | 3
11 files changed, 41 insertions, 11 deletions
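
The axis rule this commit adds to ClLayerSupport::IsSoftmaxSupported and NeonLayerSupport::IsSoftmaxSupported can be read in isolation. Below is a minimal standalone sketch of that condition; the wrapper function name IsSoftmaxAxisSupported is illustrative only and does not exist in the codebase.

#include <iostream>

// Sketch of the new backend check: an axis passes only if it is exactly 1,
// or if it is negative and normalises to 1 (numDimensions + axis == 1).
bool IsSoftmaxAxisSupported(int axis, unsigned int numDimensions)
{
    return axis == 1 ||
           (axis < 0 && static_cast<int>(numDimensions) + axis == 1);
}

int main()
{
    std::cout << IsSoftmaxAxisSupported(1, 4)  << '\n'; // 1: axis 1 always passes
    std::cout << IsSoftmaxAxisSupported(-3, 4) << '\n'; // 1: 4 + (-3) == 1
    std::cout << IsSoftmaxAxisSupported(2, 4)  << '\n'; // 0: rejected, reason string set
    return 0;
}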
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4181db26d0..619c6d02cd 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -617,8 +617,13 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
{
// Create the layer we're testing.
SoftmaxDescriptor softmaxDescriptor;
- Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
+ // Set Axis to 1 if CL or Neon until further Axes are supported.
+ if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
+ {
+ softmaxDescriptor.m_Axis = 1;
+ }
+ Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
// Create extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 4027e9481b..c73365bae1 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -1220,6 +1220,7 @@ void CaffeParserBase::ParseSoftmaxLayer(const LayerParameter& layerParam)
// Engine
armnn::SoftmaxDescriptor softmaxDescriptor;
+ softmaxDescriptor.m_Axis = 1;
armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
softmaxDescriptor,
layerParam.name().c_str());
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 4591def34f..c7870d9672 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -134,7 +134,13 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
INetworkPtr net(INetwork::Create());
IConnectableLayer* input = net->AddInputLayer(0, "input");
- IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
+ SoftmaxDescriptor softmaxDescriptor;
+ // Set Axis to 1 if CL or Neon until further Axes are supported.
+ if ( backends.front() == armnn::Compute::CpuAcc || backends.front() == armnn::Compute::GpuAcc)
+ {
+ softmaxDescriptor.m_Axis = 1;
+ }
+ IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax");
IConnectableLayer* output = net->AddOutputLayer(0, "output");
input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7eb1dcf39a..b737daf7f4 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -594,8 +594,14 @@ bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ if (!(descriptor.m_Axis == 1 ||
+ (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Softmax: Only supports Axis equal to 1.");
+ return false;
+ }
ignore_unused(descriptor);
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
index b1dc404a6f..2f6d380f94 100644
--- a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
@@ -6,6 +6,7 @@
#include "ClSoftmaxBaseWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
@@ -13,12 +14,14 @@ namespace armnn
{
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output)
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor)
{
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+ return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
}
}
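
Both the CL and Neon validate functions now derive the ACL axis via ComputeSoftmaxAclAxis, declared in the newly included aclCommon/ArmComputeUtils.hpp; its definition is not part of this diff. A plausible sketch of the mapping, assuming ACL's innermost-first dimension ordering, with the signature simplified to take a dimension count rather than an armnn::TensorInfo:

// Hypothetical reconstruction: the real helper lives in
// aclCommon/ArmComputeUtils.hpp and takes an armnn::TensorInfo.
// ACL numbers dimensions innermost-first, so ArmNN axis 1 on an
// N-dimensional tensor maps to ACL axis N - 1.
unsigned int ComputeSoftmaxAclAxisSketch(unsigned int numDimensions)
{
    return numDimensions - 1;
}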
diff --git a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
index b800056cdf..8d73060162 100644
--- a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
+++ b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
@@ -5,6 +5,7 @@
#pragma once
+#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>
@@ -12,6 +13,7 @@ namespace armnn
{
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output);
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor);
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index c78ab039ef..f2f8d17901 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -14,7 +14,7 @@ namespace armnn
{
ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
, m_SoftmaxLayer(memoryManager)
{
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index cb709c315a..e856210503 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -538,6 +538,12 @@ bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ if (!(descriptor.m_Axis == 1 ||
+ (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Softmax: Only supports Axis equal to 1.");
+ return false;
+ }
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
index b229bc48a2..8acb775344 100644
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
@@ -6,6 +6,7 @@
#include "NeonSoftmaxBaseWorkload.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
@@ -19,7 +20,8 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+ return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
}
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 19c50db15b..4dc913134c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -23,8 +23,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
m_SoftmaxLayer.reset(layer.release());
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 363c1502f5..8e83914dcb 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -33,9 +33,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
}
- unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
-
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
m_SoftmaxLayer.reset(layer.release());
}