author     Sadik Armagan <sadik.armagan@arm.com>  2020-04-30 11:39:37 +0100
committer  Kevin May <kevin.may@arm.com>          2020-04-30 10:46:12 +0000
commit     be88a57579a9a848efe13e6c524b5b104b871733 (patch)
tree       d5dc03627048f8ecd2d728b154434244f05475ea
parent     9937f9359ac4eeefc3535b66eddddd1b4f067c54 (diff)
download   armnn-be88a57579a9a848efe13e6c524b5b104b871733.tar.gz
IVGCVSW-4753 Fix CpuAcc Hal 1.3 Softmax Failures
* Refactor Neon Softmax workload to accept supported data types

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I54aa72d5cbb862cafcc1eabe48f6a00d61050cd7
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                        | 11
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                   |  2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                |  3
-rw-r--r--  src/backends/neon/backend.mk                             |  4
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp       | 29
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt               |  8
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp  | 28
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp  | 18
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 41
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp | 30
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 51
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp | 30
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxWorkload.cpp      | 53
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxWorkload.hpp      | 36
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp            |  3
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp   | 16
16 files changed, 145 insertions(+), 218 deletions(-)
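
At a glance, the patch deletes the three-file softmax implementation (a shared validate base plus Float and Uint8 workloads) and replaces it with a single NeonSoftmaxWorkload. A condensed sketch of the class shape before and after, as it appears in the hunks below:

// Before: one workload class per data type, with the shared validate
// function living in NeonSoftmaxBaseWorkload.
class NeonSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor> { /* ... */ };
class NeonSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor> { /* ... */ };

// After: a single class covering every data type NESoftmaxLayer supports,
// including the QAsymmS8 tensors that HAL 1.3 introduces.
class NeonSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor> { /* ... */ };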
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index b6ffd216e0..05d0e2f4ec 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -799,6 +799,17 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
// Connect up
armnn::TensorInfo tensorInfo({4, 1}, DataType);
+ if (DataType == armnn::DataType::QAsymmU8)
+ {
+ tensorInfo.SetQuantizationOffset(0);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+ else if (DataType == armnn::DataType::QAsymmS8)
+ {
+ tensorInfo.SetQuantizationOffset(-128);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+
Connect(input, layer, tensorInfo);
Connect(layer, output, tensorInfo);
CreateTensorHandles(graph, factory);
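
The quantization parameters in this hunk are not arbitrary: softmax outputs lie in [0, 1), so a scale of 1/256 with the zero point at the type's minimum maps the full 8-bit range onto exactly that interval (the deleted NeonSoftmaxUint8Workload further down enforces the same constraint by hand). A minimal sketch of the affine arithmetic these parameters imply; the helper functions are illustrative, not Arm NN API:

#include <cmath>
#include <cstdint>

// Affine quantization: real = scale * (q - offset).
// scale = 1/256, offset = -128 lets int8's [-128, 127] represent
// softmax's output range [0, 255/256] with no wasted codes; for
// QAsymmU8 the same scale applies with offset 0.
int8_t QuantizeS8(float real)
{
    long q = std::lround(real * 256.f) - 128;   // q = real/scale + offset
    return static_cast<int8_t>(q < -128 ? -128 : (q > 127 ? 127 : q));
}

float DequantizeS8(int8_t q)
{
    return (1.f / 256) * (static_cast<float>(q) + 128);   // 0 -> 0.5, 127 -> ~0.996
}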
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index f47601a1c2..b095ed5629 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -54,7 +54,7 @@
#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonRsqrtWorkload.hpp"
#include "workloads/NeonSliceWorkload.hpp"
-#include "workloads/NeonSoftmaxBaseWorkload.hpp"
+#include "workloads/NeonSoftmaxWorkload.hpp"
#include "workloads/NeonSpaceToBatchNdWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 56ee9a7cd8..4a3533186a 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -474,8 +474,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDesc
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(
- descriptor, info, m_MemoryManager->GetIntraLayerManager());
+ return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
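
This factory change is the heart of the HAL 1.3 fix. MakeWorkloadHelper selects a workload class from the input tensor's data type, and with only a Float and a Uint8 candidate there was nothing to instantiate for the signed quantized tensors HAL 1.3 sends to CpuAcc. A simplified sketch of that kind of dispatch, assuming the armnn namespace (an illustration of the pattern, not the exact helper, which also handles null-workload fallbacks):

// Sketch of the two-candidate dispatch being removed.
template <typename FloatWorkloadT, typename Uint8WorkloadT, typename QueueDescriptorT, typename... Args>
std::unique_ptr<IWorkload> MakeWorkloadSketch(const QueueDescriptorT& descriptor,
                                              const WorkloadInfo& info,
                                              Args&&... args)
{
    switch (info.m_InputTensorInfos[0].GetDataType())
    {
        case DataType::Float16:
        case DataType::Float32:
            return std::make_unique<FloatWorkloadT>(descriptor, info, std::forward<Args>(args)...);
        case DataType::QAsymmU8:
            return std::make_unique<Uint8WorkloadT>(descriptor, info, std::forward<Args>(args)...);
        default:
            return nullptr; // QAsymmS8 fell through here, so HAL 1.3 softmax failed
    }
}

With a single NeonSoftmaxWorkload there is no per-type branch to miss; unsupported types are instead rejected up front by NeonSoftmaxWorkloadValidate.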
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 460b68ae0c..2bba74a79d 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -62,9 +62,7 @@ BACKEND_SOURCES := \
workloads/NeonResizeWorkload.cpp \
workloads/NeonRsqrtWorkload.cpp \
workloads/NeonSliceWorkload.cpp \
- workloads/NeonSoftmaxBaseWorkload.cpp \
- workloads/NeonSoftmaxFloatWorkload.cpp \
- workloads/NeonSoftmaxUint8Workload.cpp \
+ workloads/NeonSoftmaxWorkload.cpp \
workloads/NeonSpaceToBatchNdWorkload.cpp \
workloads/NeonSpaceToDepthWorkload.cpp \
workloads/NeonSplitterWorkload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a89602db7f..0af9bf3e0d 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -582,20 +582,41 @@ static void NeonCreateSoftmaxWorkloadTest()
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
+ armnn::TensorInfo tensorInfo({4, 1}, DataType);
+ if (DataType == armnn::DataType::QAsymmU8)
+ {
+ tensorInfo.SetQuantizationOffset(0);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+ else if (DataType == armnn::DataType::QAsymmS8)
+ {
+ tensorInfo.SetQuantizationOffset(-128);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+ BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+ BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
- NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float16>();
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
}
#endif
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
{
- NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float32>();
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+{
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+{
+ NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
}
template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
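
The quantization fixup above now appears verbatim in three test files (CreateWorkload.hpp, this file, and RefCreateWorkloadTests.cpp below). A small shared helper could factor it out; the one sketched here is hypothetical, not part of the patch:

// Hypothetical helper (not in this patch): build the expected tensor info
// with the output quantization parameters the softmax workloads require.
armnn::TensorInfo MakeSoftmaxTensorInfo(const armnn::TensorShape& shape, armnn::DataType dataType)
{
    armnn::TensorInfo tensorInfo(shape, dataType);
    if (dataType == armnn::DataType::QAsymmU8)
    {
        tensorInfo.SetQuantizationOffset(0);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }
    else if (dataType == armnn::DataType::QAsymmS8)
    {
        tensorInfo.SetQuantizationOffset(-128);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }
    return tensorInfo;
}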
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0c02b5cf22..c5548bf28f 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -86,12 +86,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonRsqrtWorkload.hpp
NeonSliceWorkload.cpp
NeonSliceWorkload.hpp
- NeonSoftmaxBaseWorkload.cpp
- NeonSoftmaxBaseWorkload.hpp
- NeonSoftmaxFloatWorkload.cpp
- NeonSoftmaxFloatWorkload.hpp
- NeonSoftmaxUint8Workload.cpp
- NeonSoftmaxUint8Workload.hpp
+ NeonSoftmaxWorkload.cpp
+ NeonSoftmaxWorkload.hpp
NeonSpaceToBatchNdWorkload.cpp
NeonSpaceToBatchNdWorkload.hpp
NeonSpaceToDepthWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
deleted file mode 100644
index 41ebfb9fd3..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxBaseWorkload.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <aclCommon/ArmComputeUtils.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor)
-{
- const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
- return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp
deleted file mode 100644
index 6eecb9787d..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-#include <arm_compute/core/Error.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor);
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
deleted file mode 100644
index a4690a7985..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxFloatWorkload.hpp"
-
-#include "NeonWorkloadUtils.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
-
- // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
- arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
- unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
- m_SoftmaxLayer.reset(layer.release());
-}
-
-void NeonSoftmaxFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
- m_SoftmaxLayer->run();
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp
deleted file mode 100644
index 77f2cc3d01..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/runtime/IFunction.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class NeonSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
-{
-public:
- NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- virtual void Execute() const override;
-
-private:
- std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
deleted file mode 100644
index 05d93b963c..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxUint8Workload.hpp"
-#include "NeonWorkloadUtils.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
-
- arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- const auto outputQuantization = output.info()->quantization_info();
-
- if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
- (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
- outputQuantization.scale().empty() || outputQuantization.offset().empty())
- {
- throw InvalidArgumentException(
- "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
- }
-
- auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
- unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
- m_SoftmaxLayer.reset(layer.release());
-}
-
-void NeonSoftmaxUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");
-
- m_SoftmaxLayer->run();
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp
deleted file mode 100644
index c5692084c9..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/runtime/IFunction.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class NeonSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
-{
-public:
- NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- virtual void Execute() const override;
-
-private:
- std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
new file mode 100644
index 0000000000..149804bdd6
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -0,0 +1,53 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSoftmaxWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor)
+{
+ const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
+ return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+}
+
+NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+ : BaseWorkload<SoftmaxQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonSoftmaxWorkload", 1, 1);
+
+ // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+ m_SoftmaxLayer.reset(layer.release());
+}
+
+void NeonSoftmaxWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxWorkload_Execute");
+ m_SoftmaxLayer->run();
+}
+
+} //namespace armnn
+
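
NeonSoftmaxWorkloadValidate keeps its old name and signature, which is why NeonLayerSupport.cpp only needs the include swap shown earlier. For context, the caller in that file presumably follows the backend's usual forwarding pattern; this is a sketch based on Arm NN's FORWARD_WORKLOAD_VALIDATE_FUNC convention, and the exact body is outside this diff:

// Sketch of the assumed call site in NeonLayerSupport.cpp (not part of this patch).
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}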
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp
new file mode 100644
index 0000000000..26081e10fb
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/runtime/MemoryManagerOnDemand.h>
+
+#include <memory>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor);
+
+class NeonSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor>
+{
+public:
+ NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ virtual void Execute() const override;
+
+private:
+ std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
+};
+
+} //namespace armnn
+
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 4117a3dd8c..9da698fc8c 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -45,8 +45,7 @@
#include "NeonResizeWorkload.hpp"
#include "NeonRsqrtWorkload.hpp"
#include "NeonSliceWorkload.hpp"
-#include "NeonSoftmaxFloatWorkload.hpp"
-#include "NeonSoftmaxUint8Workload.hpp"
+#include "NeonSoftmaxWorkload.hpp"
#include "NeonSpaceToBatchNdWorkload.hpp"
#include "NeonSpaceToDepthWorkload.hpp"
#include "NeonSplitterWorkload.hpp"
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 29bfbc0ee2..4a57df7d6a 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -504,10 +504,22 @@ static void RefCreateSoftmaxWorkloadTest()
auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
+
+ armnn::TensorInfo tensorInfo({4, 1}, DataType);
+ if (DataType == armnn::DataType::QAsymmU8)
+ {
+ tensorInfo.SetQuantizationOffset(0);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+ else if (DataType == armnn::DataType::QAsymmS8)
+ {
+ tensorInfo.SetQuantizationOffset(-128);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
CheckInputOutput(
std::move(workload),
- TensorInfo({4, 1}, DataType),
- TensorInfo({4, 1}, DataType));
+ tensorInfo,
+ tensorInfo);
}
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)