From be88a57579a9a848efe13e6c524b5b104b871733 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Thu, 30 Apr 2020 11:39:37 +0100
Subject: IVGCVSW-4753 Fix CpuAcc Hal 1.3 Softmax Failures

* Refactor Neon Softmax workload to accept supported data types

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I54aa72d5cbb862cafcc1eabe48f6a00d61050cd7
---
 src/armnn/test/CreateWorkload.hpp                  | 11 +++++
 src/backends/neon/NeonLayerSupport.cpp             |  2 +-
 src/backends/neon/NeonWorkloadFactory.cpp          |  3 +-
 src/backends/neon/backend.mk                       |  4 +-
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 29 ++++++++++--
 src/backends/neon/workloads/CMakeLists.txt         |  8 +---
 .../neon/workloads/NeonSoftmaxBaseWorkload.cpp     | 28 ------------
 .../neon/workloads/NeonSoftmaxBaseWorkload.hpp     | 18 --------
 .../neon/workloads/NeonSoftmaxFloatWorkload.cpp    | 41 -----------------
 .../neon/workloads/NeonSoftmaxFloatWorkload.hpp    | 30 ------------
 .../neon/workloads/NeonSoftmaxUint8Workload.cpp    | 51 ---------------------
 .../neon/workloads/NeonSoftmaxUint8Workload.hpp    | 30 ------------
 .../neon/workloads/NeonSoftmaxWorkload.cpp         | 53 ++++++++++++++++++++++
 .../neon/workloads/NeonSoftmaxWorkload.hpp         | 36 +++++++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp      |  3 +-
 .../reference/test/RefCreateWorkloadTests.cpp      | 16 ++++++-
 16 files changed, 145 insertions(+), 218 deletions(-)
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
 delete mode 100644 src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp
 create mode 100644 src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonSoftmaxWorkload.hpp

diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index b6ffd216e0..05d0e2f4ec 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -799,6 +799,17 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
 
     // Connect up
     armnn::TensorInfo tensorInfo({4, 1}, DataType);
+    if (DataType == armnn::DataType::QAsymmU8)
+    {
+        tensorInfo.SetQuantizationOffset(0);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
+    else if (DataType == armnn::DataType::QAsymmS8)
+    {
+        tensorInfo.SetQuantizationOffset(-128);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
+
     Connect(input, layer, tensorInfo);
     Connect(layer, output, tensorInfo);
     CreateTensorHandles(graph, factory);
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index f47601a1c2..b095ed5629 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -54,7 +54,7 @@
 #include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonRsqrtWorkload.hpp"
 #include "workloads/NeonSliceWorkload.hpp"
-#include "workloads/NeonSoftmaxBaseWorkload.hpp"
+#include "workloads/NeonSoftmaxWorkload.hpp"
 #include "workloads/NeonSpaceToBatchNdWorkload.hpp"
 #include "workloads/NeonSpaceToDepthWorkload.hpp"
 #include "workloads/NeonSplitterWorkload.hpp"
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 56ee9a7cd8..4a3533186a 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -474,8 +474,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDesc
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(
-        descriptor, info, m_MemoryManager->GetIntraLayerManager());
+    return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 460b68ae0c..2bba74a79d 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -62,9 +62,7 @@ BACKEND_SOURCES := \
         workloads/NeonResizeWorkload.cpp \
         workloads/NeonRsqrtWorkload.cpp \
         workloads/NeonSliceWorkload.cpp \
-        workloads/NeonSoftmaxBaseWorkload.cpp \
-        workloads/NeonSoftmaxFloatWorkload.cpp \
-        workloads/NeonSoftmaxUint8Workload.cpp \
+        workloads/NeonSoftmaxWorkload.cpp \
        workloads/NeonSpaceToBatchNdWorkload.cpp \
        workloads/NeonSpaceToDepthWorkload.cpp \
        workloads/NeonSplitterWorkload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a89602db7f..0af9bf3e0d 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -582,20 +582,41 @@ static void NeonCreateSoftmaxWorkloadTest()
     SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
+    armnn::TensorInfo tensorInfo({4, 1}, DataType);
+    if (DataType == armnn::DataType::QAsymmU8)
+    {
+        tensorInfo.SetQuantizationOffset(0);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
+    else if (DataType == armnn::DataType::QAsymmS8)
+    {
+        tensorInfo.SetQuantizationOffset(-128);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
+    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
 {
-    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float16>();
+    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
 }
 #endif
 
 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
 {
-    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float32>();
+    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+{
+    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+{
+    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
 }
 
 template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0c02b5cf22..c5548bf28f 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -86,12 +86,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonRsqrtWorkload.hpp
     NeonSliceWorkload.cpp
     NeonSliceWorkload.hpp
-    NeonSoftmaxBaseWorkload.cpp
-    NeonSoftmaxBaseWorkload.hpp
-    NeonSoftmaxFloatWorkload.cpp
-    NeonSoftmaxFloatWorkload.hpp
-    NeonSoftmaxUint8Workload.cpp
-    NeonSoftmaxUint8Workload.hpp
+    NeonSoftmaxWorkload.cpp
+    NeonSoftmaxWorkload.hpp
     NeonSpaceToBatchNdWorkload.cpp
     NeonSpaceToBatchNdWorkload.hpp
     NeonSpaceToDepthWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
deleted file mode 100644
index 41ebfb9fd3..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxBaseWorkload.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <aclCommon/ArmComputeUtils.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const SoftmaxDescriptor& descriptor)
-{
-    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
-    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
-    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp
deleted file mode 100644
index 6eecb9787d..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-#include <arm_compute/core/Error.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const SoftmaxDescriptor& descriptor);
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
deleted file mode 100644
index a4690a7985..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxFloatWorkload.hpp"
-
-#include "NeonWorkloadUtils.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
-    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
-
-    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
-    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
-    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
-    m_SoftmaxLayer.reset(layer.release());
-}
-
-void NeonSoftmaxFloatWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
-    m_SoftmaxLayer->run();
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp
deleted file mode 100644
index 77f2cc3d01..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/core/Error.h>
-#include <arm_compute/runtime/IFunction.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class NeonSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
-{
-public:
-    NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
-                             std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-    virtual void Execute() const override;
-
-private:
-    std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
deleted file mode 100644
index 05d93b963c..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxUint8Workload.hpp"
-#include "NeonWorkloadUtils.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info,
-                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
-
-    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    const auto outputQuantization = output.info()->quantization_info();
-
-    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
-        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
-         outputQuantization.scale().empty() || outputQuantization.offset().empty())
-    {
-        throw InvalidArgumentException(
-            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
-    }
-
-    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
-    layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
-    m_SoftmaxLayer.reset(layer.release());
-}
-
-void NeonSoftmaxUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");
-
-    m_SoftmaxLayer->run();
-}
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp
deleted file mode 100644
index c5692084c9..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/core/Error.h>
-#include <arm_compute/runtime/IFunction.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class NeonSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
-{
-public:
-    NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
-                             std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-    virtual void Execute() const override;
-
-private:
-    std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
-};
-
-} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
new file mode 100644
index 0000000000..149804bdd6
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -0,0 +1,53 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSoftmaxWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const SoftmaxDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
+    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
+}
+
+NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
+    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+    : BaseWorkload<SoftmaxQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonSoftmaxWorkload", 1, 1);
+
+    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
+    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
+    m_SoftmaxLayer.reset(layer.release());
+}
+
+void NeonSoftmaxWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxWorkload_Execute");
+    m_SoftmaxLayer->run();
+}
+
+} //namespace armnn
+
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp
new file mode 100644
index 0000000000..26081e10fb
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.hpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/runtime/MemoryManagerOnDemand.h>
+
+#include <memory>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const SoftmaxDescriptor& descriptor);
+
+class NeonSoftmaxWorkload : public BaseWorkload<SoftmaxQueueDescriptor>
+{
+public:
+    NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+                        std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+    virtual void Execute() const override;
+
+private:
+    std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
+};
+
+} //namespace armnn
+
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 4117a3dd8c..9da698fc8c 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -45,8 +45,7 @@
 #include "NeonResizeWorkload.hpp"
 #include "NeonRsqrtWorkload.hpp"
 #include "NeonSliceWorkload.hpp"
-#include "NeonSoftmaxFloatWorkload.hpp"
-#include "NeonSoftmaxUint8Workload.hpp"
+#include "NeonSoftmaxWorkload.hpp"
 #include "NeonSpaceToBatchNdWorkload.hpp"
 #include "NeonSpaceToDepthWorkload.hpp"
 #include "NeonSplitterWorkload.hpp"
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 29bfbc0ee2..4a57df7d6a 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -504,10 +504,22 @@ static void RefCreateSoftmaxWorkloadTest()
     auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
+
+    armnn::TensorInfo tensorInfo({4, 1}, DataType);
+    if (DataType == armnn::DataType::QAsymmU8)
+    {
+        tensorInfo.SetQuantizationOffset(0);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
+    else if (DataType == armnn::DataType::QAsymmS8)
+    {
+        tensorInfo.SetQuantizationOffset(-128);
+        tensorInfo.SetQuantizationScale(1.f / 256);
+    }
     CheckInputOutput(
         std::move(workload),
-        TensorInfo({4, 1}, DataType),
-        TensorInfo({4, 1}, DataType));
+        tensorInfo,
+        tensorInfo);
 }
 
 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
-- 
cgit v1.2.1