diff options
-rw-r--r-- | src/backends/neon/NeonLayerSupport.cpp | 46 | ||||
-rw-r--r-- | src/backends/neon/NeonWorkloadFactory.cpp | 42 | ||||
-rw-r--r-- | src/backends/neon/backend.mk | 1 | ||||
-rw-r--r-- | src/backends/neon/workloads/CMakeLists.txt | 4 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonExpWorkload.cpp | 42 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonExpWorkload.hpp | 32 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonWorkloads.hpp | 7 |
7 files changed, 130 insertions, 44 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 5d59ab83aa..f47601a1c2 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -7,6 +7,7 @@ #include "NeonBackendId.hpp" #include <armnn/Descriptors.hpp> +#include <armnn/Exceptions.hpp> #include <armnn/Tensor.hpp> #include <armnn/Types.hpp> #include <armnn/BackendRegistry.hpp> @@ -24,6 +25,7 @@ #include "workloads/NeonArgMinMaxWorkload.hpp" #include "workloads/NeonBatchNormalizationWorkload.hpp" #include "workloads/NeonBatchToSpaceNdWorkload.hpp" +#include "workloads/NeonExpWorkload.hpp" #include "workloads/NeonComparisonWorkload.hpp" #include "workloads/NeonConstantWorkload.hpp" #include "workloads/NeonConvolution2dWorkload.hpp" @@ -374,29 +376,31 @@ bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, const ElementwiseUnaryDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const { - if (descriptor.m_Operation == UnaryOperation::Abs) + switch(descriptor.m_Operation) { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, - reasonIfUnsupported, - input, - output); - } - else if (descriptor.m_Operation == UnaryOperation::Rsqrt) - { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, - reasonIfUnsupported, - input, - output); - } - else if (descriptor.m_Operation == UnaryOperation::Neg) - { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate, - reasonIfUnsupported, - input, - output); + case UnaryOperation::Abs: + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, + reasonIfUnsupported, + input, + output); + case UnaryOperation::Exp: + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate, + reasonIfUnsupported, + input, + output); + case UnaryOperation::Neg: + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate, + reasonIfUnsupported, + input, + output); + case UnaryOperation::Rsqrt: + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, + 
reasonIfUnsupported, + input, + output); + default: + return false; } - - return false; } bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input, diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index b7609ee765..237808c26e 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -227,27 +227,31 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision( std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary( const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + switch(descriptor.m_Parameters.m_Operation) { - AbsQueueDescriptor absQueueDescriptor; - absQueueDescriptor.m_Inputs = descriptor.m_Inputs; - absQueueDescriptor.m_Outputs = descriptor.m_Outputs; - - return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info); - } - else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) - { - RsqrtQueueDescriptor rsqrtQueueDescriptor; - rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; - rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; - - return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info); + case UnaryOperation::Abs: + { + AbsQueueDescriptor absQueueDescriptor; + absQueueDescriptor.m_Inputs = descriptor.m_Inputs; + absQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info); + } + case UnaryOperation::Rsqrt: + { + RsqrtQueueDescriptor rsqrtQueueDescriptor; + rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; + rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info); + } + case UnaryOperation::Neg: + return std::make_unique<NeonNegWorkload>(descriptor, info); + case UnaryOperation::Exp: + return std::make_unique<NeonExpWorkload>(descriptor, info); + 
default: + return nullptr; } - else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Neg) - { - return std::make_unique<NeonNegWorkload>(descriptor, info); - } - return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info); } std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk index c6b22306ea..460b68ae0c 100644 --- a/src/backends/neon/backend.mk +++ b/src/backends/neon/backend.mk @@ -39,6 +39,7 @@ BACKEND_SOURCES := \ workloads/NeonDepthwiseConvolutionWorkload.cpp \ workloads/NeonDequantizeWorkload.cpp \ workloads/NeonDetectionPostProcessWorkload.cpp \ + workloads/NeonExpWorkload.cpp \ workloads/NeonFloorFloatWorkload.cpp \ workloads/NeonFullyConnectedWorkload.cpp \ workloads/NeonInstanceNormalizationWorkload.cpp \ diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt index 7db315f116..0c02b5cf22 100644 --- a/src/backends/neon/workloads/CMakeLists.txt +++ b/src/backends/neon/workloads/CMakeLists.txt @@ -40,6 +40,8 @@ list(APPEND armnnNeonBackendWorkloads_sources NeonDequantizeWorkload.hpp NeonDetectionPostProcessWorkload.cpp NeonDetectionPostProcessWorkload.hpp + NeonExpWorkload.cpp + NeonExpWorkload.hpp NeonFloorFloatWorkload.cpp NeonFloorFloatWorkload.hpp NeonFullyConnectedWorkload.cpp diff --git 
a/src/backends/neon/workloads/NeonExpWorkload.cpp b/src/backends/neon/workloads/NeonExpWorkload.cpp new file mode 100644 index 0000000000..7baaa84547 --- /dev/null +++ b/src/backends/neon/workloads/NeonExpWorkload.cpp @@ -0,0 +1,42 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "NeonExpWorkload.hpp" + +#include "NeonWorkloadUtils.hpp" + +#include <aclCommon/ArmComputeTensorHandle.hpp> +#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> + +namespace armnn +{ + +arm_compute::Status NeonExpWorkloadValidate(const TensorInfo& input, const TensorInfo& output) +{ + const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output); + + return arm_compute::NEExpLayer::validate(&aclInput, &aclOutput); +} + +NeonExpWorkload::NeonExpWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonExpWorkload", 1, 1); + + arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_ExpLayer.configure(&input, &output); +} + +void NeonExpWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonExpWorkload_Execute"); + m_ExpLayer.run(); +} + +} // namespace armnn diff --git a/src/backends/neon/workloads/NeonExpWorkload.hpp b/src/backends/neon/workloads/NeonExpWorkload.hpp new file mode 100644 index 0000000000..d64340cb6d --- /dev/null +++ b/src/backends/neon/workloads/NeonExpWorkload.hpp @@ -0,0 +1,32 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <backendsCommon/Workload.hpp> +#include <arm_compute/core/Error.h> +#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h> + +namespace armnn +{ + +arm_compute::Status NeonExpWorkloadValidate(const TensorInfo& input, const TensorInfo& output); + +class NeonExpWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor> +{ +public: + NeonExpWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEExpLayer m_ExpLayer; +}; + +} //namespace armnn + + + + + diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp index f25554722d..4117a3dd8c 100644 --- a/src/backends/neon/workloads/NeonWorkloads.hpp +++ b/src/backends/neon/workloads/NeonWorkloads.hpp @@ -7,11 +7,11 @@ #include "NeonAbsWorkload.hpp" #include "NeonActivationWorkload.hpp" #include "NeonAdditionWorkload.hpp" -#include "NeonDivisionWorkload.hpp" #include "NeonArgMinMaxWorkload.hpp" #include "NeonBatchNormalizationWorkload.hpp" #include "NeonBatchToSpaceNdWorkload.hpp" #include "NeonComparisonWorkload.hpp" +#include "NeonConcatWorkload.hpp" #include "NeonConstantWorkload.hpp" #include "NeonConvertBf16ToFp32Workload.hpp" #include "NeonConvertFp16ToFp32Workload.hpp" @@ -22,15 +22,15 @@ #include "NeonDepthwiseConvolutionWorkload.hpp" #include "NeonDequantizeWorkload.hpp" #include "NeonDetectionPostProcessWorkload.hpp" +#include "NeonDivisionWorkload.hpp" +#include "NeonExpWorkload.hpp" #include "NeonFloorFloatWorkload.hpp" #include "NeonFullyConnectedWorkload.hpp" #include "NeonInstanceNormalizationWorkload.hpp" #include "NeonL2NormalizationFloatWorkload.hpp" #include "NeonLstmFloatWorkload.hpp" -#include "NeonQuantizedLstmWorkload.hpp" #include "NeonMaximumWorkload.hpp" #include "NeonMeanWorkload.hpp" -#include "NeonConcatWorkload.hpp" #include "NeonMinimumWorkload.hpp" 
#include "NeonMultiplicationWorkload.hpp" #include "NeonNegWorkload.hpp" @@ -39,6 +39,7 @@ #include "NeonPermuteWorkload.hpp" #include "NeonPooling2dWorkload.hpp" #include "NeonPreluWorkload.hpp" +#include "NeonQuantizedLstmWorkload.hpp" #include "NeonQuantizeWorkload.hpp" #include "NeonReshapeWorkload.hpp" #include "NeonResizeWorkload.hpp" |