diff options
Diffstat (limited to 'src/backends/neon')
3 files changed, 20 insertions, 15 deletions
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index e8d537f2ef..4b43052365 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -59,20 +59,24 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
     auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
     int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
 
+    auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
+
     if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
     {
-        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
+        layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
     }
     else
     {
-        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
+        layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
     }
+
+    m_ArgMinMaxLayer.reset(layer.release());
 }
 
 void NeonArgMinMaxWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
-    m_ArgMinMaxLayer.run();
+    m_ArgMinMaxLayer->run();
 }
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
index 6301b13718..6e1cc46c13 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.hpp
@@ -8,7 +8,8 @@
 #include <backendsCommon/Workload.hpp>
 
 #include <arm_compute/core/Error.h>
-#include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
+#include <arm_compute/runtime/IFunction.h>
+
 
 namespace armnn
 {
@@ -23,7 +24,7 @@ public:
     virtual void Execute() const override;
 
 private:
-    mutable arm_compute::NEArgMinMaxLayer m_ArgMinMaxLayer;
+    std::unique_ptr<arm_compute::IFunction> m_ArgMinMaxLayer;
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 18085edab5..2093613513 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -120,19 +120,19 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
     // Check for optimisation opportunities
     arm_compute::Status optimizationStatus =
-        arm_compute::NEDepthwiseConvolutionLayerOptimized::validate(inputInfo,
-                                                                    kernelInfo,
-                                                                    biasInfo,
-                                                                    outputInfo,
-                                                                    padStrideInfo,
-                                                                    depthMultiplier,
-                                                                    arm_compute::ActivationLayerInfo(),
-                                                                    aclDilationInfo);
+        arm_compute::NEDepthwiseConvolutionLayer::validate(inputInfo,
+                                                           kernelInfo,
+                                                           biasInfo,
+                                                           outputInfo,
+                                                           padStrideInfo,
+                                                           depthMultiplier,
+                                                           arm_compute::ActivationLayerInfo(),
+                                                           aclDilationInfo);
 
     if (optimizationStatus.error_code() == arm_compute::ErrorCode::OK)
     {
-        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayerOptimized>();
-        static_cast<arm_compute::NEDepthwiseConvolutionLayerOptimized*>(
+        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
+        static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
             m_pDepthwiseConvolutionLayer.get())->configure(&input,
                                                            m_KernelTensor.get(),
                                                            m_BiasTensor.get(),