Diffstat (limited to 'src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp')
-rw-r--r-- | src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp | 31 |
1 file changed, 31 insertions, 0 deletions
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
new file mode 100644
index 0000000000..229562ece2
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "NeonSoftmaxFloat32Workload.hpp"
+
+namespace armnn
+{
+NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info)
+    : Float32Workload<SoftmaxQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonSoftmaxFloat32Workload", 1, 1);
+
+    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions
+    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
+}
+
+void NeonSoftmaxFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSoftmaxFloat32Workload_Execute");
+    m_SoftmaxLayer.run();
+}
+} //namespace armnn
+
+
+
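For context, a minimal usage sketch (not part of this commit) of how the new workload is driven: the constructor configures the ArmCompute NESoftmaxLayer from the queue descriptor, and Execute() runs it. The helper name RunSoftmaxOnNeon and the handle/info arguments are illustrative assumptions; in the real code path the Neon workload factory prepares the descriptor and tensor handles.

#include "NeonSoftmaxFloat32Workload.hpp"

namespace armnn
{
// Hypothetical helper, for illustration only: builds and runs the workload for a
// single input/output pair. inputHandle and outputHandle are assumed to be
// INeonTensorHandle-backed handles prepared elsewhere, and info to describe them.
void RunSoftmaxOnNeon(ITensorHandle* inputHandle, ITensorHandle* outputHandle, const WorkloadInfo& info)
{
    SoftmaxQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Beta = 1.0f;        // forwarded to NESoftmaxLayer::configure in the constructor
    descriptor.m_Inputs.push_back(inputHandle);   // exactly one input, validated by ValidateInputsOutputs
    descriptor.m_Outputs.push_back(outputHandle); // exactly one output

    NeonSoftmaxFloat32Workload workload(descriptor, info); // configure() happens here
    workload.Execute();                                    // runs the ArmCompute softmax on CpuAcc
}
} // namespace armnn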