ArmNN 20.02: NeonSoftmaxFloatWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonSoftmaxFloatWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>

namespace armnn
{

NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);

    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
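    // ComputeSoftmaxAclAxis converts the ArmNN softmax axis into Arm Compute
    // Library coordinates; ACL indexes dimensions from the innermost outwards,
    // the reverse of ArmNN's ordering, so the axis cannot be passed through
    // unchanged.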
    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
    m_SoftmaxLayer.reset(layer.release());
}

void NeonSoftmaxFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
    m_SoftmaxLayer->run();
}

} //namespace armnn
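
For reference, the m_Beta value passed to NESoftmaxLayer::configure above scales the logits before exponentiation, so the configured layer computes softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j) along the selected axis. The standalone sketch below illustrates that computation for a single row of logits; ReferenceSoftmax is an illustrative name for this note, not an ArmNN or ACL API.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative reference implementation of softmax with a beta scale:
// softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j).
std::vector<float> ReferenceSoftmax(const std::vector<float>& logits, float beta)
{
    // Subtract the maximum logit before exponentiation for numerical
    // stability; the constant factor cancels out of the final ratio.
    const float maxLogit = *std::max_element(logits.begin(), logits.end());

    std::vector<float> result(logits.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < logits.size(); ++i)
    {
        result[i] = std::exp(beta * (logits[i] - maxLogit));
        sum += result[i];
    }
    for (float& value : result)
    {
        value /= sum; // normalise so the outputs sum to 1
    }
    return result;
}

With beta = 1.0f this is the standard softmax; larger values of beta sharpen the output distribution towards the maximum logit.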