NeonSoftmaxUint8Workload.cpp (ArmNN 20.02)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonSoftmaxUint8Workload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>

#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>

namespace armnn
{

NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);

    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // Only an output quantization of scale = 1/256 and offset = 0 is supported for
    // uint8 softmax, so reject anything else up front.
    const auto outputQuantization = output.info()->quantization_info();

    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
        outputQuantization.scale().empty() || outputQuantization.offset().empty())
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }

    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
    layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
    m_SoftmaxLayer.reset(layer.release());
}

void NeonSoftmaxUint8Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");

    m_SoftmaxLayer->run();
}

} //namespace armnn
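The quantization check in the constructor means that a QAsymmU8 softmax output must be described to ArmNN with scale = 1.0f / 256.0f and offset = 0, or workload creation throws InvalidArgumentException. The following is a minimal sketch, not part of this file, of how a caller using the public INetwork API might set the tensor infos so this workload accepts them; the function name BuildSoftmaxNetwork and the tensor shapes are illustrative assumptions only.

#include <armnn/ArmNN.hpp>

// Sketch: build a one-layer softmax network whose output quantization matches
// the constraint enforced by NeonSoftmaxUint8Workload.
armnn::INetworkPtr BuildSoftmaxNetwork()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    SoftmaxDescriptor softmaxDesc;
    softmaxDesc.m_Beta = 1.0f; // exponentiation value forwarded to NESoftmaxLayer::configure

    IConnectableLayer* input   = network->AddInputLayer(0);
    IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");
    IConnectableLayer* output  = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // The softmax output must use scale 1/256 and offset 0; any other value would
    // hit the InvalidArgumentException branch in the constructor above.
    TensorInfo inputInfo({ 1, 10 }, DataType::QAsymmU8, 0.1f, 0);
    TensorInfo outputInfo({ 1, 10 }, DataType::QAsymmU8, 1.0f / 256.0f, 0);

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    softmax->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return network;
}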