ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
    , m_SoftmaxLayer(memoryManager)
{
    arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    const auto outputQuantization = output.info()->quantization_info();
    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
        outputQuantization.scale().empty() || outputQuantization.offset().empty())
    {
        throw InvalidArgumentException(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }
}
References:
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
LayerDescriptor m_Parameters
ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr<arm_compute::MemoryManagerOnDemand> &memoryManager)
float m_Beta - Exponentiation value.
const QueueDescriptor m_Data
std::vector< TensorInfo > m_InputTensorInfos
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor &softmaxDesc, const armnn::TensorInfo &tensor)
void Execute() const override
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name)
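For context, several of the referenced symbols meet in the Execute() override: it opens a scoped CL profiling event and hands the configured layer to RunClFunction, which runs it and reports any CL error together with the caller's source location. (ComputeSoftmaxAclAxis, also referenced above, translates the ArmNN softmax axis into the Compute Library's reversed dimension ordering before the layer is configured.) A sketch of that shape, with the exact body not shown on this page; CHECK_LOCATION() is ArmNN's macro for capturing the current source location:

void ClSoftmaxUint8Workload::Execute() const
{
    // Scoped event so the softmax run appears in CL profiling traces.
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSoftmaxUint8Workload_Execute");
    // Runs m_SoftmaxLayer and rethrows CL errors annotated with this location.
    RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}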