| field | value | date |
|---|---|---|
| author | Sadik Armagan <sadik.armagan@arm.com> | 2020-04-30 11:39:37 +0100 |
| committer | Kevin May <kevin.may@arm.com> | 2020-04-30 10:46:12 +0000 |
| commit | be88a57579a9a848efe13e6c524b5b104b871733 (patch) | |
| tree | d5dc03627048f8ecd2d728b154434244f05475ea /src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | |
| parent | 9937f9359ac4eeefc3535b66eddddd1b4f067c54 (diff) | |
| download | armnn-be88a57579a9a848efe13e6c524b5b104b871733.tar.gz | |
IVGCVSW-4753 Fix CpuAcc Hal 1.3 Softmax Failures
* Refactor Neon Softmax workload to accept supported data types
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I54aa72d5cbb862cafcc1eabe48f6a00d61050cd7
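
The unified, data-type-agnostic workload that replaces this file is not part of the diff shown below (the view is limited to the deleted NeonSoftmaxUint8Workload.cpp). As a rough sketch of what "accept supported data types" could look like (the class name NeonSoftmaxWorkload and the BaseWorkload base class are assumptions; the helpers and includes are the ones already used by the deleted file), the refactored constructor drops the Uint8-only output-quantization check and lets Arm Compute Library's NESoftmaxLayer validate the data types it supports:

    // Hypothetical sketch only, not the actual replacement file from this change.
    #include "NeonWorkloadUtils.hpp"

    #include <aclCommon/ArmComputeUtils.hpp>
    #include <armnn/utility/PolymorphicDowncast.hpp>
    #include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>

    namespace armnn
    {

    NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
                                             const WorkloadInfo& info,
                                             std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
        : BaseWorkload<SoftmaxQueueDescriptor>(descriptor, info)
    {
        m_Data.ValidateInputsOutputs("NeonSoftmaxWorkload", 1, 1);

        arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
        arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

        // No fixed-quantization guard here: which input/output data types are
        // accepted is left to NESoftmaxLayer, so float and quantized tensors can
        // share a single workload class.
        auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
        unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
        layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
        m_SoftmaxLayer.reset(layer.release());
    }

    } // namespace armnn

On that model, per-type classes such as NeonSoftmaxUint8Workload become unnecessary, which is why the file below is removed.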
Diffstat (limited to 'src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 51 |
1 file changed, 0 insertions, 51 deletions
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
deleted file mode 100644
index 05d93b963c..0000000000
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxUint8Workload.hpp"
-#include "NeonWorkloadUtils.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-
-namespace armnn
-{
-
-NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info,
-                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
-
-    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    const auto outputQuantization = output.info()->quantization_info();
-
-    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
-        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
-        outputQuantization.scale().empty() || outputQuantization.offset().empty())
-    {
-        throw InvalidArgumentException(
-            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
-    }
-
-    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
-    layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
-    m_SoftmaxLayer.reset(layer.release());
-}
-
-void NeonSoftmaxUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");
-
-    m_SoftmaxLayer->run();
-}
-
-} //namespace armnn
-
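
For context on the guard being removed: the deleted constructor only accepted the one fixed output quantization that maps softmax's [0, 1) range onto the full unsigned 8-bit range, which is what made this workload Uint8-specific in the first place. A minimal standalone illustration of that mapping (names here are illustrative only):

    #include <cstdio>

    int main()
    {
        // Softmax outputs lie in [0, 1). With scale = 1/256 and zero point = 0,
        // real = scale * (q - zeroPoint), so q = 0..255 covers 0.0 .. 255/256.
        // This is the fixed QAsymmU8 output quantization the deleted check enforced.
        const float scale     = 1.0f / 256.0f;
        const int   zeroPoint = 0;

        std::printf("min = %f, max = %f\n",
                    scale * (0 - zeroPoint),      // 0.000000
                    scale * (255 - zeroPoint));   // 0.996094
        return 0;
    }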