Diffstat (limited to 'src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp')
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp  41
1 file changed, 0 insertions(+), 41 deletions(-)
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp
deleted file mode 100644
index cff869c9b7..0000000000
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonSoftmaxUint8Workload.hpp"
-
-namespace armnn
-{
-
-NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
- , m_SoftmaxLayer(memoryManager)
-{
- m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- const auto outputQuantization = output.info()->quantization_info();
-
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
- {
- throw InvalidArgumentException(
- "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
- }
-
- m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta);
-}
-
-void NeonSoftmaxUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxUint8Workload_Execute");
-
- m_SoftmaxLayer.run();
-}
-
-} //namespace armnn
-
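For context on the check the deleted constructor performed: softmax produces values in [0, 1], so for an 8-bit asymmetric (QAsymmU8) output the NEON kernel only accepts a quantization with scale = 1/256 and offset = 0, which maps the 256 representable steps exactly onto that range. Below is a minimal standalone sketch of the same validation; the QuantizationInfo struct and ValidateSoftmaxUint8Output helper are hypothetical stand-ins for illustration, not Arm NN or Compute Library APIs.

#include <stdexcept>

// Hypothetical stand-in for the quantization metadata carried by a tensor.
struct QuantizationInfo
{
    float scale;   // real-valued size of one quantized step
    int   offset;  // zero point of the asymmetric encoding
};

// Softmax outputs lie in [0, 1]; 256 steps of size 1/256 starting at
// offset 0 cover that range exactly, so any other parameters are
// rejected, mirroring the check in the deleted constructor above.
void ValidateSoftmaxUint8Output(const QuantizationInfo& outputQuantization)
{
    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
    {
        throw std::invalid_argument(
            "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
    }
}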