Diffstat (limited to 'src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp')
 src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 92e5139c1a..d9c78bbd43 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -5,13 +5,16 @@
#include "NeonSoftmaxFloatWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+
namespace armnn
{
NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
- , m_SoftmaxLayer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
@@ -19,13 +22,15 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
+ auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta);
+ m_SoftmaxLayer.reset(layer.release());
}
void NeonSoftmaxFloatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
- m_SoftmaxLayer.run();
+ m_SoftmaxLayer->run();
}
} //namespace armnn
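
For context, the change above stops holding arm_compute::NESoftmaxLayer by value: the concrete layer is now built and configured inside the constructor, and ownership is handed to a pointer-typed member that Execute() calls through. From m_SoftmaxLayer.reset(layer.release()) and m_SoftmaxLayer->run() it appears the member becomes a std::unique_ptr to the arm_compute::IFunction base interface, which would also explain why the NESoftmaxLayer.h include moved into the .cpp; the header is not shown in this diff, so that is an inference, not confirmed. The following self-contained sketch illustrates that ownership pattern with stand-in types (IFunction, SoftmaxFunction, and Workload here are illustrative names, not ArmNN or Compute Library APIs):

    // Sketch of the pattern: configure a concrete function object locally,
    // then store it through a pointer to its base interface so the owning
    // class only depends on the interface, not the concrete type.
    #include <iostream>
    #include <memory>

    struct IFunction                    // stand-in for an arm_compute-style base
    {
        virtual ~IFunction() = default;
        virtual void run() = 0;
    };

    struct SoftmaxFunction : IFunction  // stand-in for a concrete layer
    {
        void configure(float beta) { m_Beta = beta; }
        void run() override { std::cout << "softmax, beta=" << m_Beta << '\n'; }
        float m_Beta = 1.0f;
    };

    class Workload
    {
    public:
        explicit Workload(float beta)
        {
            // Mirrors the diff: build and configure the concrete type locally...
            auto layer = std::make_unique<SoftmaxFunction>();
            layer->configure(beta);
            // ...then transfer ownership to the interface-typed member.
            m_Function = std::move(layer);
        }

        void Execute() const { m_Function->run(); }

    private:
        // The owning class sees only the interface; the concrete header can
        // stay out of the class definition entirely.
        std::unique_ptr<IFunction> m_Function;
    };

    int main()
    {
        Workload w(2.0f);
        w.Execute();
    }

One practical benefit of this shape is that the workload header no longer drags in the full Compute Library function definition for every translation unit that includes it, at the cost of one heap allocation and a virtual call per run.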