path: root/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
author    Matthew Bentham <matthew.bentham@arm.com>    2019-01-08 17:52:37 +0000
committer Matthew Bentham <matthew.bentham@arm.com>    2019-01-08 18:00:12 +0000
commit    d80a7126b0abdd532a9f731559827a23f2e565e0 (patch)
tree      e04f6454e2353469dd9806b2a589c54b61dd777d /src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
parent    a1d3c6a49f35d7d3f11cc7e1b588d1d5401bdbf1 (diff)
download  armnn-d80a7126b0abdd532a9f731559827a23f2e565e0.tar.gz
Refactor: Don't include all ComputeLibrary function definitions everywhere.
Just include the function definition that is specifically needed for each
workload. Also, tighten up the scope where Compute Library functions are
available. Knocks about 30 seconds off a 4m30s single-threaded compile of
the Neon workloads.

Change-Id: Idac438f3bc77ff978295fbc9505cb42447def145
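The header-side half of this change is not shown in the diff below, but the
commit message implies the pattern: the workload header stops naming the
concrete ComputeLibrary function type and instead owns it through the small
arm_compute::IFunction interface, so only the .cpp needs the full
NESoftmaxLayer.h definition. A minimal sketch of what
NeonSoftmaxFloatWorkload.hpp would look like under that assumption (the armnn
base-class and descriptor includes are elided):

    // NeonSoftmaxFloatWorkload.hpp (sketch) -- no per-function ComputeLibrary
    // headers here, only the small IFunction interface header.
    #include <arm_compute/runtime/IFunction.h>
    #include <arm_compute/runtime/MemoryManagerOnDemand.h>
    #include <memory>

    namespace armnn
    {

    class NeonSoftmaxFloatWorkload : public FloatWorkload<SoftmaxQueueDescriptor>
    {
    public:
        NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
                                 const WorkloadInfo& info,
                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
        virtual void Execute() const override;

    private:
        // Type-erased owning pointer; the concrete arm_compute::NESoftmaxLayer
        // is only named in the .cpp, behind this interface.
        std::unique_ptr<arm_compute::IFunction> m_SoftmaxLayer;
    };

    } // namespace armnn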
Diffstat (limited to 'src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 11 +++++++----
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 92e5139c1a..d9c78bbd43 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -5,13 +5,16 @@
 
 #include "NeonSoftmaxFloatWorkload.hpp"
 
+#include "NeonWorkloadUtils.hpp"
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+
 namespace armnn
 {
 
 NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor,
     const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
     : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
-    , m_SoftmaxLayer(memoryManager)
 {
     m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
 
@@ -19,13 +22,15 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta);
+    auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+    layer->configure(&input, &output, m_Data.m_Parameters.m_Beta);
+    m_SoftmaxLayer.reset(layer.release());
 }
 
 void NeonSoftmaxFloatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloatWorkload_Execute");
-    m_SoftmaxLayer.run();
+    m_SoftmaxLayer->run();
 }
 
 } //namespace armnn
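A note on the ownership handoff above: assuming m_SoftmaxLayer is now a
std::unique_ptr<arm_compute::IFunction> as sketched earlier,
m_SoftmaxLayer.reset(layer.release()) transfers the concrete NESoftmaxLayer
into the base-interface pointer; m_SoftmaxLayer = std::move(layer) would do
the same via unique_ptr's converting move assignment. Either way, the virtual
IFunction::run() dispatches to the softmax implementation at Execute() time,
and the heavy function header stays confined to this translation unit.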