diff options
author | Matthew Bentham <matthew.bentham@arm.com> | 2019-01-08 17:52:37 +0000 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2019-01-08 18:00:12 +0000 |
commit | d80a7126b0abdd532a9f731559827a23f2e565e0 (patch) | |
tree | e04f6454e2353469dd9806b2a589c54b61dd777d /src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | |
parent | a1d3c6a49f35d7d3f11cc7e1b588d1d5401bdbf1 (diff) | |
download | armnn-d80a7126b0abdd532a9f731559827a23f2e565e0.tar.gz |
Refactor: Don't include all ComputeLibrary function definitions everywhere.
Just include the function definition that is specifically needed for each workload.
Also, tighten up the scope where Compute Library functions are available.
Knocks about 30 seconds off a 4m30s single-threaded compile of the Neon workloads.
Change-Id: Idac438f3bc77ff978295fbc9505cb42447def145
Diffstat (limited to 'src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 12 |
1 file changed, 9 insertions, 3 deletions
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp index afaa700624..99bbcfa824 100644 --- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp @@ -4,8 +4,13 @@ // #include "NeonL2NormalizationFloatWorkload.hpp" + +#include "NeonWorkloadUtils.hpp" + #include <aclCommon/ArmComputeUtils.hpp> +#include <arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h> + namespace armnn { using namespace armcomputetensorutils; @@ -25,7 +30,6 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input, NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager) : FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info) - , m_Layer(memoryManager) { m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1); @@ -38,13 +42,15 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma unsigned int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0; - m_Layer.configure(&input, &output, axis); + auto layer = std::make_unique<arm_compute::NEL2NormalizeLayer>(memoryManager); + layer->configure(&input, &output, axis); + m_Layer.reset(layer.release()); } void NeonL2NormalizationFloatWorkload::Execute() const { ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonL2NormalizationFloatWorkload_Execute"); - m_Layer.run(); + m_Layer->run(); } } //namespace armnn |