From 5e98b012597c13dc0acb048bd10a84a2a397a346 Mon Sep 17 00:00:00 2001
From: Matthew Bentham <matthew.bentham@arm.com>
Date: Fri, 24 Jan 2020 23:11:43 +0000
Subject: Clean up header usage a bit in NEON backend

Including NEFunctions.h is unnecessary and adds about a second to
compile time each translation unit in which it appears, so we should
use just the header file with the arm compute function declarations
that we need.

Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I605d0eb82ccf2aafa35381a5d9d54337d3fe17a7
---
 src/backends/neon/workloads/NeonDequantizeWorkload.cpp |  9 ++++++---
 src/backends/neon/workloads/NeonDequantizeWorkload.hpp |  5 +++--
 src/backends/neon/workloads/NeonSplitterWorkload.cpp   | 10 ++++++----
 src/backends/neon/workloads/NeonSplitterWorkload.hpp   |  5 +++--
 4 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index aa454c921b..8b229a1cda 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -7,6 +7,8 @@
 
 #include "NeonWorkloadUtils.hpp"
 
+#include <arm_compute/runtime/NEON/functions/NEDequantizationLayer.h>
+
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <backendsCommon/Workload.hpp>
 #include <neon/NeonTensorHandle.hpp>
@@ -33,9 +35,10 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor&
     arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    m_Layer.reset(new arm_compute::NEDequantizationLayer());
-    m_Layer->configure(&input, &output);
-    m_Layer->prepare();
+    std::unique_ptr<arm_compute::NEDequantizationLayer> layer(new arm_compute::NEDequantizationLayer());
+    layer->configure(&input, &output);
+    layer->prepare();
+    m_Layer.reset(layer.release());
 }
 
 void NeonDequantizeWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.hpp b/src/backends/neon/workloads/NeonDequantizeWorkload.hpp
index a6616376e7..63ea783474 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.hpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.hpp
@@ -7,7 +7,8 @@
 
 #include <backendsCommon/Workload.hpp>
 
-#include <arm_compute/runtime/NEON/NEFunctions.h>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
 
 #include <memory>
 
@@ -25,7 +26,7 @@ public:
     void Execute() const override;
 
 private:
-    mutable std::unique_ptr<arm_compute::NEDequantizationLayer> m_Layer;
+    mutable std::unique_ptr<arm_compute::IFunction> m_Layer;
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index 5b5614662d..224e97af2d 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -5,13 +5,14 @@
 
 #include "NeonSplitterWorkload.hpp"
 
-#include "NeonWorkloadUtils.hpp"
+#include <arm_compute/runtime/NEON/functions/NESplit.h>
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
+#include "NeonWorkloadUtils.hpp"
 
 namespace armnn
 {
@@ -83,7 +84,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
     }
 
     // Create the layer function
-    m_Layer.reset(new arm_compute::NESplit());
+    std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());
 
     // Configure input and output tensors
     std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
@@ -93,10 +94,11 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
     }
 
     unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
-    m_Layer->configure(&input, aclOutputs, aclAxis);
+    layer->configure(&input, aclOutputs, aclAxis);
 
     // Prepare
-    m_Layer->prepare();
+    layer->prepare();
+    m_Layer.reset(layer.release());
 }
 
 void NeonSplitterWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.hpp b/src/backends/neon/workloads/NeonSplitterWorkload.hpp
index f9025663ca..66349f947f 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.hpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.hpp
@@ -7,7 +7,8 @@
 
 #include <backendsCommon/Workload.hpp>
 
-#include <arm_compute/runtime/NEON/NEFunctions.h>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
 
 #include <memory>
 
@@ -26,7 +27,7 @@ public:
     void Execute() const override;
 
 private:
-    mutable std::unique_ptr<arm_compute::NESplit> m_Layer;
+    mutable std::unique_ptr<arm_compute::IFunction> m_Layer;
 };
 
 } //namespace armnn
-- 
cgit v1.2.1