diff options
author | Matthew Bentham <matthew.bentham@arm.com> | 2020-01-24 23:11:43 +0000 |
---|---|---|
committer | Nikhil Raj Arm <nikhil.raj@arm.com> | 2020-02-04 15:42:53 +0000 |
commit | 5e98b012597c13dc0acb048bd10a84a2a397a346 (patch) | |
tree | dd4ee3aa279277e328a4a01905b23605dd679923 /src/backends | |
parent | aa920c56838c2a0b31bd4e3c54bd57ff2f20969e (diff) | |
download | armnn-5e98b012597c13dc0acb048bd10a84a2a397a346.tar.gz |
Clean up header usage a bit in NEON backend
Including NEFunctions.h is unnecessary and adds about a second
of compile time to each translation unit in which it appears,
so we should include just the header files with the Arm Compute
function declarations that we need.
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I605d0eb82ccf2aafa35381a5d9d54337d3fe17a7
Diffstat (limited to 'src/backends')
4 files changed, 18 insertions, 11 deletions
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp index aa454c921b..8b229a1cda 100644 --- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp +++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp @@ -7,6 +7,8 @@ #include "NeonWorkloadUtils.hpp" +#include <arm_compute/runtime/NEON/functions/NEDequantizationLayer.h> + #include <aclCommon/ArmComputeTensorUtils.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <neon/NeonTensorHandle.hpp> @@ -33,9 +35,10 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor& arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); - m_Layer.reset(new arm_compute::NEDequantizationLayer()); - m_Layer->configure(&input, &output); - m_Layer->prepare(); + std::unique_ptr<arm_compute::NEDequantizationLayer> layer(new arm_compute::NEDequantizationLayer()); + layer->configure(&input, &output); + layer->prepare(); + m_Layer.reset(layer.release()); } void NeonDequantizeWorkload::Execute() const diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.hpp b/src/backends/neon/workloads/NeonDequantizeWorkload.hpp index a6616376e7..63ea783474 100644 --- a/src/backends/neon/workloads/NeonDequantizeWorkload.hpp +++ b/src/backends/neon/workloads/NeonDequantizeWorkload.hpp @@ -7,7 +7,8 @@ #include <backendsCommon/Workload.hpp> -#include <arm_compute/runtime/NEON/NEFunctions.h> +#include <arm_compute/core/Error.h> +#include <arm_compute/runtime/IFunction.h> #include <functional> @@ -25,7 +26,7 @@ public: void Execute() const override; private: - mutable std::unique_ptr<arm_compute::NEDequantizationLayer> m_Layer; + mutable std::unique_ptr<arm_compute::IFunction> m_Layer; }; } //namespace armnn diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp 
b/src/backends/neon/workloads/NeonSplitterWorkload.cpp index 5b5614662d..224e97af2d 100644 --- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp +++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp @@ -5,13 +5,14 @@ #include "NeonSplitterWorkload.hpp" -#include "NeonWorkloadUtils.hpp" +#include <arm_compute/runtime/NEON/functions/NESplit.h> #include <aclCommon/ArmComputeTensorUtils.hpp> #include <aclCommon/ArmComputeUtils.hpp> #include <backendsCommon/CpuTensorHandle.hpp> #include <neon/NeonTensorHandle.hpp> +#include "NeonWorkloadUtils.hpp" namespace armnn { @@ -83,7 +84,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri } // Create the layer function - m_Layer.reset(new arm_compute::NESplit()); + std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit()); // Configure input and output tensors std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape()); @@ -93,10 +94,11 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri } unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin()); - m_Layer->configure(&input, aclOutputs, aclAxis); + layer->configure(&input, aclOutputs, aclAxis); // Prepare - m_Layer->prepare(); + layer->prepare(); + m_Layer.reset(layer.release()); } void NeonSplitterWorkload::Execute() const diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.hpp b/src/backends/neon/workloads/NeonSplitterWorkload.hpp index f9025663ca..66349f947f 100644 --- a/src/backends/neon/workloads/NeonSplitterWorkload.hpp +++ b/src/backends/neon/workloads/NeonSplitterWorkload.hpp @@ -7,7 +7,8 @@ #include <backendsCommon/Workload.hpp> -#include <arm_compute/runtime/NEON/NEFunctions.h> +#include <arm_compute/core/Error.h> +#include <arm_compute/runtime/IFunction.h> #include <functional> @@ -26,7 +27,7 @@ public: void Execute() const override; private: - mutable 
std::unique_ptr<arm_compute::NESplit> m_Layer; + mutable std::unique_ptr<arm_compute::IFunction> m_Layer; }; } //namespace armnn |