Diffstat (limited to 'src/armnn/backends/NeonWorkloads')
 src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp      | 36
 src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp      | 16
 src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp   | 8
 src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp   | 14
 src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp     | 10
 src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp     | 10
 src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp  | 5
 src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp  | 11
 src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp | 4
 src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp | 12
 src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp   | 3
 src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp   | 5
 src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp         | 7
 src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp         | 10
 src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp           | 9
 src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp           | 8
 16 files changed, 112 insertions(+), 56 deletions(-)
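
The substance of the patch is that every Neon workload above now receives a std::shared_ptr<arm_compute::MemoryManagerOnDemand> at construction, so intermediate tensor memory can be pooled across workloads instead of being allocated per layer. As a minimal sketch of how such a manager is typically built (assuming the BlobLifetimeManager/PoolManager pairing from the Compute Library runtime; the helper name is hypothetical and not part of this patch):

// Sketch only: build the memory manager the constructors below expect.
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"

#include <memory>

std::shared_ptr<arm_compute::MemoryManagerOnDemand> CreateNeonMemoryManager()
{
    auto lifetimeManager = std::make_shared<arm_compute::BlobLifetimeManager>();
    auto poolManager     = std::make_shared<arm_compute::PoolManager>();
    // A single instance is shared by every workload the factory creates, which
    // is what lets ACL reuse one set of pools for all intermediate tensors.
    return std::make_shared<arm_compute::MemoryManagerOnDemand>(lifetimeManager, poolManager);
}

Whatever owns the manager must also finalize it after all layers are configured and before the first run; the exact finalization call differs between Compute Library releases, so it is omitted here.
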
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
index 10c96d82a6..423f02bcb0 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
@@ -12,9 +12,38 @@
namespace armnn
{
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const TensorInfo& biases)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+ const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights);
+ arm_compute::TensorInfo aclBiasesInfo;
+ arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
+
+ if (descriptor.m_BiasEnabled)
+ {
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases);
+ optionalAclBiasesInfo = &aclBiasesInfo;
+ }
+
+ arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
+
+ return arm_compute::NEConvolutionLayer::validate(&aclInputInfo,
+ &aclWeightsInfo,
+ optionalAclBiasesInfo,
+ &aclOutputInfo,
+ layerInfo);
+}
+
template<armnn::DataType dataType>
NeonConvolution2dBaseWorkload<dataType>::NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: TypedWorkload<Convolution2dQueueDescriptor, dataType>(descriptor, info)
{
using arm_compute::NEDirectConvolutionLayer;
@@ -50,7 +79,7 @@ NeonConvolution2dBaseWorkload<dataType>::NeonConvolution2dBaseWorkload(const Con
if (preferDirectConvolution)
{
- auto directConvolutionLayer = std::make_unique<arm_compute::NEDirectConvolutionLayer>();
+ auto directConvolutionLayer = std::make_unique<arm_compute::NEDirectConvolutionLayer>(memoryManager);
directConvolutionLayer->configure(&input,
&m_KernelTensor,
optionalBiasTensor,
@@ -60,7 +89,7 @@ NeonConvolution2dBaseWorkload<dataType>::NeonConvolution2dBaseWorkload(const Con
}
else
{
- auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>();
+ auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
convolutionLayer->configure(&input,
&m_KernelTensor,
optionalBiasTensor,
@@ -81,4 +110,3 @@ template class NeonConvolution2dBaseWorkload<DataType::QuantisedAsymm8>;
} //namespace armnn
-
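
The new NeonConvolution2dWorkloadValidate above returns an arm_compute::Status rather than a bool. A hedged sketch of how a caller in the layer-support code might unpack it (the wrapper function and its reasonIfUnsupported out-parameter follow ArmNN's convention but are hypothetical here):

// Sketch: translate the ACL Status into a bool plus reason string.
bool IsNeonConvolution2dSupportedSketch(const armnn::TensorInfo& input,
                                        const armnn::TensorInfo& output,
                                        const armnn::Convolution2dDescriptor& descriptor,
                                        const armnn::TensorInfo& weights,
                                        const armnn::TensorInfo& biases,
                                        std::string* reasonIfUnsupported)
{
    const arm_compute::Status status =
        armnn::NeonConvolution2dWorkloadValidate(input, output, descriptor, weights, biases);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        if (reasonIfUnsupported != nullptr)
        {
            *reasonIfUnsupported = status.error_description(); // ACL's failure message
        }
        return false;
    }
    return true;
}
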
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
index 98d075a5ea..d28d50d819 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
@@ -12,16 +12,27 @@
#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/NeonLayerSupport.hpp"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
+
namespace armnn
{
+arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const TensorInfo& biases);
+
template<armnn::DataType dataType>
class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataType>
{
public:
using TypedWorkload<Convolution2dQueueDescriptor, dataType>::m_Data;
- NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void ValidateData() const {};
@@ -30,4 +41,5 @@ protected:
arm_compute::Tensor m_KernelTensor;
arm_compute::Tensor m_BiasTensor;
};
-} //namespace armnn
\ No newline at end of file
+
+} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
index a8c5c63683..f20f2a4ac5 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
@@ -13,8 +13,8 @@ namespace armnn
using namespace armcomputetensorutils;
NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : NeonConvolution2dBaseWorkload(descriptor, info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+ : NeonConvolution2dBaseWorkload(descriptor, info, memoryManager)
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
@@ -22,7 +22,6 @@ NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolu
}
}
-
void NeonConvolution2dFloat32Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonConvolution2dFloat32Workload_Execute");
@@ -34,8 +33,5 @@ void NeonConvolution2dFloat32Workload::ValidateData() const
m_Data.ValidateInputsOutputs("NeonConvolution2dFloat32Workload", 1, 1);
}
-
-
} //namespace armnn
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp
index f4d95d623f..56b0848efa 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp
@@ -5,21 +5,25 @@
#pragma once
-#include <backends/NeonWorkloadUtils.hpp>
#include "NeonConvolution2dBaseWorkload.hpp"
+#include <backends/NeonWorkloadUtils.hpp>
+
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
namespace armnn
{
+
class NeonConvolution2dFloat32Workload : public NeonConvolution2dBaseWorkload<DataType::Float32>
{
public:
- NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
void Execute() const override;
void ValidateData() const override;
};
-} //namespace armnn
-
-
+} //namespace armnn
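
Since the constructor now takes three arguments, any factory creating this workload has to hold the shared manager and pass it through. A hypothetical call site (the free function is an assumption; only the constructor signature comes from this header):

// Sketch: factory-side creation, forwarding the shared memory manager.
std::unique_ptr<armnn::NeonConvolution2dFloat32Workload> MakeConv2dFloat32Sketch(
    const armnn::Convolution2dQueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager) // held by the factory
{
    return std::make_unique<armnn::NeonConvolution2dFloat32Workload>(descriptor, info, memoryManager);
}
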
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp
index ae20522361..fb91f7b7b2 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp
@@ -5,12 +5,12 @@
#include "NeonConvolution2dUint8Workload.hpp"
-
namespace armnn
{
+
NeonConvolution2dUint8Workload::NeonConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : NeonConvolution2dBaseWorkload(descriptor, info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+ : NeonConvolution2dBaseWorkload(descriptor, info, memoryManager)
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
@@ -21,7 +21,7 @@ NeonConvolution2dUint8Workload::NeonConvolution2dUint8Workload(const Convolution
void NeonConvolution2dUint8Workload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, NeonConvolution2dUint8Workload_Execute);
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonConvolution2dUint8Workload_Execute");
m_ConvolutionLayer->run();
}
@@ -30,4 +30,4 @@ void NeonConvolution2dUint8Workload::ValidateData() const
m_Data.ValidateInputsOutputs("NeonConvolution2dUint8Workload", 1, 1);
}
-} //namespace armnn
\ No newline at end of file
+} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp
index 319d574b1e..5b977210c4 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp
@@ -7,13 +7,18 @@
#include "NeonConvolution2dBaseWorkload.hpp"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
+
namespace armnn
{
class NeonConvolution2dUint8Workload : public NeonConvolution2dBaseWorkload<DataType::QuantisedAsymm8>
{
public:
- NeonConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void ValidateData() const override;
virtual void Execute() const override;
@@ -22,6 +27,3 @@ private:
} //namespace armnnn
-
-
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp
index 54c4e4333c..e1c4448642 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp
@@ -7,14 +7,14 @@
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
-
namespace armnn
{
using namespace armcomputetensorutils;
NeonFullyConnectedFloat32Workload::NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: Float32Workload<FullyConnectedQueueDescriptor>(descriptor, info)
+ , m_FullyConnectedLayer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonFullyConnectedFloat32Workload", 1, 1);
@@ -51,4 +51,3 @@ void NeonFullyConnectedFloat32Workload::Execute() const
} //namespace armnn
-
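
Unlike the convolution base workload, which forwards the manager to layers it heap-allocates, NEFullyConnectedLayer here is a by-value member, so the manager is bound in the member-initializer list and configure() only wires up tensors. A standalone sketch of that pattern (tensor setup elided; the NEFullyConnectedLayer API is assumed to match the Compute Library release this patch targets):

// Sketch: a memory-managed ACL function bound to the manager at construction.
void RunFullyConnectedSketch(std::shared_ptr<arm_compute::MemoryManagerOnDemand> memoryManager,
                             const arm_compute::ITensor* input,
                             const arm_compute::ITensor* weights,
                             const arm_compute::ITensor* biases,
                             arm_compute::ITensor* output)
{
    arm_compute::NEFullyConnectedLayer fc(memoryManager); // manager fixed here
    fc.configure(input, weights, biases, output);         // tensors wired here
    // ...finalize/populate the memory manager before the first run...
    fc.run();
}
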
diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp
index f9230f1d93..9c722dc573 100644
--- a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp
@@ -7,13 +7,18 @@
#include <backends/NeonWorkloadUtils.hpp>
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
+
namespace armnn
{
class NeonFullyConnectedFloat32Workload : public Float32Workload<FullyConnectedQueueDescriptor>
{
public:
- NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
@@ -24,7 +29,3 @@ private:
} //namespace armnn
-
-
-
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp
index 085f58a219..9f79fa09de 100644
--- a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp
@@ -6,13 +6,13 @@
#include "NeonL2NormalizationFloat32Workload.hpp"
#include "backends/ArmComputeUtils.hpp"
-
namespace armnn
{
NeonL2NormalizationFloat32Workload::NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: Float32Workload<L2NormalizationQueueDescriptor>(descriptor, info)
+ , m_Layer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonL2NormalizationFloat32Workload", 1, 1);
diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp
index 6cab28366a..2b4a1fef37 100644
--- a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp
@@ -7,20 +7,24 @@
#include <backends/NeonWorkloadUtils.hpp>
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
+
namespace armnn
{
+
class NeonL2NormalizationFloat32Workload : public Float32Workload<L2NormalizationQueueDescriptor>
{
public:
- NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
// Purposely not a NEL2Normalize function. See constructor.
mutable arm_compute::NENormalizationLayer m_Layer;
};
-} //namespace armnn
-
-
+} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp
index 739390d5a1..0fd0dcc420 100644
--- a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp
@@ -11,8 +11,9 @@ namespace armnn
{
NeonNormalizationFloat32Workload::NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: Float32Workload<NormalizationQueueDescriptor>(descriptor, info)
+ , m_NormalizationLayer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonNormalizationFloat32Workload", 1, 1);
std::string reasonIfUnsupported;
diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp
index 12a0fa80b2..24b6da8528 100644
--- a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp
@@ -7,13 +7,16 @@
#include <backends/NeonWorkloadUtils.hpp>
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
namespace armnn
{
class NeonNormalizationFloat32Workload : public Float32Workload<NormalizationQueueDescriptor>
{
public:
- NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
index 229562ece2..5e2925ca02 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
@@ -7,9 +7,11 @@
namespace armnn
{
+
NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info)
+ const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: Float32Workload<SoftmaxQueueDescriptor>(descriptor, info)
+ , m_SoftmaxLayer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonSoftmaxFloat32Workload", 1, 1);
@@ -25,7 +27,6 @@ void NeonSoftmaxFloat32Workload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSoftmaxFloat32Workload_Execute");
m_SoftmaxLayer.run();
}
-} //namespace armnn
-
+} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp
index c466a0f9c6..91d25b47f8 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp
@@ -7,13 +7,18 @@
#include <backends/NeonWorkloadUtils.hpp>
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
+#include <memory>
+
namespace armnn
{
class NeonSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor>
{
public:
- NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
@@ -22,6 +27,3 @@ private:
} //namespace armnn
-
-
-
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp
index a66b0343ff..eb4a23c13c 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp
@@ -5,12 +5,14 @@
#include "NeonSoftmaxUint8Workload.hpp"
-
-
namespace armnn
{
-NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info)
+
+NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
: Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info)
+ , m_SoftmaxLayer(memoryManager)
{
m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
@@ -34,5 +36,6 @@ void NeonSoftmaxUint8Workload::Execute() const
m_SoftmaxLayer.run();
}
+
} //namespace armnn
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp
index bccd82a850..19549ef3ef 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp
@@ -7,13 +7,16 @@
#include <backends/NeonWorkloadUtils.hpp>
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+
namespace armnn
{
class NeonSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor>
{
public:
- NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
virtual void Execute() const override;
private:
@@ -22,6 +25,3 @@ private:
} //namespace armnn
-
-
-