diff options
Diffstat (limited to 'src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp | 13 |
1 file changed, 10 insertions, 3 deletions
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp index 77fc429b95..92d499737e 100644 --- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp +++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp @@ -19,6 +19,7 @@ namespace armnn namespace { +using ACLMemManagerOnDemand = std::shared_ptr<arm_compute::MemoryManagerOnDemand>; bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters, Optional<std::string&> reasonIfUnsupported) @@ -58,10 +59,16 @@ arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input, } NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, - const WorkloadInfo& info, - std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager) + const WorkloadInfo& info, + ACLMemManagerOnDemand& memoryManager) : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info) { + // Report Profiling Details + ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonNormalizationWorkload_Construct", + descriptor.m_Parameters, + info, + this->GetGuid()); + m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1); std::string reasonIfUnsupported; if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported))) @@ -99,7 +106,7 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati void NeonNormalizationFloatWorkload::Execute() const { - ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute"); + ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonNormalizationFloatWorkload_Execute", this->GetGuid()); m_NormalizationLayer->run(); } |