From 7cbe78140a274cec783049051df7c7298b974f13 Mon Sep 17 00:00:00 2001
From: Mike Kelly <mike.kelly@arm.com>
Date: Tue, 25 Jul 2023 17:37:33 +0100
Subject: MLCE-1092 Add Names to Workloads

 * Added names to Workloads.
 * Workloads will be given the name of the Layer that created them.
 * Added new profiling macros to CL, Neon and Ref that add the workload name
   to the event label.
 * Updated workloads to use the new macros.
 * Added missing profiling to Rank Workloads.
 * Fixed an issue where ClConvolution2dWorkload was being reported as
   Undefined rather than GpuAcc.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I0a55eab6c2f455b73943aca8e99a247c3cb2a906
---
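Note for reviewers: the ClWorkloadUtils.hpp hunk that actually defines the new
ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID macro falls outside this excerpt,
which covers only the CL workload sources. As a rough sketch of the intent,
assuming the existing ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS plumbing
that the old CL macros already use, the new macro is expected to expand along
these lines (illustrative, not the committed definition):

    // Sketch only: tag the event with the GpuAcc backend (rather than
    // Compute::Undefined), append the workload's name -- inherited from the
    // Layer that created it -- to the event label, and keep the GUID.
    #define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label) \
        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                      this->GetGuid(), \
                                                      this->GetName() + "_" + (label), \
                                                      armnn::OpenClTimer(), \
                                                      armnn::WallClockTimer())

A call such as ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAbsWorkload_Execute")
then emits an event labelled with both the layer name and the fixed label,
attributed to GpuAcc, which is also what corrects the ClConvolution2dWorkload
backend reporting mentioned above.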
 src/backends/cl/workloads/ClAbsWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClActivationWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClAdditionWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClArgMinMaxWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClBatchMatMulWorkload.cpp | 2 +-
 .../cl/workloads/ClBatchNormalizationFloatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp | 4 ++--
 src/backends/cl/workloads/ClCastWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClChannelShuffleWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClComparisonWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClConcatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClConstantWorkload.cpp | 4 ++--
 src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp | 6 +++---
 src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp | 6 +++---
 src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 8 ++++----
 src/backends/cl/workloads/ClConvolution3dWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClDequantizeWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClDivisionWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp | 4 ++--
 src/backends/cl/workloads/ClExpWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClFillWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClFloorFloatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClGatherNdWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClGatherWorkload.cpp | 6 +++---
 .../cl/workloads/ClInstanceNormalizationWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLogWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLogicalAndWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLogicalNotWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLogicalOrWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClMaximumWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClMeanWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClMinimumWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClMultiplicationWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClNegWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClPadWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClPermuteWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClPermuteWorkload.hpp | 8 +-------
 src/backends/cl/workloads/ClPooling2dWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClPooling3dWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClPreluWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClQLstmWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClQuantizeWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClRankWorkload.hpp | 4 +++-
 src/backends/cl/workloads/ClReduceWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClReshapeWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClResizeWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClRsqrtWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSinWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSliceWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSoftmaxWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp | 4 ++--
 src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSplitterWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSqrtWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClStackWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClStridedSliceWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClSubtractionWorkload.cpp | 6 +++---
 .../cl/workloads/ClTransposeConvolution2dWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClTransposeWorkload.cpp | 6 +++---
 src/backends/cl/workloads/ClTransposeWorkload.hpp | 8 +-------
 .../workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp | 2 +-
 src/backends/cl/workloads/ClWorkloadUtils.hpp | 12 ++++++++++--
 70 files changed, 206 insertions(+), 208 deletions(-)

diff --git a/src/backends/cl/workloads/ClAbsWorkload.cpp b/src/backends/cl/workloads/ClAbsWorkload.cpp
index c108bd4432..b08cb0a955 100644
--- a/src/backends/cl/workloads/ClAbsWorkload.cpp
+++ b/src/backends/cl/workloads/ClAbsWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -34,14 +34,14 @@ ClAbsWorkload::ClAbsWorkload(const AbsQueueDescriptor& descriptor,
     arm_compute::ICLTensor& input  = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClAbsWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAbsWorkload_configure");
         m_AbsLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClAbsWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClAbsWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAbsWorkload_Execute");
     RunClFunction(m_AbsLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index a92f8fb573..08a8a47ba3 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -48,14 +48,14 @@ ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& desc
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClActivationWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClActivationWorkload_configure");
         m_ActivationLayer.configure(clCompileContext, &input, &output, activationLayerInfo);
     }
 }

 void ClActivationWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClActivationWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClActivationWorkload_Execute");
     RunClFunction(m_ActivationLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index afdd1bb23a..ceffc082fc 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -31,14 +31,14 @@ ClAdditionWorkload::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClAdditionWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAdditionWorkload_configure");
         m_Layer.configure(clCompileContext, &input0, &input1, &output, g_AclConvertPolicy, activationInfo);
     }
 }

 void ClAdditionWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClAdditionWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAdditionWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index 1f81f7d26e..6290f8ccd3 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -71,7 +71,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescrip
     int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClArgMinMaxWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClArgMinMaxWorkload_configure");
         if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
         {
             m_ArgMinMaxLayer.configure(clCompileContext,
@@ -93,7 +93,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescrip

 void ClArgMinMaxWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClArgMinMaxWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClArgMinMaxWorkload_Execute");
     RunClFunction(m_ArgMinMaxLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
index d1a2e4b175..5dd542e004 100644
--- a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
@@ -108,7 +108,7 @@ ClBatchMatMulWorkload::ClBatchMatMulWorkload(const BatchMatMulQueueDescriptor& d

 void ClBatchMatMulWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchMatMulWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchMatMulWorkload_Execute");
     RunClFunction(m_MatMulLayer, CHECK_LOCATION());
 }
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 389605f17d..b19dc30493 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -87,7 +87,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClBatchNormalizationFloatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_configure");
         m_Layer.configure(clCompileContext,
                           &input,
                           &output,
@@ -112,7 +112,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(

 void ClBatchNormalizationFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchNormalizationFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index f6d96041cc..2ffbbdce53 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -154,7 +154,7 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc
     const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor.m_Parameters);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClBatchToSpaceNdWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchToSpaceNdWorkload_configure");
         m_Layer.configure(clCompileContext,
                          (rank == 3) ?
                          &m_ReshapeInputTensor : &input,
                          blockWidth,
@@ -166,7 +166,7 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc

 void ClBatchToSpaceNdWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchToSpaceNdWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchToSpaceNdWorkload_Execute");
     if (m_LayerReshapeInput)
     {
         m_LayerReshapeInput->run();
diff --git a/src/backends/cl/workloads/ClCastWorkload.cpp b/src/backends/cl/workloads/ClCastWorkload.cpp
index 25d52c8356..6c77266ea9 100644
--- a/src/backends/cl/workloads/ClCastWorkload.cpp
+++ b/src/backends/cl/workloads/ClCastWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -36,14 +36,14 @@ ClCastWorkload::ClCastWorkload(const CastQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClCastWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClCastWorkload_configure");
         m_CastLayer.configure(clCompileContext, &input, &output, g_AclConvertPolicy);
     }
 }

 void ClCastWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClCastWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClCastWorkload_Execute");
     RunClFunction(m_CastLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
index bf2958782e..9ce05713b0 100644
--- a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -87,14 +87,14 @@ ClChannelShuffleWorkload::ClChannelShuffleWorkload(const ChannelShuffleQueueDesc
     output.info()->set_data_layout(aclDataLayout);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClChannelShuffleWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClChannelShuffleWorkload_configure");
         m_ChannelShuffleLayer.configure(clCompileContext, &input, &output, descriptor.m_Parameters.m_NumGroups);
     }
 }

 void ClChannelShuffleWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClChannelShuffleWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClChannelShuffleWorkload_Execute");
     RunClFunction(m_ChannelShuffleLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClComparisonWorkload.cpp b/src/backends/cl/workloads/ClComparisonWorkload.cpp
index 2ae7b3bed6..332d9daad9 100644
--- a/src/backends/cl/workloads/ClComparisonWorkload.cpp
+++ b/src/backends/cl/workloads/ClComparisonWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -59,14 +59,14 @@ ClComparisonWorkload::ClComparisonWorkload(const ComparisonQueueDescriptor& desc
     const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClComparisonWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClComparisonWorkload_configure");
         m_ComparisonLayer.configure(clCompileContext, &input0, &input1, &output, comparisonOperation);
     }
 }

 void ClComparisonWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClComparisonWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClComparisonWorkload_Execute");
     RunClFunction(m_ComparisonLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 53c4e2c7ff..9a67f07ae8 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ClConcatWorkload.hpp"
@@ -89,7 +89,7 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor,
     auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConcatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_configure");
         // Configure input and output tensors
         size_t aclAxis = CalcAxis(descriptor.m_Parameters);
         layer->configure(clCompileContext, aclInputs, &output, aclAxis);
@@ -104,7 +104,7 @@ void ClConcatWorkload::Execute() const
 {
     if (m_Layer)
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConcatWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_Execute");
         m_Layer->run();
     }
 }
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index d6a4ad66ef..bbf6476c0a 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -51,7 +51,7 @@ ClConstantWorkload::ClConstantWorkload(const ConstantQueueDescriptor& descriptor

 void ClConstantWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConstantWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConstantWorkload_Execute");

     // The intermediate tensor held by the corresponding layer output handler can be initialised with the given data
     // on the first inference, then reused for subsequent inferences.
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 4ac1274130..cfbb7ac4b8 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -30,14 +30,14 @@ ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
     m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvertFp16ToFp32Workload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp16ToFp32Workload_configure");
         m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
     }
 }

 void ClConvertFp16ToFp32Workload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvertFp16ToFp32Workload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp16ToFp32Workload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 307314d784..72b84bbdf9 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -30,14 +30,14 @@ ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
     m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvertFp32ToFp16Workload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp32ToFp16Workload_configure");
         m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
     }
 }

 void ClConvertFp32ToFp16Workload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvertFp32ToFp16Workload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp32ToFp16Workload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 00e39bdedf..7c3b102412 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -81,7 +81,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
     : ClBaseWorkload(descriptor, info)
     , m_ConvolutionLayer(memoryManager)
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload");
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload");
     const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                       m_Data.m_Parameters.m_DilationY);
@@ -118,7 +118,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload_configure");
         m_ConvolutionLayer.configure(clCompileContext,
                                      m_InputProxy.get(),
                                      m_WeightsProxy.get(),
@@ -163,7 +163,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip

 void ClConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution2dWorkload_Execute", GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload_Execute");
     RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClConvolution3dWorkload.cpp b/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
index 7480dbd64b..417b7fbfc4 100644
--- a/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -88,7 +88,7 @@ ClConvolution3dWorkload::ClConvolution3dWorkload(const Convolution3dQueueDescrip
                                                  isFastMathEnabled);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution3dWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution3dWorkload_configure");
         m_ConvolutionLayer.configure(clCompileContext,
                                      &input,
                                      &weights,
@@ -115,7 +115,7 @@ ClConvolution3dWorkload::ClConvolution3dWorkload(const Convolution3dQueueDescrip

 void ClConvolution3dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution3dWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution3dWorkload_Execute");
     RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 28d700c2a0..1f6823c062 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -62,14 +62,14 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
     output.info()->set_data_layout(aclDataLayout);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDepthToSpaceWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthToSpaceWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output, blockSize);
     }
 }

 void ClDepthToSpaceWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthToSpaceWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthToSpaceWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 91366d765e..31d77a3d2b 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -128,7 +128,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
     m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDepthwiseConvolutionWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_configure");
         static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
             clCompileContext,
             &input,
@@ -163,7 +163,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(

 void ClDepthwiseConvolutionWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthwiseConvolutionWorkload_Execute", GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_Execute");
     ARMNN_ASSERT(m_DepthwiseConvolutionLayer);

     RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index 0081fb8d25..5b0d5e6b51 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -42,7 +42,7 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc
     m_Layer.reset(new arm_compute::CLDequantizationLayer());

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDequantizeWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_configure");
         m_Layer->configure(clCompileContext, &input, &output);
     }
     m_Layer->prepare();
@@ -52,7 +52,7 @@ void ClDequantizeWorkload::Execute() const
 {
     if (m_Layer)
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDequantizeWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_Execute");
         m_Layer->run();
     }
 }
diff --git a/src/backends/cl/workloads/ClDivisionWorkload.cpp b/src/backends/cl/workloads/ClDivisionWorkload.cpp
index cfcb1046cc..0fde03d640 100644
--- a/src/backends/cl/workloads/ClDivisionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -45,14 +45,14 @@ ClDivisionWorkload::ClDivisionWorkload(const DivisionQueueDescriptor& descriptor
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDivisionWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDivisionWorkload_configure");
         m_ArithmeticDivision.configure(clCompileContext, &input0, &input1, &output, activationInfo);
     }
 }

 void ClDivisionWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDivisionWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDivisionWorkload_Execute");
     RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
index df30feb52a..b3cdcbae2a 100644
--- a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
+++ b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
@@ -29,7 +29,7 @@ ClElementwiseBinaryWorkload::ClElementwiseBinaryWorkload(const ElementwiseBinary
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClElementwiseBinaryWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClElementwiseBinaryWorkload_configure");

         switch (descriptor.m_Parameters.m_Operation)
         {
@@ -56,7 +56,7 @@ void ClElementwiseBinaryWorkload::Execute() const
 {
     if (m_ElementwiseBinaryLayer)
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClElementwiseBinaryWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClElementwiseBinaryWorkload_Execute");
         m_ElementwiseBinaryLayer->run();
     }
 }
diff --git a/src/backends/cl/workloads/ClExpWorkload.cpp b/src/backends/cl/workloads/ClExpWorkload.cpp
index 15da905051..3ddb588731 100644
--- a/src/backends/cl/workloads/ClExpWorkload.cpp
+++ b/src/backends/cl/workloads/ClExpWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -40,14 +40,14 @@ ClExpWorkload::ClExpWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClExpWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClExpWorkload_configure");
         m_ExpLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClExpWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClExpWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClExpWorkload_Execute");
     RunClFunction(m_ExpLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClFillWorkload.cpp b/src/backends/cl/workloads/ClFillWorkload.cpp
index d0a43a2cee..d09722bccf 100644
--- a/src/backends/cl/workloads/ClFillWorkload.cpp
+++ b/src/backends/cl/workloads/ClFillWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -32,14 +32,14 @@ ClFillWorkload::ClFillWorkload(const FillQueueDescriptor& descriptor,
     arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFillWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFillWorkload_configure");
         m_Layer.configure(clCompileContext, &output, pixelValue);
     }
 }

 void ClFillWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFillWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFillWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0aae1a30e3..06074b8bf8 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -30,14 +30,14 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descripto
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFloorFloatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFloorFloatWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output);
     }
 }

 void ClFloorFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFloorFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFloorFloatWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 727ae5634a..8730e738d8 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -77,7 +77,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(
                                                                       activationInfo);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFullyConnectedWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFullyConnectedWorkload_configure");
         m_FullyConnectedLayer.configure(clCompileContext,
                                         &input,
                                         &weights,
@@ -106,7 +106,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(

 void ClFullyConnectedWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFullyConnectedWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFullyConnectedWorkload_Execute");
     RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClGatherNdWorkload.cpp b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
index f68914645e..1351f9685f 100644
--- a/src/backends/cl/workloads/ClGatherNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -186,7 +186,7 @@ ClGatherNdWorkload::ClGatherNdWorkload(const GatherNdQueueDescriptor& descriptor
     BuildArmComputeTensor(m_OutputGather, outputGather_Info);
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_OutputGather);
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClGatherNdWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherNdWorkload_configure");
         auto aclAxis = ComputeAclAxis(0, paramsInfo);
         m_GatherLayer.configure(clCompileContext, &input, &m_FlattenedIndices, &m_OutputGather, aclAxis);
     }
@@ -197,7 +197,7 @@ ClGatherNdWorkload::ClGatherNdWorkload(const GatherNdQueueDescriptor& descriptor

 void ClGatherNdWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClGatherNdWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherNdWorkload_Execute");
     RunClFunction(m_MulLayer, CHECK_LOCATION());
     RunClFunction(m_ReduceSumLayer, CHECK_LOCATION());
     RunClFunction(m_GatherLayer, CHECK_LOCATION());
diff --git a/src/backends/cl/workloads/ClGatherWorkload.cpp b/src/backends/cl/workloads/ClGatherWorkload.cpp
index 55bf422d19..281dfc1709 100644
--- a/src/backends/cl/workloads/ClGatherWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -46,14 +46,14 @@ ClGatherWorkload::ClGatherWorkload(const GatherQueueDescriptor& descriptor,
     int aclAxis = ComputeAclAxis(descriptor.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClGatherWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &indices, &output, aclAxis);
     }
 };

 void ClGatherWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClGatherWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp b/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
index 54114c11d3..02e9af884d 100644
--- a/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
+++ b/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -51,7 +51,7 @@ ClInstanceNormalizationWorkload::ClInstanceNormalizationWorkload(
     output.info()->set_data_layout(aclDataLayout);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClInstanceNormalizationWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClInstanceNormalizationWorkload_configure");
         m_Layer.configure(clCompileContext,
                           &input,
                           &output,
@@ -63,7 +63,7 @@ ClInstanceNormalizationWorkload::ClInstanceNormalizationWorkload(

 void ClInstanceNormalizationWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClInstanceNormalizationWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClInstanceNormalizationWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index d120fb28f6..356df9a470 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -49,14 +49,14 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
     int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0;

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClL2NormalizationFloatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClL2NormalizationFloatWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output, axis, m_Data.m_Parameters.m_Eps);
     }
 }

 void ClL2NormalizationFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClL2NormalizationFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClL2NormalizationFloatWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
index 67c366d1b1..5a3ba65893 100644
--- a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -46,14 +46,14 @@ ClLogSoftmaxWorkload::ClLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& desc
     int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogSoftmaxWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogSoftmaxWorkload_configure");
         m_LogSoftmaxLayer.configure(clCompileContext, &input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     }
 }

 void ClLogSoftmaxWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogSoftmaxWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogSoftmaxWorkload_Execute");
     RunClFunction(m_LogSoftmaxLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClLogWorkload.cpp b/src/backends/cl/workloads/ClLogWorkload.cpp
index 024a634093..a9bdbf5c53 100644
--- a/src/backends/cl/workloads/ClLogWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -34,14 +34,14 @@ ClLogWorkload::ClLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogWorkload_configure");
         m_LogLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClLogWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogWorkload_Execute");
     RunClFunction(m_LogLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClLogicalAndWorkload.cpp b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
index c37a300a1c..d74eec0cb2 100644
--- a/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -49,14 +49,14 @@ ClLogicalAndWorkload::ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& d
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalAndWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalAndWorkload_configure");
        m_LogicalAndLayer.configure(clCompileContext, &input0, &input1, &output);
     }
 }

 void ClLogicalAndWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalAndWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalAndWorkload_Execute");
     m_LogicalAndLayer.run();
 }
diff --git a/src/backends/cl/workloads/ClLogicalNotWorkload.cpp b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
index 9d2f8fd4d2..5636a6a47c 100644
--- a/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -45,14 +45,14 @@ ClLogicalNotWorkload::ClLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalNotWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalNotWorkload_configure");
         m_LogicalNotLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClLogicalNotWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalNotWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalNotWorkload_Execute");
     m_LogicalNotLayer.run();
 }
diff --git a/src/backends/cl/workloads/ClLogicalOrWorkload.cpp b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
index 7e3cce1d95..961f519e4c 100644
--- a/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -49,14 +49,14 @@ ClLogicalOrWorkload::ClLogicalOrWorkload(const LogicalBinaryQueueDescriptor& des
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalOrWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalOrWorkload_configure");
         m_LogicalOrLayer.configure(clCompileContext, &input0, &input1, &output);
     }
 }

 void ClLogicalOrWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalOrWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalOrWorkload_Execute");
     m_LogicalOrLayer.run();
 }
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index d20c6fc7b5..e5f4e23a7d 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -168,7 +168,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
         ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLstmFloatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLstmFloatWorkload_configure");
         m_LstmLayer.configure(clCompileContext, &input, m_InputToForgetWeightsTensor.get(),
                               m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
                               m_RecurrentToForgetWeightsTensor.get(), m_RecurrentToCellWeightsTensor.get(),
@@ -237,7 +237,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,

 void ClLstmFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLstmFloatWorkload_Execute", GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLstmFloatWorkload_Execute");
     RunClFunction(m_LstmLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClMaximumWorkload.cpp b/src/backends/cl/workloads/ClMaximumWorkload.cpp
index 21f1a2324f..58946f1111 100644
--- a/src/backends/cl/workloads/ClMaximumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMaximumWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -48,14 +48,14 @@ ClMaximumWorkload::ClMaximumWorkload(const MaximumQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMaximumWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_configure");
         m_MaximumLayer.configure(clCompileContext, &input0, &input1, &output);
     }
 }

 void ClMaximumWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMaximumWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_Execute");
     RunClFunction(m_MaximumLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClMeanWorkload.cpp b/src/backends/cl/workloads/ClMeanWorkload.cpp
index b59eb6f8e4..4241be1ceb 100644
--- a/src/backends/cl/workloads/ClMeanWorkload.cpp
+++ b/src/backends/cl/workloads/ClMeanWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -48,14 +48,14 @@ ClMeanWorkload::ClMeanWorkload(const MeanQueueDescriptor& descriptor,
                                                                               m_Data.m_Parameters.m_Axis);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMeanWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMeanWorkload_configure");
         m_Layer.configure(clCompileContext, &input, coords, m_Data.m_Parameters.m_KeepDims, &output);
     }
 }

 void ClMeanWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMeanWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMeanWorkload_Execute");
     m_Layer.run();
 }
diff --git a/src/backends/cl/workloads/ClMinimumWorkload.cpp b/src/backends/cl/workloads/ClMinimumWorkload.cpp
index 5c329062a3..7dafb704c0 100644
--- a/src/backends/cl/workloads/ClMinimumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMinimumWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -48,14 +48,14 @@ ClMinimumWorkload::ClMinimumWorkload(const MinimumQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMinimumWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_configure");
         m_MinimumLayer.configure(clCompileContext, &input0, &input1, &output);
     }
 }

 void ClMinimumWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMinimumWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_Execute");
     RunClFunction(m_MinimumLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 99822b3a65..63eee4a946 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -63,7 +63,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMultiplicationWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMultiplicationWorkload_configure");
         // Construct
         m_PixelWiseMultiplication.configure(clCompileContext,
                                             &input0,
@@ -78,7 +78,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc

 void ClMultiplicationWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMultiplicationWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMultiplicationWorkload_Execute");
     RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp
index 94b5fcbdb6..9bd205cd1f 100644
--- a/src/backends/cl/workloads/ClNegWorkload.cpp
+++ b/src/backends/cl/workloads/ClNegWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -34,14 +34,14 @@ ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClNegWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNegWorkload_configure");
         m_NegLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClNegWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClNegWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNegWorkload_Execute");
     RunClFunction(m_NegLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index 8de8dd5c3b..f218fa4db6 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -51,14 +51,14 @@ ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQu
     arm_compute::NormalizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(m_Data.m_Parameters);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClNormalizationFloatWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNormalizationFloatWorkload_configure");
         m_NormalizationLayer.configure(clCompileContext, &input, &output, normalizationInfo);
     }
 };

 void ClNormalizationFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClNormalizationFloatWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNormalizationFloatWorkload_Execute");
     RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index aecfb278c5..09169ec4b8 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -43,7 +43,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor,
     arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPadWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPadWorkload_configure");
         m_Layer.configure(clCompileContext,
                           &input,
                           &output,
@@ -55,7 +55,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor,

 void ClPadWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPadWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPadWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index f3d12ae72c..cfc7c79c2d 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -46,7 +46,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
     const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPermuteWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPermuteWorkload_configure");
         // Run the layer.
         m_PermuteFunction.configure(clCompileContext, &input, &output, BuildArmComputePermutationVector(mappings));
     }
@@ -54,7 +54,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,

 void ClPermuteWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(GetName() + "_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPermuteWorkload_Execute");
     RunClFunction(m_PermuteFunction, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.hpp b/src/backends/cl/workloads/ClPermuteWorkload.hpp
index a7afbc7b34..8f2a91dc1f 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.hpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -23,12 +23,6 @@ arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo& input,
 class ClPermuteWorkload : public ClBaseWorkload<PermuteQueueDescriptor>
 {
 public:
-    static const std::string& GetName()
-    {
-        static const std::string name = std::string("ClPermuteWorkload");
-        return name;
-    }
-
     ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
                       const WorkloadInfo& info,
                       const arm_compute::CLCompileContext& clCompileContext);
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 40a794ea2e..16464dae82 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //

@@ -56,7 +56,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters, fpMixedPrecision);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPooling2dWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_configure");
         // Run the layer.
         m_PoolingLayer.configure(clCompileContext, &input, &output, layerInfo);
     }
@@ -64,7 +64,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(

 void ClPooling2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPooling2dWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_Execute");
     RunClFunction(m_PoolingLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClPooling3dWorkload.cpp b/src/backends/cl/workloads/ClPooling3dWorkload.cpp
index a896110a2e..d8e94cbf4b 100644
--- a/src/backends/cl/workloads/ClPooling3dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -56,7 +56,7 @@ namespace armnn
                                                                   fpMixedPrecision);

         {
-            ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPooling3dWorkload_configure");
+            ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling3dWorkload_configure");
             // Run the layer.
             m_PoolingLayer.configure(clCompileContext, &input, &output, layerInfo);
         }
@@ -64,7 +64,7 @@ namespace armnn

     void ClPooling3dWorkload::Execute() const
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPooling3dWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling3dWorkload_Execute");
         RunClFunction(m_PoolingLayer, CHECK_LOCATION());
     }

diff --git a/src/backends/cl/workloads/ClPreluWorkload.cpp b/src/backends/cl/workloads/ClPreluWorkload.cpp
index b2b8eebfaf..9c678daa44 100644
--- a/src/backends/cl/workloads/ClPreluWorkload.cpp
+++ b/src/backends/cl/workloads/ClPreluWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -38,14 +38,14 @@ ClPreluWorkload::ClPreluWorkload(const PreluQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPreluWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPreluWorkload_configure");
         m_PreluLayer.configure(clCompileContext, &input, &alpha, &output);
     }
 }

 void ClPreluWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPreluWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPreluWorkload_Execute");
     RunClFunction(m_PreluLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClQLstmWorkload.cpp b/src/backends/cl/workloads/ClQLstmWorkload.cpp
index 92090e666c..5c05b44ab7 100644
--- a/src/backends/cl/workloads/ClQLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQLstmWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -159,7 +159,7 @@ ClQLstmWorkload::ClQLstmWorkload(const QLstmQueueDescriptor& descriptor,
                                                   m_Data.m_Parameters.m_OutputIntermediateScale);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQLstmWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQLstmWorkload_configure");
         // QLSTM CL configure
         m_QLstmLayer.configure(clCompileContext,
                                &input,
@@ -240,7 +240,7 @@ ClQLstmWorkload::ClQLstmWorkload(const QLstmQueueDescriptor& descriptor,

 void ClQLstmWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizedLstmWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
     m_QLstmLayer.run();
 }

diff --git a/src/backends/cl/workloads/ClQuantizeWorkload.cpp b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
index add2f3d9a0..07d5766ef6 100644
--- a/src/backends/cl/workloads/ClQuantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -40,14 +40,14 @@ ClQuantizeWorkload::ClQuantizeWorkload(const QuantizeQueueDescriptor& descriptor
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQuantizeWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output);
     }
 }

 void ClQuantizeWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizeWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
index 0fb19ecd71..dc7cc388d1 100644
--- a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -110,7 +110,7 @@ ClQuantizedLstmWorkload::ClQuantizedLstmWorkload(const QuantizedLstmQueueDescrip
     arm_compute::ICLTensor& outputStateOutTensor = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQuantizedLstmWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_configure");
         m_QuantizedLstmLayer.configure(clCompileContext, &inputTensor, m_InputToInputWeightsTensor.get(),
                                        m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                                        m_InputToOutputWeightsTensor.get(),
@@ -141,7 +141,7 @@ ClQuantizedLstmWorkload::ClQuantizedLstmWorkload(const QuantizedLstmQueueDescrip

 void ClQuantizedLstmWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizedLstmWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
     RunClFunction(m_QuantizedLstmLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClRankWorkload.hpp b/src/backends/cl/workloads/ClRankWorkload.hpp
index 8a7e2c2078..b87b6b8866 100644
--- a/src/backends/cl/workloads/ClRankWorkload.hpp
+++ b/src/backends/cl/workloads/ClRankWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -19,6 +19,8 @@ public:
     using ClBaseWorkload<RankQueueDescriptor>::ClBaseWorkload;
     virtual void Execute() const override
     {
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRankWorkload_Execute");
+
         const ClTensorHandle* clTensorHandle = PolymorphicDowncast<const ClTensorHandle*>(m_Data.m_Inputs[0]);
         const int32_t rank = static_cast<int32_t>(clTensorHandle->GetShape().GetNumDimensions());

diff --git a/src/backends/cl/workloads/ClReduceWorkload.cpp b/src/backends/cl/workloads/ClReduceWorkload.cpp
index ace76935c4..b9056c1a8e 100644
--- a/src/backends/cl/workloads/ClReduceWorkload.cpp
+++ b/src/backends/cl/workloads/ClReduceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -61,7 +61,7 @@ ClReduceWorkload::ClReduceWorkload(const ReduceQueueDescriptor& descriptor, cons
                                                                          info.m_InputTensorInfos[0].GetNumDimensions(),
                                                                          m_Data.m_Parameters.m_vAxis);
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClReduceWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReduceWorkload_configure");
         m_Layer.configure(&input,
                           &output,
                           static_cast<unsigned int>(coords[0]),
@@ -72,7 +72,7 @@ ClReduceWorkload::ClReduceWorkload(const ReduceQueueDescriptor& descriptor, cons

 void ClReduceWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClReduceWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReduceWorkload_Execute");
     m_Layer.run();
 }

diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index b666e7cc7b..7fa5ee01d1 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,14 +32,14 @@ ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClReshapeWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReshapeWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output);
     }
 }

 void ClReshapeWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClReshapeWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReshapeWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
index 7d6d938d5e..e86feb461d 100644
--- a/src/backends/cl/workloads/ClResizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -74,7 +74,7 @@ ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor,
         : arm_compute::SamplingPolicy::TOP_LEFT;

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClResizeWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClResizeWorkload_configure");
         m_ResizeLayer.configure(clCompileContext,
                                 &input,
                                 &output,
@@ -90,7 +90,7 @@ ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor,

 void ClResizeWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClResizeWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClResizeWorkload_Execute");
     RunClFunction(m_ResizeLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClRsqrtWorkload.cpp b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
index 3bc5f38166..441657f2ef 100644
--- a/src/backends/cl/workloads/ClRsqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -34,14 +34,14 @@ ClRsqrtWorkload::ClRsqrtWorkload(const RsqrtQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClRsqrtWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRsqrtWorkload_configure");
         m_RsqrtLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClRsqrtWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClRsqrtWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRsqrtWorkload_Execute");
     RunClFunction(m_RsqrtLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSinWorkload.cpp b/src/backends/cl/workloads/ClSinWorkload.cpp
index bcab32fa9a..0eabf13e73 100644
--- a/src/backends/cl/workloads/ClSinWorkload.cpp
+++ b/src/backends/cl/workloads/ClSinWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -34,14 +34,14 @@ ClSinWorkload::ClSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSinWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSinWorkload_configure");
         m_SinLayer.configure(clCompileContext, &input, &output);
     }
 }

 void ClSinWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSinWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSinWorkload_Execute");
     RunClFunction(m_SinLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSliceWorkload.cpp b/src/backends/cl/workloads/ClSliceWorkload.cpp
index 3976e120d2..30b05ca7fb 100644
--- a/src/backends/cl/workloads/ClSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -52,14 +52,14 @@ ClSliceWorkload::ClSliceWorkload(const SliceQueueDescriptor& descriptor,
     std::tie(starts, ends) = SetClSliceData(m_Data.m_Parameters.m_Begin, m_Data.m_Parameters.m_Size);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSliceWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSliceWorkload_configure");
         m_SliceFunction.configure(clCompileContext, &input, &output, starts, ends);
     }
 }

 void ClSliceWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSliceWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSliceWorkload_Execute");
     RunClFunction(m_SliceFunction, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
index 99bc89e200..d884f3022e 100644
--- a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -45,14 +45,14 @@ ClSoftmaxWorkload::ClSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
     int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSoftmaxWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSoftmaxWorkload_configure");
         m_SoftmaxLayer.configure(clCompileContext, &input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     }
 }

 void ClSoftmaxWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSoftmaxWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSoftmaxWorkload_Execute");
     RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index fd90adb2ff..6ecdff1157 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -164,7 +164,7 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDesc
                                                               descriptor.m_Parameters.m_PadList[0].second);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSpaceToBatchNdWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToBatchNdWorkload_configure");
         m_Layer.configure(clCompileContext,
                           rank == 3 ? &m_ReshapeInputTensor : &input,
                           blockWidth,
@@ -177,7 +177,7 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDesc

 void ClSpaceToBatchNdWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSpaceToBatchNdWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToBatchNdWorkload_Execute");
     if (m_LayerReshapeInput)
     {
         m_LayerReshapeInput->run();
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index da1a350290..a106436799 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -40,14 +40,14 @@ ClSpaceToDepthWorkload::ClSpaceToDepthWorkload(const SpaceToDepthQueueDescriptor
     output.info()->set_data_layout(aclDataLayout);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSpaceToDepthWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToDepthWorkload_configure");
         m_Layer.configure(clCompileContext, &input, &output, blockSize);
     }
 }

 void ClSpaceToDepthWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSpaceToDepthWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToDepthWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index f4622ce26d..ec904eb51b 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -103,7 +103,7 @@ ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor
     unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
     auto layer = std::make_unique<arm_compute::CLSplit>();
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSplitterWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSplitterWorkload_configure");
         layer->configure(&input, aclOutputs, aclAxis);
     }

@@ -117,7 +117,7 @@ void ClSplitterWorkload::Execute() const
 {
     if (m_Layer)
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSplitterWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSplitterWorkload_Execute");
         m_Layer->run();
     }
 }

diff --git a/src/backends/cl/workloads/ClSqrtWorkload.cpp b/src/backends/cl/workloads/ClSqrtWorkload.cpp
index b78c114cf7..e36adf6d4c 100644
--- a/src/backends/cl/workloads/ClSqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClSqrtWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -53,14 +53,14 @@ ClSqrtWorkload::ClSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor
     arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSqrtWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSqrtWorkload_configure");
         m_SqrtLayer.configure(clCompileContext, &input, &output, activationLayerInfo);
     }
 }

 void ClSqrtWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSqrtWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSqrtWorkload_Execute");
     RunClFunction(m_SqrtLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 46b4702783..f25a3c0fbe 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ClStackWorkload.hpp"
@@ -67,7 +67,7 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor,
     m_Layer.reset(new arm_compute::CLStackLayer());
     int aclAxis = CalcAxis(descriptor.m_Parameters.m_Axis, descriptor.m_Parameters.m_InputShape.GetNumDimensions());
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClStackWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStackWorkload_configure");
         m_Layer->configure(clCompileContext, aclInputs, aclAxis, &output);
     }
 }

@@ -76,7 +76,7 @@ void ClStackWorkload::Execute() const
 {
     if (m_Layer)
     {
-        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClStackWorkload_Execute", this->GetGuid());
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStackWorkload_Execute");
         m_Layer->run();
     }
 }

diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 62a59feed4..3889c20e0f 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -86,7 +86,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
     output.info()->set_data_layout(aclDataLayout);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClStridedSliceWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStridedSliceWorkload_configure");
         m_StridedSliceLayer.configure(clCompileContext,
                                       &input,
                                       &output,
@@ -101,7 +101,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor

 void ClStridedSliceWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClStridedSliceWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStridedSliceWorkload_Execute");
     RunClFunction(m_StridedSliceLayer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 789d457ff4..31bd5de14d 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,14 +32,14 @@ ClSubtractionWorkload::ClSubtractionWorkload(const SubtractionQueueDescriptor& d
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSubtractionWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSubtractionWorkload_configure");
         m_Layer.configure(clCompileContext, &input0, &input1, &output, g_AclConvertPolicy, activationInfo);
     }
 }

 void ClSubtractionWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSubtractionWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSubtractionWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index 96c0a81a2f..d3eeadeb31 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -101,7 +101,7 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
     arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClTransposeConvolution2dWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeConvolution2dWorkload_configure");
         m_Layer.configure(clCompileContext, &input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output,
                           padStrideInfo);
     }
@@ -119,7 +119,7 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(

 void ClTransposeConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClTransposeConvolution2dWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeConvolution2dWorkload_Execute");
     RunClFunction(m_Layer, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClTransposeWorkload.cpp b/src/backends/cl/workloads/ClTransposeWorkload.cpp
index 383f5f1faf..8f3ccdbe99 100644
--- a/src/backends/cl/workloads/ClTransposeWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -43,7 +43,7 @@ ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescrip
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
     {
-        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClTransposeWorkload_configure");
+        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeWorkload_configure");
         // Run the layer.
         m_PermuteFunction.configure(clCompileContext,
                                     &input,
@@ -54,7 +54,7 @@ ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescrip

 void ClTransposeWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(GetName() + "_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeWorkload_Execute");
     RunClFunction(m_PermuteFunction, CHECK_LOCATION());
 }

diff --git a/src/backends/cl/workloads/ClTransposeWorkload.hpp b/src/backends/cl/workloads/ClTransposeWorkload.hpp
index fb4803592f..a22f631bb8 100644
--- a/src/backends/cl/workloads/ClTransposeWorkload.hpp
+++ b/src/backends/cl/workloads/ClTransposeWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -23,12 +23,6 @@ arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input,
 class ClTransposeWorkload : public ClBaseWorkload<TransposeQueueDescriptor>
 {
 public:
-    static const std::string& GetName()
-    {
-        static const std::string name = std::string("ClTransposeWorkload");
-        return name;
-    }
-
     ClTransposeWorkload(const TransposeQueueDescriptor& descriptor,
                         const WorkloadInfo& info,
                         const arm_compute::CLCompileContext& clCompileContext);
diff --git a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
index fb31d7c283..ae2b901f65 100644
--- a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
@@ -481,7 +481,7 @@ ClUnidirectionalSequenceLstmFloatWorkload::ClUnidirectionalSequenceLstmFloatWork

 void ClUnidirectionalSequenceLstmFloatWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClUnidirectionalSequenceLstmFloatWorkload_Execute", GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClUnidirectionalSequenceLstmFloatWorkload_Execute");
     if (m_Permute1)
     {
         m_Permute1->run();
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 8f2fb48238..4b491e3cec 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -28,7 +28,15 @@
 #define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
                                                   guid, \
-                                                  name, \
+                                                  GetName() + "_" + name, \
                                                   armnn::OpenClTimer(), \
                                                   armnn::WallClockTimer())
+
+/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
+#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+                                                  this->GetGuid(), \
+                                                  this->GetName() + "_" + label, \
+                                                  armnn::OpenClTimer(), \
+                                                  armnn::WallClockTimer())
--
cgit v1.2.1
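
A note on the new macro: ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID builds its event label from this->GetName() + "_" + label and takes the GUID from this->GetGuid(), so it only expands correctly inside a member function of a workload that provides both accessors. The standalone C++ sketch below illustrates the labelling scheme only; ScopedEvent, SCOPED_PROFILING_EVENT_NAME and MockWorkload are hypothetical stand-ins, not ArmNN code and not part of this patch.

// Standalone sketch: mimics how the new macro combines an instance name with a
// per-call label. Everything here is illustrative, not ArmNN code.
#include <iostream>
#include <string>
#include <utility>

// Stand-in for the scoped profiling event; the real macro forwards to
// ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS with an OpenClTimer and a
// WallClockTimer.
struct ScopedEvent
{
    explicit ScopedEvent(std::string label) : m_Label(std::move(label))
    {
        std::cout << "begin: " << m_Label << '\n'; // timers would start here
    }
    ~ScopedEvent()
    {
        std::cout << "end:   " << m_Label << '\n'; // timers would stop here
    }
    std::string m_Label;
};

// Expands inside a workload member function, so GetName() resolves against the
// instance -- mirroring this->GetName() + "_" + label in the real macro.
#define SCOPED_PROFILING_EVENT_NAME(label) \
    ScopedEvent scopedEvent(this->GetName() + "_" + (label))

// Hypothetical workload carrying a name; in ArmNN the name accessor lives on
// the workload base class.
class MockWorkload
{
public:
    explicit MockWorkload(std::string name) : m_Name(std::move(name)) {}
    const std::string& GetName() const { return m_Name; }

    void Execute() const
    {
        SCOPED_PROFILING_EVENT_NAME("ClAbsWorkload_Execute");
        // ... run the arm_compute function here ...
    }

private:
    std::string m_Name;
};

int main()
{
    MockWorkload workload("conv1");
    workload.Execute(); // prints begin/end with label "conv1_ClAbsWorkload_Execute"
    return 0;
}

Under these assumptions, a workload named "conv1" would emit events labelled conv1_ClAbsWorkload_Execute. Note also that the configure-time events, previously recorded via ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, ...), are attributed to armnn::Compute::GpuAcc by the new macro.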