author     Mike Kelly <mike.kelly@arm.com>    2023-07-25 17:37:33 +0100
committer  mike.kelly <mike.kelly@arm.com>    2023-07-31 15:34:06 +0000
commit     7cbe78140a274cec783049051df7c7298b974f13 (patch)
tree       d39f19b35e120b59d9dbd7f0c7eb27ef97621ebe
parent     21e399974e3cdd8db00bd76af813aa0485395b60 (diff)
download   armnn-7cbe78140a274cec783049051df7c7298b974f13.tar.gz
MLCE-1092 Add Names to Workloads
* Added names to Workloads.
* Workloads will be given the name of the Layer that created them.
* Added new profiling macros to CL, Neon and Ref that add the workload name to the event label.
* Updated workloads to use the new macros.
* Added missing profiling to Rank Workloads.
* Fixed issue where ClConvolution2dWorkload was being reported as Undefined rather than GpuAcc.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I0a55eab6c2f455b73943aca8e99a247c3cb2a906
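The new profiling macros themselves are defined in ClWorkloadUtils.hpp, NeonWorkloadUtils.hpp and RefWorkloadUtils.hpp (listed in the file summary below; their definitions are not shown in this excerpt). As a rough, illustrative sketch only, assuming the CL variant simply folds the workload name returned by the new GetName() accessor into the label of the existing GUID-based event, it could look something like this:

    // Hypothetical sketch, NOT the actual definition from ClWorkloadUtils.hpp.
    // It assumes the macro expands inside a workload member function, so that
    // this->GetName() and this->GetGuid() are in scope (both appear in the diff below),
    // and that the existing macro accepts a std::string label.
    #define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label) \
        ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(this->GetName() + "_" + (label), this->GetGuid())

Whatever the exact definition, the per-workload call sites below now pass only the label string; the workload name and GUID come from the workload instance itself.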
-rw-r--r--  include/armnn/backends/IWorkload.hpp | 5
-rw-r--r--  include/armnn/backends/Workload.hpp | 11
-rw-r--r--  include/armnn/backends/WorkloadInfo.hpp | 3
-rw-r--r--  src/armnn/Layer.hpp | 1
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp | 6
-rw-r--r--  src/backends/cl/workloads/ClAbsWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClActivationWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClAdditionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClArgMinMaxWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClBatchMatMulWorkload.cpp | 2
-rw-r--r--  src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp | 4
-rw-r--r--  src/backends/cl/workloads/ClCastWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClChannelShuffleWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClComparisonWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClConcatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClConstantWorkload.cpp | 4
-rw-r--r--  src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 8
-rw-r--r--  src/backends/cl/workloads/ClConvolution3dWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClDequantizeWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClDivisionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp | 4
-rw-r--r--  src/backends/cl/workloads/ClExpWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClFillWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClFloorFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClGatherNdWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClGatherWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLogWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLogicalAndWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLogicalNotWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLogicalOrWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClLstmFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClMaximumWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClMeanWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClMinimumWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClMultiplicationWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClPadWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClPermuteWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClPermuteWorkload.hpp | 8
-rw-r--r--  src/backends/cl/workloads/ClPooling2dWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClPooling3dWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClPreluWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClQLstmWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClQuantizeWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClRankWorkload.hpp | 4
-rw-r--r--  src/backends/cl/workloads/ClReduceWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClReshapeWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClResizeWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClRsqrtWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSinWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSliceWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp | 4
-rw-r--r--  src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSplitterWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSqrtWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClStackWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClStridedSliceWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClSubtractionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClTransposeWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClTransposeWorkload.hpp | 8
-rw-r--r--  src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp | 2
-rw-r--r--  src/backends/cl/workloads/ClWorkloadUtils.hpp | 12
-rw-r--r--  src/backends/neon/workloads/NeonAbsWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonActivationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonAdditionWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp | 2
-rw-r--r--  src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp | 2
-rw-r--r--  src/backends/neon/workloads/NeonCastWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonComparisonWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvolution3dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDequantizeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDivisionWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp | 2
-rw-r--r--  src/backends/neon/workloads/NeonExpWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonFillWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonFloorFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonGatherNdWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonGatherWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLogWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLogicalAndWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLogicalNotWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLogicalOrWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonMaximumWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonMeanWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonMinimumWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonMultiplicationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPadWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPermuteWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPermuteWorkload.hpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonPooling2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPooling3dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPreluWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonQLstmWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonQuantizeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonRankWorkload.hpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonReduceWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonReshapeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonResizeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonRsqrtWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSinWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSliceWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSplitterWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSqrtWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonStackWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonStridedSliceWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSubtractionWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonTileWorkload.cpp | 2
-rw-r--r--  src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonTransposeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonTransposeWorkload.hpp | 8
-rw-r--r--  src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp | 2
-rw-r--r--  src/backends/neon/workloads/NeonUnidirectionalSequenceLstmWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonWorkloadUtils.hpp | 12
-rw-r--r--  src/backends/reference/workloads/RefActivationWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefArgMinMaxWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefBatchMatMulWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefCastWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefChannelShuffleWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefComparisonWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefConcatWorkload.cpp | 7
-rw-r--r--  src/backends/reference/workloads/RefConstantWorkload.cpp | 5
-rw-r--r--  src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefConvolution3dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefDebugWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefDebugWorkload.hpp | 6
-rw-r--r--  src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp | 5
-rw-r--r--  src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefDequantizeWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefElementwiseWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefFillWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefFloorWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefFullyConnectedWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefGatherNdWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefGatherWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefL2NormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefLstmWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefMeanWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefPadWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.hpp | 8
-rw-r--r--  src/backends/reference/workloads/RefPooling2dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefPooling3dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefPreluWorkload.cpp | 5
-rw-r--r--  src/backends/reference/workloads/RefQLstmWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefQuantizeWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefRankWorkload.hpp | 5
-rw-r--r--  src/backends/reference/workloads/RefReduceWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefReshapeWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefResizeWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefReverseV2Workload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefShapeWorkload.hpp | 4
-rw-r--r--  src/backends/reference/workloads/RefSliceWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefSoftmaxWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefSplitterWorkload.cpp | 5
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefTileWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefTileWorkload.hpp | 2
-rw-r--r--  src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefTransposeWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefTransposeWorkload.hpp | 6
-rw-r--r--  src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefWorkloadUtils.hpp | 8
213 files changed, 510 insertions, 496 deletions
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
index 78c0756945..7ffba5b327 100644
--- a/include/armnn/backends/IWorkload.hpp
+++ b/include/armnn/backends/IWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -41,6 +41,9 @@ public:
// Replace input tensor handle with the given TensorHandle
virtual void ReplaceInputTensorHandle(ITensorHandle* /*input*/, unsigned int /*slot*/) = 0;
+ // Returns the name of the workload
+ virtual const std::string& GetName() const = 0;
+
// Replace output tensor handle with the given TensorHandle
virtual void ReplaceOutputTensorHandle(ITensorHandle* /*output*/, unsigned int /*slot*/) = 0;
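Because GetName() is added to the IWorkload interface itself, backend-agnostic code can report which workload it is handling without downcasting to a concrete type. An illustrative, hypothetical helper (not part of this patch):

    #include <armnn/backends/IWorkload.hpp>
    #include <iostream>
    #include <string>

    // Hypothetical logging helper: any IWorkload implementation now carries a name.
    void LogWorkloadName(const armnn::IWorkload& workload)
    {
        std::cout << "Executing workload: " << workload.GetName() << std::endl;
    }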
diff --git a/include/armnn/backends/Workload.hpp b/include/armnn/backends/Workload.hpp
index 9f7aad0b55..9d5fec98cd 100644
--- a/include/armnn/backends/Workload.hpp
+++ b/include/armnn/backends/Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -36,11 +36,17 @@ public:
BaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
: m_Data(descriptor),
- m_Guid(arm::pipe::IProfilingService::GetNextGuid())
+ m_Guid(arm::pipe::IProfilingService::GetNextGuid()),
+ m_Name(info.m_Name)
{
m_Data.Validate(info);
}
+ virtual const std::string& GetName() const override
+ {
+ return m_Name;
+ }
+
void ExecuteAsync(ExecutionData& executionData) override
{
ARMNN_LOG(info) << "Using default async workload execution, this will network affect performance";
@@ -82,6 +88,7 @@ public:
protected:
QueueDescriptor m_Data;
const arm::pipe::ProfilingGuid m_Guid;
+ const std::string m_Name;
private:
#if !defined(ARMNN_DISABLE_THREADS)
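The effect of the BaseWorkload changes above is that whatever string is held in WorkloadInfo::m_Name when the workload is constructed gets copied into m_Name and exposed through GetName(). A self-contained toy that mirrors the pattern (not ArmNN code, just the same data flow):

    #include <iostream>
    #include <string>

    // Toy versions of the types touched above, to show how the name travels.
    struct ToyWorkloadInfo
    {
        std::string m_Name;   // mirrors WorkloadInfo::m_Name
    };

    class ToyBaseWorkload
    {
    public:
        explicit ToyBaseWorkload(const ToyWorkloadInfo& info)
            : m_Name(info.m_Name)   // captured at construction, like BaseWorkload
        {}
        virtual const std::string& GetName() const { return m_Name; }
        virtual ~ToyBaseWorkload() = default;
    protected:
        const std::string m_Name;
    };

    int main()
    {
        ToyWorkloadInfo info;
        info.m_Name = "conv2d_1";                // in ArmNN, Layer.hpp fills this in from the layer name
        ToyBaseWorkload workload(info);
        std::cout << workload.GetName() << "\n"; // prints "conv2d_1"
        return 0;
    }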
diff --git a/include/armnn/backends/WorkloadInfo.hpp b/include/armnn/backends/WorkloadInfo.hpp
index a7a1e1e81a..23a5172d83 100644
--- a/include/armnn/backends/WorkloadInfo.hpp
+++ b/include/armnn/backends/WorkloadInfo.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -20,6 +20,7 @@ struct WorkloadInfo
Optional<TensorInfo> m_WeightsTensorInfo = EmptyOptional();
Optional<TensorInfo> m_BiasTensorInfo = EmptyOptional();
Optional<std::string> m_ConvolutionMethod = EmptyOptional();
+ std::string m_Name;
};
struct MemoryInfo
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 4f69e78b62..7bcfa3a49b 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -411,6 +411,7 @@ protected:
WorkloadInfo info;
CollectQueueDescriptorInputs(descriptor, info);
CollectQueueDescriptorOutputs(descriptor, info);
+ info.m_Name = GetName();
return info;
}
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index a55146e704..8935a8ff35 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1466,7 +1466,11 @@ void CreateReferenceDynamicBackendTestImpl()
WorkloadInfo workloadInfo
{
{ inputInfo, weightInfo },
- { outputInfo }
+ { outputInfo },
+ EmptyOptional(),
+ EmptyOptional(),
+ EmptyOptional(),
+ "Name"
};
convolution2dQueueDescriptor.m_Inputs.push_back(nullptr);
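The test change above follows from the member order of WorkloadInfo after this patch: because m_Name sits after the three optional members, an aggregate initialiser has to spell out the EmptyOptional() placeholders before it can supply a name. Restated with the member each argument lands in (an annotated copy of the initialiser shown in the hunk, using the same variable names as the surrounding test):

    WorkloadInfo workloadInfo
    {
        { inputInfo, weightInfo },   // m_InputTensorInfos
        { outputInfo },              // m_OutputTensorInfos
        EmptyOptional(),             // m_WeightsTensorInfo
        EmptyOptional(),             // m_BiasTensorInfo
        EmptyOptional(),             // m_ConvolutionMethod
        "Name"                       // m_Name (new in this change)
    };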
diff --git a/src/backends/cl/workloads/ClAbsWorkload.cpp b/src/backends/cl/workloads/ClAbsWorkload.cpp
index c108bd4432..b08cb0a955 100644
--- a/src/backends/cl/workloads/ClAbsWorkload.cpp
+++ b/src/backends/cl/workloads/ClAbsWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,14 +34,14 @@ ClAbsWorkload::ClAbsWorkload(const AbsQueueDescriptor& descriptor,
arm_compute::ICLTensor& input = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClAbsWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAbsWorkload_configure");
m_AbsLayer.configure(clCompileContext, &input, &output);
}
}
void ClAbsWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClAbsWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAbsWorkload_Execute");
RunClFunction(m_AbsLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClActivationWorkload.cpp b/src/backends/cl/workloads/ClActivationWorkload.cpp
index a92f8fb573..08a8a47ba3 100644
--- a/src/backends/cl/workloads/ClActivationWorkload.cpp
+++ b/src/backends/cl/workloads/ClActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,14 +48,14 @@ ClActivationWorkload::ClActivationWorkload(const ActivationQueueDescriptor& desc
arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClActivationWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClActivationWorkload_configure");
m_ActivationLayer.configure(clCompileContext, &input, &output, activationLayerInfo);
}
}
void ClActivationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClActivationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClActivationWorkload_Execute");
RunClFunction(m_ActivationLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index afdd1bb23a..ceffc082fc 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,14 +31,14 @@ ClAdditionWorkload::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClAdditionWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAdditionWorkload_configure");
m_Layer.configure(clCompileContext, &input0, &input1, &output, g_AclConvertPolicy, activationInfo);
}
}
void ClAdditionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClAdditionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClAdditionWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
index 1f81f7d26e..6290f8ccd3 100644
--- a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,7 +71,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descrip
int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClArgMinMaxWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClArgMinMaxWorkload_configure");
if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
{
m_ArgMinMaxLayer.configure(clCompileContext,
@@ -93,7 +93,7 @@ ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descrip
void ClArgMinMaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClArgMinMaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClArgMinMaxWorkload_Execute");
RunClFunction(m_ArgMinMaxLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
index d1a2e4b175..5dd542e004 100644
--- a/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchMatMulWorkload.cpp
@@ -108,7 +108,7 @@ ClBatchMatMulWorkload::ClBatchMatMulWorkload(const BatchMatMulQueueDescriptor& d
void ClBatchMatMulWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchMatMulWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchMatMulWorkload_Execute");
RunClFunction(m_MatMulLayer, CHECK_LOCATION());
}
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index 389605f17d..b19dc30493 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,7 +87,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClBatchNormalizationFloatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_configure");
m_Layer.configure(clCompileContext,
&input,
&output,
@@ -112,7 +112,7 @@ ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
void ClBatchNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchNormalizationFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
index f6d96041cc..2ffbbdce53 100644
--- a/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchToSpaceNdWorkload.cpp
@@ -154,7 +154,7 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc
const arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(descriptor.m_Parameters);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClBatchToSpaceNdWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchToSpaceNdWorkload_configure");
m_Layer.configure(clCompileContext,
(rank == 3) ? &m_ReshapeInputTensor : &input,
blockWidth,
@@ -166,7 +166,7 @@ ClBatchToSpaceNdWorkload::ClBatchToSpaceNdWorkload(const BatchToSpaceNdQueueDesc
void ClBatchToSpaceNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClBatchToSpaceNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchToSpaceNdWorkload_Execute");
if (m_LayerReshapeInput)
{
m_LayerReshapeInput->run();
diff --git a/src/backends/cl/workloads/ClCastWorkload.cpp b/src/backends/cl/workloads/ClCastWorkload.cpp
index 25d52c8356..6c77266ea9 100644
--- a/src/backends/cl/workloads/ClCastWorkload.cpp
+++ b/src/backends/cl/workloads/ClCastWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,14 +36,14 @@ ClCastWorkload::ClCastWorkload(const CastQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClCastWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClCastWorkload_configure");
m_CastLayer.configure(clCompileContext, &input, &output, g_AclConvertPolicy);
}
}
void ClCastWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClCastWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClCastWorkload_Execute");
RunClFunction(m_CastLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
index bf2958782e..9ce05713b0 100644
--- a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,14 +87,14 @@ ClChannelShuffleWorkload::ClChannelShuffleWorkload(const ChannelShuffleQueueDesc
output.info()->set_data_layout(aclDataLayout);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClChannelShuffleWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClChannelShuffleWorkload_configure");
m_ChannelShuffleLayer.configure(clCompileContext, &input, &output, descriptor.m_Parameters.m_NumGroups);
}
}
void ClChannelShuffleWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClChannelShuffleWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClChannelShuffleWorkload_Execute");
RunClFunction(m_ChannelShuffleLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClComparisonWorkload.cpp b/src/backends/cl/workloads/ClComparisonWorkload.cpp
index 2ae7b3bed6..332d9daad9 100644
--- a/src/backends/cl/workloads/ClComparisonWorkload.cpp
+++ b/src/backends/cl/workloads/ClComparisonWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -59,14 +59,14 @@ ClComparisonWorkload::ClComparisonWorkload(const ComparisonQueueDescriptor& desc
const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClComparisonWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClComparisonWorkload_configure");
m_ComparisonLayer.configure(clCompileContext, &input0, &input1, &output, comparisonOperation);
}
}
void ClComparisonWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClComparisonWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClComparisonWorkload_Execute");
RunClFunction(m_ComparisonLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index 53c4e2c7ff..9a67f07ae8 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClConcatWorkload.hpp"
@@ -89,7 +89,7 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor,
auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConcatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_configure");
// Configure input and output tensors
size_t aclAxis = CalcAxis(descriptor.m_Parameters);
layer->configure(clCompileContext, aclInputs, &output, aclAxis);
@@ -104,7 +104,7 @@ void ClConcatWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConcatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConcatWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index d6a4ad66ef..bbf6476c0a 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -51,7 +51,7 @@ ClConstantWorkload::ClConstantWorkload(const ConstantQueueDescriptor& descriptor
void ClConstantWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConstantWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConstantWorkload_Execute");
// The intermediate tensor held by the corresponding layer output handler can be initialised with the given data
// on the first inference, then reused for subsequent inferences.
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index 4ac1274130..cfbb7ac4b8 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,14 +30,14 @@ ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(
m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvertFp16ToFp32Workload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp16ToFp32Workload_configure");
m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
}
}
void ClConvertFp16ToFp32Workload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvertFp16ToFp32Workload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp16ToFp32Workload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 307314d784..72b84bbdf9 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,14 +30,14 @@ ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvertFp32ToFp16Workload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp32ToFp16Workload_configure");
m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
}
}
void ClConvertFp32ToFp16Workload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvertFp32ToFp16Workload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp32ToFp16Workload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 00e39bdedf..7c3b102412 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -81,7 +81,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
: ClBaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
, m_ConvolutionLayer(memoryManager)
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload");
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
m_Data.m_Parameters.m_DilationY);
@@ -118,7 +118,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution2dWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload_configure");
m_ConvolutionLayer.configure(clCompileContext,
m_InputProxy.get(),
m_WeightsProxy.get(),
@@ -163,7 +163,7 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
void ClConvolution2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution2dWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution2dWorkload_Execute");
RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClConvolution3dWorkload.cpp b/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
index 7480dbd64b..417b7fbfc4 100644
--- a/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -88,7 +88,7 @@ ClConvolution3dWorkload::ClConvolution3dWorkload(const Convolution3dQueueDescrip
isFastMathEnabled);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClConvolution3dWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution3dWorkload_configure");
m_ConvolutionLayer.configure(clCompileContext,
&input,
&weights,
@@ -115,7 +115,7 @@ ClConvolution3dWorkload::ClConvolution3dWorkload(const Convolution3dQueueDescrip
void ClConvolution3dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution3dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvolution3dWorkload_Execute");
RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
index 28d700c2a0..1f6823c062 100644
--- a/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -62,14 +62,14 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
output.info()->set_data_layout(aclDataLayout);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDepthToSpaceWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthToSpaceWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output, blockSize);
}
}
void ClDepthToSpaceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthToSpaceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthToSpaceWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 91366d765e..31d77a3d2b 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -128,7 +128,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDepthwiseConvolutionWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_configure");
static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
clCompileContext,
&input,
@@ -163,7 +163,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
void ClDepthwiseConvolutionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDepthwiseConvolutionWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_Execute");
ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
diff --git a/src/backends/cl/workloads/ClDequantizeWorkload.cpp b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
index 0081fb8d25..5b0d5e6b51 100644
--- a/src/backends/cl/workloads/ClDequantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -42,7 +42,7 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc
m_Layer.reset(new arm_compute::CLDequantizationLayer());
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDequantizeWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_configure");
m_Layer->configure(clCompileContext, &input, &output);
}
m_Layer->prepare();
@@ -52,7 +52,7 @@ void ClDequantizeWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDequantizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/cl/workloads/ClDivisionWorkload.cpp b/src/backends/cl/workloads/ClDivisionWorkload.cpp
index cfcb1046cc..0fde03d640 100644
--- a/src/backends/cl/workloads/ClDivisionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDivisionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,14 +45,14 @@ ClDivisionWorkload::ClDivisionWorkload(const DivisionQueueDescriptor& descriptor
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDivisionWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDivisionWorkload_configure");
m_ArithmeticDivision.configure(clCompileContext, &input0, &input1, &output, activationInfo);
}
}
void ClDivisionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDivisionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDivisionWorkload_Execute");
RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
index df30feb52a..b3cdcbae2a 100644
--- a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
+++ b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
@@ -29,7 +29,7 @@ ClElementwiseBinaryWorkload::ClElementwiseBinaryWorkload(const ElementwiseBinary
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClElementwiseBinaryWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClElementwiseBinaryWorkload_configure");
switch (descriptor.m_Parameters.m_Operation)
{
@@ -56,7 +56,7 @@ void ClElementwiseBinaryWorkload::Execute() const
{
if (m_ElementwiseBinaryLayer)
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClElementwiseBinaryWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClElementwiseBinaryWorkload_Execute");
m_ElementwiseBinaryLayer->run();
}
}
diff --git a/src/backends/cl/workloads/ClExpWorkload.cpp b/src/backends/cl/workloads/ClExpWorkload.cpp
index 15da905051..3ddb588731 100644
--- a/src/backends/cl/workloads/ClExpWorkload.cpp
+++ b/src/backends/cl/workloads/ClExpWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,14 +40,14 @@ ClExpWorkload::ClExpWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClExpWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClExpWorkload_configure");
m_ExpLayer.configure(clCompileContext, &input, &output);
}
}
void ClExpWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClExpWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClExpWorkload_Execute");
RunClFunction(m_ExpLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClFillWorkload.cpp b/src/backends/cl/workloads/ClFillWorkload.cpp
index d0a43a2cee..d09722bccf 100644
--- a/src/backends/cl/workloads/ClFillWorkload.cpp
+++ b/src/backends/cl/workloads/ClFillWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,14 +32,14 @@ ClFillWorkload::ClFillWorkload(const FillQueueDescriptor& descriptor,
arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFillWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFillWorkload_configure");
m_Layer.configure(clCompileContext, &output, pixelValue);
}
}
void ClFillWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFillWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFillWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
index 0aae1a30e3..06074b8bf8 100644
--- a/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClFloorFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,14 +30,14 @@ ClFloorFloatWorkload::ClFloorFloatWorkload(const FloorQueueDescriptor& descripto
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFloorFloatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFloorFloatWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output);
}
}
void ClFloorFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFloorFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFloorFloatWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 727ae5634a..8730e738d8 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -77,7 +77,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(
activationInfo);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClFullyConnectedWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFullyConnectedWorkload_configure");
m_FullyConnectedLayer.configure(clCompileContext,
&input,
&weights,
@@ -106,7 +106,7 @@ ClFullyConnectedWorkload::ClFullyConnectedWorkload(
void ClFullyConnectedWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClFullyConnectedWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClFullyConnectedWorkload_Execute");
RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClGatherNdWorkload.cpp b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
index f68914645e..1351f9685f 100644
--- a/src/backends/cl/workloads/ClGatherNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -186,7 +186,7 @@ ClGatherNdWorkload::ClGatherNdWorkload(const GatherNdQueueDescriptor& descriptor
BuildArmComputeTensor(m_OutputGather, outputGather_Info);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_OutputGather);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClGatherNdWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherNdWorkload_configure");
auto aclAxis = ComputeAclAxis(0, paramsInfo);
m_GatherLayer.configure(clCompileContext, &input, &m_FlattenedIndices, &m_OutputGather, aclAxis);
}
@@ -197,7 +197,7 @@ ClGatherNdWorkload::ClGatherNdWorkload(const GatherNdQueueDescriptor& descriptor
void ClGatherNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClGatherNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherNdWorkload_Execute");
RunClFunction(m_MulLayer, CHECK_LOCATION());
RunClFunction(m_ReduceSumLayer, CHECK_LOCATION());
RunClFunction(m_GatherLayer, CHECK_LOCATION());
diff --git a/src/backends/cl/workloads/ClGatherWorkload.cpp b/src/backends/cl/workloads/ClGatherWorkload.cpp
index 55bf422d19..281dfc1709 100644
--- a/src/backends/cl/workloads/ClGatherWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,14 +46,14 @@ ClGatherWorkload::ClGatherWorkload(const GatherQueueDescriptor& descriptor,
int aclAxis = ComputeAclAxis(descriptor.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClGatherWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherWorkload_configure");
m_Layer.configure(clCompileContext, &input, &indices, &output, aclAxis);
}
};
void ClGatherWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClGatherWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClGatherWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
} // namespace armnn
diff --git a/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp b/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
index 54114c11d3..02e9af884d 100644
--- a/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
+++ b/src/backends/cl/workloads/ClInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -51,7 +51,7 @@ ClInstanceNormalizationWorkload::ClInstanceNormalizationWorkload(
output.info()->set_data_layout(aclDataLayout);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClInstanceNormalizationWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClInstanceNormalizationWorkload_configure");
m_Layer.configure(clCompileContext,
&input,
&output,
@@ -63,7 +63,7 @@ ClInstanceNormalizationWorkload::ClInstanceNormalizationWorkload(
void ClInstanceNormalizationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClInstanceNormalizationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClInstanceNormalizationWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index d120fb28f6..356df9a470 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -49,14 +49,14 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0;
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClL2NormalizationFloatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClL2NormalizationFloatWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output, axis, m_Data.m_Parameters.m_Eps);
}
}
void ClL2NormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClL2NormalizationFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClL2NormalizationFloatWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
index 67c366d1b1..5a3ba65893 100644
--- a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,14 +46,14 @@ ClLogSoftmaxWorkload::ClLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& desc
int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogSoftmaxWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogSoftmaxWorkload_configure");
m_LogSoftmaxLayer.configure(clCompileContext, &input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
}
}
void ClLogSoftmaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogSoftmaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogSoftmaxWorkload_Execute");
RunClFunction(m_LogSoftmaxLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClLogWorkload.cpp b/src/backends/cl/workloads/ClLogWorkload.cpp
index 024a634093..a9bdbf5c53 100644
--- a/src/backends/cl/workloads/ClLogWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,14 +34,14 @@ ClLogWorkload::ClLogWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogWorkload_configure");
m_LogLayer.configure(clCompileContext, &input, &output);
}
}
void ClLogWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogWorkload_Execute");
RunClFunction(m_LogLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClLogicalAndWorkload.cpp b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
index c37a300a1c..d74eec0cb2 100644
--- a/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -49,14 +49,14 @@ ClLogicalAndWorkload::ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& d
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalAndWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalAndWorkload_configure");
m_LogicalAndLayer.configure(clCompileContext, &input0, &input1, &output);
}
}
void ClLogicalAndWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalAndWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalAndWorkload_Execute");
m_LogicalAndLayer.run();
}
diff --git a/src/backends/cl/workloads/ClLogicalNotWorkload.cpp b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
index 9d2f8fd4d2..5636a6a47c 100644
--- a/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,14 +45,14 @@ ClLogicalNotWorkload::ClLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalNotWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalNotWorkload_configure");
m_LogicalNotLayer.configure(clCompileContext, &input, &output);
}
}
void ClLogicalNotWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalNotWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalNotWorkload_Execute");
m_LogicalNotLayer.run();
}
diff --git a/src/backends/cl/workloads/ClLogicalOrWorkload.cpp b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
index 7e3cce1d95..961f519e4c 100644
--- a/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -49,14 +49,14 @@ ClLogicalOrWorkload::ClLogicalOrWorkload(const LogicalBinaryQueueDescriptor& des
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLogicalOrWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalOrWorkload_configure");
m_LogicalOrLayer.configure(clCompileContext, &input0, &input1, &output);
}
}
void ClLogicalOrWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLogicalOrWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalOrWorkload_Execute");
m_LogicalOrLayer.run();
}
diff --git a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
index d20c6fc7b5..e5f4e23a7d 100644
--- a/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClLstmFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -168,7 +168,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLstmFloatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLstmFloatWorkload_configure");
m_LstmLayer.configure(clCompileContext, &input, m_InputToForgetWeightsTensor.get(),
m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
m_RecurrentToForgetWeightsTensor.get(), m_RecurrentToCellWeightsTensor.get(),
@@ -237,7 +237,7 @@ ClLstmFloatWorkload::ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
void ClLstmFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLstmFloatWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLstmFloatWorkload_Execute");
RunClFunction(m_LstmLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClMaximumWorkload.cpp b/src/backends/cl/workloads/ClMaximumWorkload.cpp
index 21f1a2324f..58946f1111 100644
--- a/src/backends/cl/workloads/ClMaximumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMaximumWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,14 +48,14 @@ ClMaximumWorkload::ClMaximumWorkload(const MaximumQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMaximumWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_configure");
m_MaximumLayer.configure(clCompileContext, &input0, &input1, &output);
}
}
void ClMaximumWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMaximumWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_Execute");
RunClFunction(m_MaximumLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClMeanWorkload.cpp b/src/backends/cl/workloads/ClMeanWorkload.cpp
index b59eb6f8e4..4241be1ceb 100644
--- a/src/backends/cl/workloads/ClMeanWorkload.cpp
+++ b/src/backends/cl/workloads/ClMeanWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,14 +48,14 @@ ClMeanWorkload::ClMeanWorkload(const MeanQueueDescriptor& descriptor,
m_Data.m_Parameters.m_Axis);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMeanWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMeanWorkload_configure");
m_Layer.configure(clCompileContext, &input, coords, m_Data.m_Parameters.m_KeepDims, &output);
}
}
void ClMeanWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMeanWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMeanWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/cl/workloads/ClMinimumWorkload.cpp b/src/backends/cl/workloads/ClMinimumWorkload.cpp
index 5c329062a3..7dafb704c0 100644
--- a/src/backends/cl/workloads/ClMinimumWorkload.cpp
+++ b/src/backends/cl/workloads/ClMinimumWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,14 +48,14 @@ ClMinimumWorkload::ClMinimumWorkload(const MinimumQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMinimumWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_configure");
m_MinimumLayer.configure(clCompileContext, &input0, &input1, &output);
}
}
void ClMinimumWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMinimumWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_Execute");
RunClFunction(m_MinimumLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
index 99822b3a65..63eee4a946 100644
--- a/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
+++ b/src/backends/cl/workloads/ClMultiplicationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -63,7 +63,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClMultiplicationWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMultiplicationWorkload_configure");
// Construct
m_PixelWiseMultiplication.configure(clCompileContext,
&input0,
@@ -78,7 +78,7 @@ ClMultiplicationWorkload::ClMultiplicationWorkload(const MultiplicationQueueDesc
void ClMultiplicationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClMultiplicationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMultiplicationWorkload_Execute");
RunClFunction(m_PixelWiseMultiplication, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp
index 94b5fcbdb6..9bd205cd1f 100644
--- a/src/backends/cl/workloads/ClNegWorkload.cpp
+++ b/src/backends/cl/workloads/ClNegWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,14 +34,14 @@ ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClNegWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNegWorkload_configure");
m_NegLayer.configure(clCompileContext, &input, &output);
}
}
void ClNegWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClNegWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNegWorkload_Execute");
RunClFunction(m_NegLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
index 8de8dd5c3b..f218fa4db6 100644
--- a/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClNormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -51,14 +51,14 @@ ClNormalizationFloatWorkload::ClNormalizationFloatWorkload(const NormalizationQu
arm_compute::NormalizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(m_Data.m_Parameters);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClNormalizationFloatWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNormalizationFloatWorkload_configure");
m_NormalizationLayer.configure(clCompileContext, &input, &output, normalizationInfo);
}
};
void ClNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClNormalizationFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClNormalizationFloatWorkload_Execute");
RunClFunction(m_NormalizationLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index aecfb278c5..09169ec4b8 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor,
arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPadWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPadWorkload_configure");
m_Layer.configure(clCompileContext,
&input,
&output,
@@ -55,7 +55,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor,
void ClPadWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPadWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPadWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index f3d12ae72c..cfc7c79c2d 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,7 +46,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPermuteWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPermuteWorkload_configure");
// Run the layer.
m_PermuteFunction.configure(clCompileContext, &input, &output, BuildArmComputePermutationVector(mappings));
}
@@ -54,7 +54,7 @@ ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
void ClPermuteWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(GetName() + "_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPermuteWorkload_Execute");
RunClFunction(m_PermuteFunction, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.hpp b/src/backends/cl/workloads/ClPermuteWorkload.hpp
index a7afbc7b34..8f2a91dc1f 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.hpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,12 +23,6 @@ arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo& input,
class ClPermuteWorkload : public ClBaseWorkload<PermuteQueueDescriptor>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("ClPermuteWorkload");
- return name;
- }
-
ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext);
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index 40a794ea2e..16464dae82 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -56,7 +56,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters, fpMixedPrecision);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPooling2dWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_configure");
// Run the layer.
m_PoolingLayer.configure(clCompileContext, &input, &output, layerInfo);
}
@@ -64,7 +64,7 @@ ClPooling2dWorkload::ClPooling2dWorkload(
void ClPooling2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPooling2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_Execute");
RunClFunction(m_PoolingLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClPooling3dWorkload.cpp b/src/backends/cl/workloads/ClPooling3dWorkload.cpp
index a896110a2e..d8e94cbf4b 100644
--- a/src/backends/cl/workloads/ClPooling3dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -56,7 +56,7 @@ namespace armnn
fpMixedPrecision);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPooling3dWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling3dWorkload_configure");
// Run the layer.
m_PoolingLayer.configure(clCompileContext, &input, &output, layerInfo);
}
@@ -64,7 +64,7 @@ namespace armnn
void ClPooling3dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPooling3dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling3dWorkload_Execute");
RunClFunction(m_PoolingLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClPreluWorkload.cpp b/src/backends/cl/workloads/ClPreluWorkload.cpp
index b2b8eebfaf..9c678daa44 100644
--- a/src/backends/cl/workloads/ClPreluWorkload.cpp
+++ b/src/backends/cl/workloads/ClPreluWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,14 +38,14 @@ ClPreluWorkload::ClPreluWorkload(const PreluQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClPreluWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPreluWorkload_configure");
m_PreluLayer.configure(clCompileContext, &input, &alpha, &output);
}
}
void ClPreluWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClPreluWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPreluWorkload_Execute");
RunClFunction(m_PreluLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClQLstmWorkload.cpp b/src/backends/cl/workloads/ClQLstmWorkload.cpp
index 92090e666c..5c05b44ab7 100644
--- a/src/backends/cl/workloads/ClQLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -159,7 +159,7 @@ ClQLstmWorkload::ClQLstmWorkload(const QLstmQueueDescriptor& descriptor,
m_Data.m_Parameters.m_OutputIntermediateScale);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQLstmWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQLstmWorkload_configure");
// QLSTM CL configure
m_QLstmLayer.configure(clCompileContext,
&input,
@@ -240,7 +240,7 @@ ClQLstmWorkload::ClQLstmWorkload(const QLstmQueueDescriptor& descriptor,
void ClQLstmWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizedLstmWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
m_QLstmLayer.run();
}
diff --git a/src/backends/cl/workloads/ClQuantizeWorkload.cpp b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
index add2f3d9a0..07d5766ef6 100644
--- a/src/backends/cl/workloads/ClQuantizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,14 +40,14 @@ ClQuantizeWorkload::ClQuantizeWorkload(const QuantizeQueueDescriptor& descriptor
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQuantizeWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output);
}
}
void ClQuantizeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
index 0fb19ecd71..dc7cc388d1 100644
--- a/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
+++ b/src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -110,7 +110,7 @@ ClQuantizedLstmWorkload::ClQuantizedLstmWorkload(const QuantizedLstmQueueDescrip
arm_compute::ICLTensor& outputStateOutTensor = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClQuantizedLstmWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_configure");
m_QuantizedLstmLayer.configure(clCompileContext, &inputTensor, m_InputToInputWeightsTensor.get(),
m_InputToForgetWeightsTensor.get(),
m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
@@ -141,7 +141,7 @@ ClQuantizedLstmWorkload::ClQuantizedLstmWorkload(const QuantizedLstmQueueDescrip
void ClQuantizedLstmWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizedLstmWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
RunClFunction(m_QuantizedLstmLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClRankWorkload.hpp b/src/backends/cl/workloads/ClRankWorkload.hpp
index 8a7e2c2078..b87b6b8866 100644
--- a/src/backends/cl/workloads/ClRankWorkload.hpp
+++ b/src/backends/cl/workloads/ClRankWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,6 +19,8 @@ public:
using ClBaseWorkload<RankQueueDescriptor>::ClBaseWorkload;
virtual void Execute() const override
{
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRankWorkload_Execute");
+
const ClTensorHandle* clTensorHandle = PolymorphicDowncast<const ClTensorHandle*>(m_Data.m_Inputs[0]);
const int32_t rank = static_cast<int32_t>(clTensorHandle->GetShape().GetNumDimensions());
diff --git a/src/backends/cl/workloads/ClReduceWorkload.cpp b/src/backends/cl/workloads/ClReduceWorkload.cpp
index ace76935c4..b9056c1a8e 100644
--- a/src/backends/cl/workloads/ClReduceWorkload.cpp
+++ b/src/backends/cl/workloads/ClReduceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,7 +61,7 @@ ClReduceWorkload::ClReduceWorkload(const ReduceQueueDescriptor& descriptor, cons
info.m_InputTensorInfos[0].GetNumDimensions(),
m_Data.m_Parameters.m_vAxis);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClReduceWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReduceWorkload_configure");
m_Layer.configure(&input,
&output,
static_cast<unsigned int>(coords[0]),
@@ -72,7 +72,7 @@ ClReduceWorkload::ClReduceWorkload(const ReduceQueueDescriptor& descriptor, cons
void ClReduceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClReduceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReduceWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index b666e7cc7b..7fa5ee01d1 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,14 +32,14 @@ ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClReshapeWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReshapeWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output);
}
}
void ClReshapeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClReshapeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClReshapeWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
index 7d6d938d5e..e86feb461d 100644
--- a/src/backends/cl/workloads/ClResizeWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -74,7 +74,7 @@ ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor,
: arm_compute::SamplingPolicy::TOP_LEFT;
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClResizeWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClResizeWorkload_configure");
m_ResizeLayer.configure(clCompileContext,
&input,
&output,
@@ -90,7 +90,7 @@ ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor,
void ClResizeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClResizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClResizeWorkload_Execute");
RunClFunction(m_ResizeLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClRsqrtWorkload.cpp b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
index 3bc5f38166..441657f2ef 100644
--- a/src/backends/cl/workloads/ClRsqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClRsqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,14 +34,14 @@ ClRsqrtWorkload::ClRsqrtWorkload(const RsqrtQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClRsqrtWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRsqrtWorkload_configure");
m_RsqrtLayer.configure(clCompileContext, &input, &output);
}
}
void ClRsqrtWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClRsqrtWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClRsqrtWorkload_Execute");
RunClFunction(m_RsqrtLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSinWorkload.cpp b/src/backends/cl/workloads/ClSinWorkload.cpp
index bcab32fa9a..0eabf13e73 100644
--- a/src/backends/cl/workloads/ClSinWorkload.cpp
+++ b/src/backends/cl/workloads/ClSinWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,14 +34,14 @@ ClSinWorkload::ClSinWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSinWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSinWorkload_configure");
m_SinLayer.configure(clCompileContext, &input, &output);
}
}
void ClSinWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSinWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSinWorkload_Execute");
RunClFunction(m_SinLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSliceWorkload.cpp b/src/backends/cl/workloads/ClSliceWorkload.cpp
index 3976e120d2..30b05ca7fb 100644
--- a/src/backends/cl/workloads/ClSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,14 +52,14 @@ ClSliceWorkload::ClSliceWorkload(const SliceQueueDescriptor& descriptor,
std::tie(starts, ends) = SetClSliceData(m_Data.m_Parameters.m_Begin, m_Data.m_Parameters.m_Size);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSliceWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSliceWorkload_configure");
m_SliceFunction.configure(clCompileContext, &input, &output, starts, ends);
}
}
void ClSliceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSliceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSliceWorkload_Execute");
RunClFunction(m_SliceFunction, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
index 99bc89e200..d884f3022e 100644
--- a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,14 +45,14 @@ ClSoftmaxWorkload::ClSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSoftmaxWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSoftmaxWorkload_configure");
m_SoftmaxLayer.configure(clCompileContext, &input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
}
}
void ClSoftmaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSoftmaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSoftmaxWorkload_Execute");
RunClFunction(m_SoftmaxLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
index fd90adb2ff..6ecdff1157 100644
--- a/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
@@ -164,7 +164,7 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDesc
descriptor.m_Parameters.m_PadList[0].second);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSpaceToBatchNdWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToBatchNdWorkload_configure");
m_Layer.configure(clCompileContext,
rank == 3 ? &m_ReshapeInputTensor : &input,
blockWidth,
@@ -177,7 +177,7 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(const SpaceToBatchNdQueueDesc
void ClSpaceToBatchNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSpaceToBatchNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToBatchNdWorkload_Execute");
if (m_LayerReshapeInput)
{
m_LayerReshapeInput->run();
diff --git a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
index da1a350290..a106436799 100644
--- a/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
+++ b/src/backends/cl/workloads/ClSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,14 +40,14 @@ ClSpaceToDepthWorkload::ClSpaceToDepthWorkload(const SpaceToDepthQueueDescriptor
output.info()->set_data_layout(aclDataLayout);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSpaceToDepthWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToDepthWorkload_configure");
m_Layer.configure(clCompileContext, &input, &output, blockSize);
}
}
void ClSpaceToDepthWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSpaceToDepthWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSpaceToDepthWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
index f4622ce26d..ec904eb51b 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.cpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -103,7 +103,7 @@ ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor
unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
auto layer = std::make_unique<arm_compute::CLSplit>();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSplitterWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSplitterWorkload_configure");
layer->configure(&input, aclOutputs, aclAxis);
}
@@ -117,7 +117,7 @@ void ClSplitterWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSplitterWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSplitterWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/cl/workloads/ClSqrtWorkload.cpp b/src/backends/cl/workloads/ClSqrtWorkload.cpp
index b78c114cf7..e36adf6d4c 100644
--- a/src/backends/cl/workloads/ClSqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -53,14 +53,14 @@ ClSqrtWorkload::ClSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSqrtWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSqrtWorkload_configure");
m_SqrtLayer.configure(clCompileContext, &input, &output, activationLayerInfo);
}
}
void ClSqrtWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSqrtWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSqrtWorkload_Execute");
RunClFunction(m_SqrtLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
index 46b4702783..f25a3c0fbe 100644
--- a/src/backends/cl/workloads/ClStackWorkload.cpp
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClStackWorkload.hpp"
@@ -67,7 +67,7 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor,
m_Layer.reset(new arm_compute::CLStackLayer());
int aclAxis = CalcAxis(descriptor.m_Parameters.m_Axis, descriptor.m_Parameters.m_InputShape.GetNumDimensions());
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClStackWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStackWorkload_configure");
m_Layer->configure(clCompileContext, aclInputs, aclAxis, &output);
}
}
@@ -76,7 +76,7 @@ void ClStackWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClStackWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStackWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
index 62a59feed4..3889c20e0f 100644
--- a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -86,7 +86,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
output.info()->set_data_layout(aclDataLayout);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClStridedSliceWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStridedSliceWorkload_configure");
m_StridedSliceLayer.configure(clCompileContext,
&input,
&output,
@@ -101,7 +101,7 @@ ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor
void ClStridedSliceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClStridedSliceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClStridedSliceWorkload_Execute");
RunClFunction(m_StridedSliceLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 789d457ff4..31bd5de14d 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,14 +32,14 @@ ClSubtractionWorkload::ClSubtractionWorkload(const SubtractionQueueDescriptor& d
const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClSubtractionWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSubtractionWorkload_configure");
m_Layer.configure(clCompileContext, &input0, &input1, &output, g_AclConvertPolicy, activationInfo);
}
}
void ClSubtractionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClSubtractionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClSubtractionWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index 96c0a81a2f..d3eeadeb31 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -101,7 +101,7 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClTransposeConvolution2dWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeConvolution2dWorkload_configure");
m_Layer.configure(clCompileContext, &input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output,
padStrideInfo);
}
@@ -119,7 +119,7 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
void ClTransposeConvolution2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClTransposeConvolution2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeConvolution2dWorkload_Execute");
RunClFunction(m_Layer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClTransposeWorkload.cpp b/src/backends/cl/workloads/ClTransposeWorkload.cpp
index 383f5f1faf..8f3ccdbe99 100644
--- a/src/backends/cl/workloads/ClTransposeWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,7 @@ ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescriptor& descrip
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClTransposeWorkload_configure");
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeWorkload_configure");
// Run the layer.
m_PermuteFunction.configure(clCompileContext,
&input,
@@ -54,7 +54,7 @@ ClTransposeWorkload::ClTransposeWorkload(const TransposeQueueDescriptor& descrip
void ClTransposeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(GetName() + "_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClTransposeWorkload_Execute");
RunClFunction(m_PermuteFunction, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClTransposeWorkload.hpp b/src/backends/cl/workloads/ClTransposeWorkload.hpp
index fb4803592f..a22f631bb8 100644
--- a/src/backends/cl/workloads/ClTransposeWorkload.hpp
+++ b/src/backends/cl/workloads/ClTransposeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,12 +23,6 @@ arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo& input,
class ClTransposeWorkload : public ClBaseWorkload<TransposeQueueDescriptor>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("ClTransposeWorkload");
- return name;
- }
-
ClTransposeWorkload(const TransposeQueueDescriptor& descriptor,
const WorkloadInfo& info,
const arm_compute::CLCompileContext& clCompileContext);
diff --git a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
index fb31d7c283..ae2b901f65 100644
--- a/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClUnidirectionalSequenceLstmFloatWorkload.cpp
@@ -481,7 +481,7 @@ ClUnidirectionalSequenceLstmFloatWorkload::ClUnidirectionalSequenceLstmFloatWork
void ClUnidirectionalSequenceLstmFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClUnidirectionalSequenceLstmFloatWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClUnidirectionalSequenceLstmFloatWorkload_Execute");
if (m_Permute1)
{
m_Permute1->run();
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 8f2fb48238..4b491e3cec 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -28,7 +28,15 @@
#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
guid, \
- name, \
+ GetName() + "_" + name, \
+ armnn::OpenClTimer(), \
+ armnn::WallClockTimer())
+
+/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
+#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+ this->GetGuid(), \
+ this->GetName() + "_" + label, \
armnn::OpenClTimer(), \
armnn::WallClockTimer())
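
The two macros above differ only in where the label and GUID come from: ARMNN_SCOPED_PROFILING_EVENT_CL_GUID still takes an explicit guid argument, while ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID pulls this->GetGuid() and this->GetName() from the calling workload, so an event is reported on the GpuAcc backend with the label "<workload name>_<label>". A minimal usage sketch, not part of the patch, is shown below; ClExampleWorkload and m_Layer are hypothetical placeholders, and the workload is assumed to derive from a ClBaseWorkload that provides GetName() and GetGuid() as the macro requires.

    // Sketch only: mirrors the Execute() pattern used throughout this patch.
    // ClExampleWorkload and m_Layer are hypothetical; the macro, RunClFunction
    // and CHECK_LOCATION are taken from ClWorkloadUtils.hpp.
    void ClExampleWorkload::Execute() const
    {
        // The event label becomes this->GetName() + "_ClExampleWorkload_Execute",
        // tagged with this->GetGuid() and timed with OpenClTimer and WallClockTimer.
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClExampleWorkload_Execute");
        RunClFunction(m_Layer, CHECK_LOCATION());
    }
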
diff --git a/src/backends/neon/workloads/NeonAbsWorkload.cpp b/src/backends/neon/workloads/NeonAbsWorkload.cpp
index 8854771b30..03e458c409 100644
--- a/src/backends/neon/workloads/NeonAbsWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAbsWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const Wor
void NeonAbsWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonAbsWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonAbsWorkload_Execute");
m_AbsLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index 0fadc120ba..01b216f70f 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -55,7 +55,7 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor&
void NeonActivationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonActivationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonActivationWorkload_Execute");
m_ActivationLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index 004af9b239..e492169cbd 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -56,7 +56,7 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descri
void NeonAdditionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonAdditionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonAdditionWorkload_Execute");
m_AddLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
index 06f763042c..eb9d393fda 100644
--- a/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -85,7 +85,7 @@ NeonArgMinMaxWorkload::NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& des
void NeonArgMinMaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonArgMinMaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonArgMinMaxWorkload_Execute");
m_ArgMinMaxLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp b/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
index 4ebf4d5ec6..74989d2e46 100644
--- a/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchMatMulWorkload.cpp
@@ -102,7 +102,7 @@ NeonBatchMatMulWorkload::NeonBatchMatMulWorkload(const BatchMatMulQueueDescripto
void NeonBatchMatMulWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonBatchMatMulWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonBatchMatMulWorkload_Execute");
m_MatMulLayer.run();
}
} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index 8e78846ee4..b2b1d74f13 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -113,7 +113,7 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
void NeonBatchNormalizationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonBatchNormalizationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonBatchNormalizationWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
index f66849a88d..8d571aec2a 100644
--- a/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
@@ -161,7 +161,7 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
void NeonBatchToSpaceNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonBatchToSpaceNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonBatchToSpaceNdWorkload_Execute");
if (m_LayerReshapeInput)
{
m_LayerReshapeInput->run();
diff --git a/src/backends/neon/workloads/NeonCastWorkload.cpp b/src/backends/neon/workloads/NeonCastWorkload.cpp
index bbac207e27..6a46a190c1 100644
--- a/src/backends/neon/workloads/NeonCastWorkload.cpp
+++ b/src/backends/neon/workloads/NeonCastWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ NeonCastWorkload::NeonCastWorkload(const CastQueueDescriptor& descriptor, const
void NeonCastWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonCastWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonCastWorkload_Execute");
m_CastLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
index 2f91c7b28a..a44c9aa0d4 100644
--- a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -87,7 +87,7 @@ NeonChannelShuffleWorkload::NeonChannelShuffleWorkload(const ChannelShuffleQueue
void NeonChannelShuffleWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonChannelShuffleWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonChannelShuffleWorkload_Execute");
m_ChannelShuffleLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonComparisonWorkload.cpp b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
index 23f6ca40dd..d2d127a7fc 100644
--- a/src/backends/neon/workloads/NeonComparisonWorkload.cpp
+++ b/src/backends/neon/workloads/NeonComparisonWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -53,7 +53,7 @@ NeonComparisonWorkload::NeonComparisonWorkload(const ComparisonQueueDescriptor&
void NeonComparisonWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonComparisonWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonComparisonWorkload_Execute");
m_ComparisonLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 5b538b6481..5f613f4554 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -99,7 +99,7 @@ void NeonConcatWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConcatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConcatWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index bbdee78d8b..f5b0128dc7 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -53,7 +53,7 @@ NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descri
void NeonConstantWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConstantWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConstantWorkload_Execute");
using namespace armcomputetensorutils;
diff --git a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
index f65d71904f..3ec8e8b6ff 100644
--- a/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,7 +50,7 @@ NeonConvertFp16ToFp32Workload::NeonConvertFp16ToFp32Workload(const ConvertFp16To
void NeonConvertFp16ToFp32Workload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp16ToFp32Workload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConvertFp16ToFp32Workload_Execute");
if (m_Cast)
{
diff --git a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
index 017ed9867e..54a35fd7fe 100644
--- a/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,7 +52,7 @@ NeonConvertFp32ToFp16Workload::NeonConvertFp32ToFp16Workload(const ConvertFp32To
void NeonConvertFp32ToFp16Workload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToFp16Workload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConvertFp32ToFp16Workload_Execute");
if (m_Cast)
{
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index ae13b970c9..f7f4e66067 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -165,7 +165,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
void NeonConvolution2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConvolution2dWorkload_Execute");
// The constant tensors may not be fully in place until the workload is Executed
if (!prepared)
{
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
index 136e533f59..5bf6e100ed 100644
--- a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -113,7 +113,7 @@ NeonConvolution3dWorkload::NeonConvolution3dWorkload(const Convolution3dQueueDes
void NeonConvolution3dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution3dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConvolution3dWorkload_Execute");
m_ConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
index c3c069a1d1..509c783c7e 100644
--- a/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -59,7 +59,7 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri
void NeonDepthToSpaceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDepthToSpaceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthToSpaceWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index aa1fb563ff..4c7adef576 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -166,7 +166,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
void NeonDepthwiseConvolutionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDepthwiseConvolutionWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");
ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 2764141e1d..d4849c5b12 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,7 +44,7 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor&
void NeonDequantizeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDequantizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDequantizeWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
index 48db83d7af..cc67ea464c 100644
--- a/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -110,7 +110,7 @@ NeonDetectionPostProcessWorkload::NeonDetectionPostProcessWorkload(
void NeonDetectionPostProcessWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDetectionPostProcessWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDetectionPostProcessWorkload_Execute");
m_Func.run();
}
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
index 19c2bf50b5..1d1e967c51 100644
--- a/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,7 +50,7 @@ NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descri
void NeonDivisionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonDivisionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDivisionWorkload_Execute");
m_DivLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp
index 839ec6c5ad..122aa580f3 100644
--- a/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp
+++ b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp
@@ -85,7 +85,7 @@ NeonElementwiseBinaryWorkload::NeonElementwiseBinaryWorkload(const ElementwiseBi
void NeonElementwiseBinaryWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonElementwiseBinaryWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonElementwiseBinaryWorkload_Execute");
m_ElementwiseBinaryLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonExpWorkload.cpp b/src/backends/neon/workloads/NeonExpWorkload.cpp
index 8c659508e2..8842fe9ad4 100644
--- a/src/backends/neon/workloads/NeonExpWorkload.cpp
+++ b/src/backends/neon/workloads/NeonExpWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -41,7 +41,7 @@ NeonExpWorkload::NeonExpWorkload(const ElementwiseUnaryQueueDescriptor& descript
void NeonExpWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonExpWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonExpWorkload_Execute");
m_ExpLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonFillWorkload.cpp b/src/backends/neon/workloads/NeonFillWorkload.cpp
index bc42482527..82cb7ea668 100644
--- a/src/backends/neon/workloads/NeonFillWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFillWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ NeonFillWorkload::NeonFillWorkload(const FillQueueDescriptor& descriptor, const
void NeonFillWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFillWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFillWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index 1d53245c5f..fdbd9afbed 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,7 +29,7 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descr
void NeonFloorFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFloorFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFloorFloatWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 12eff36cb7..cac4c3b1d8 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -116,7 +116,7 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
void NeonFullyConnectedWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonFullyConnectedWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFullyConnectedWorkload_Execute");
// The constant tensors may not be fully in place until the workload is Executed
if (!prepared)
{
diff --git a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
index f83f48ad58..93884725da 100644
--- a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -189,7 +189,7 @@ NeonGatherNdWorkload::NeonGatherNdWorkload(const GatherNdQueueDescriptor& descri
void NeonGatherNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonGatherNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonGatherNdWorkload_Execute");
m_MulLayer.run();
m_ReduceSumLayer.run();
m_GatherLayer.run();
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.cpp b/src/backends/neon/workloads/NeonGatherWorkload.cpp
index c1378e987d..5a7630ad77 100644
--- a/src/backends/neon/workloads/NeonGatherWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ NeonGatherWorkload::NeonGatherWorkload(const GatherQueueDescriptor& descriptor,
void NeonGatherWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonGatherWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonGatherWorkload_Execute");
m_Layer.run();
}
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
index 601b619237..89aa9f5eee 100644
--- a/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -59,7 +59,7 @@ NeonInstanceNormalizationWorkload::NeonInstanceNormalizationWorkload(
void NeonInstanceNormalizationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonInstanceNormalizationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonInstanceNormalizationWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index c0c6ed4982..34ab554e0d 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -56,7 +56,7 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
void NeonL2NormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonL2NormalizationFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonL2NormalizationFloatWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
index 0e64915ed5..3c73aa9b0f 100644
--- a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -54,7 +54,7 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
void NeonLogSoftmaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogSoftmaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLogSoftmaxWorkload_Execute");
m_LogSoftmaxLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonLogWorkload.cpp b/src/backends/neon/workloads/NeonLogWorkload.cpp
index e0d59cbd9b..a33ac3b6f4 100644
--- a/src/backends/neon/workloads/NeonLogWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ NeonLogWorkload::NeonLogWorkload(const ElementwiseUnaryQueueDescriptor& descript
void NeonLogWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLogWorkload_Execute");
m_LogLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp b/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp
index cfdfd85f0c..5e01e4a97a 100644
--- a/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,7 +50,7 @@ NeonLogicalAndWorkload::NeonLogicalAndWorkload(const LogicalBinaryQueueDescripto
void NeonLogicalAndWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogicalAndWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLogicalAndWorkload_Execute");
m_LogicalAndLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp b/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp
index 42601e1ea2..c8de7c700f 100644
--- a/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ NeonLogicalNotWorkload::NeonLogicalNotWorkload(const ElementwiseUnaryQueueDescri
void NeonLogicalNotWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogicalNotWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLogicalNotWorkload_Execute");
m_LogicalNotLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp b/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp
index 5f0a51b2b6..8c6da98530 100644
--- a/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,7 +50,7 @@ NeonLogicalOrWorkload::NeonLogicalOrWorkload(const LogicalBinaryQueueDescriptor&
void NeonLogicalOrWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogicalOrWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLogicalOrWorkload_Execute");
m_LogicalOrLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 19c85f7f33..ac3be5d16a 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -246,7 +246,7 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor& descript
void NeonLstmFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLstmFloatWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLstmFloatWorkload_Execute");
m_LstmLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index 5fcf9bdf5d..110d87a13f 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,7 +39,7 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descripto
void NeonMaximumWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonMaximumWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonMaximumWorkload_Execute");
m_MaxLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonMeanWorkload.cpp b/src/backends/neon/workloads/NeonMeanWorkload.cpp
index 43aeccab0d..4ef2461dcc 100644
--- a/src/backends/neon/workloads/NeonMeanWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMeanWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,7 +52,7 @@ NeonMeanWorkload::NeonMeanWorkload(const MeanQueueDescriptor& descriptor, const
void NeonMeanWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonMeanWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonMeanWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index d163b0af36..0f0d5695fb 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,7 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descripto
void NeonMinimumWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonMinimumWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonMinimumWorkload_Execute");
m_MinLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 65af61cef7..2764587363 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -77,7 +77,7 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueue
void NeonMultiplicationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonMultiplicationWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonMultiplicationWorkload_Execute");
m_PixelWiseMultiplication->run();
}
diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp
index a33cd6ffea..80fd014d3d 100644
--- a/src/backends/neon/workloads/NeonNegWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNegWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descript
void NeonNegWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonNegWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonNegWorkload_Execute");
m_NegLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 01ac5c1b64..2a355edaed 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -106,7 +106,7 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
void NeonNormalizationFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonNormalizationFloatWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonNormalizationFloatWorkload_Execute");
m_NormalizationLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
index e6cc219f91..4cab01a0e0 100644
--- a/src/backends/neon/workloads/NeonPadWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,7 +52,7 @@ NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const Wor
void NeonPadWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonPadWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPadWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonPermuteWorkload.cpp b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
index 843eaaa586..b58b26f228 100644
--- a/src/backends/neon/workloads/NeonPermuteWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,7 +48,7 @@ NeonPermuteWorkload::NeonPermuteWorkload(const PermuteQueueDescriptor& descripto
void NeonPermuteWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(GetName() + "_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPermuteWorkload_Execute");
m_PermuteFunction.run();
}
diff --git a/src/backends/neon/workloads/NeonPermuteWorkload.hpp b/src/backends/neon/workloads/NeonPermuteWorkload.hpp
index 7ed14c3aae..7add48df7f 100644
--- a/src/backends/neon/workloads/NeonPermuteWorkload.hpp
+++ b/src/backends/neon/workloads/NeonPermuteWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,12 +22,6 @@ arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo& input, const T
class NeonPermuteWorkload : public NeonBaseWorkload<PermuteQueueDescriptor>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("NeonPermuteWorkload");
- return name;
- }
-
NeonPermuteWorkload(const PermuteQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
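The static GetName() helper removed above is superseded by an instance-level GetName() on the workload base class: the profiling label is now written out literally at the call site ("NeonPermuteWorkload_Execute"), and the ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID macro introduced later in this patch prefixes that label with this->GetName(). A rough sketch of the assumed base-class shape; the class, member, and parameter names here are illustrative guesses, not the actual Workload.hpp change made elsewhere in the patch:

    #include <string>
    #include <utility>

    // Illustrative sketch only, not the real Arm NN base workload.
    class WorkloadNameSketch
    {
    public:
        explicit WorkloadNameSketch(std::string layerName)
            : m_Name(std::move(layerName)) {}

        // Feeds the *_NAME_GUID profiling macros via this->GetName()
        const std::string& GetName() const { return m_Name; }

    private:
        std::string m_Name;
    };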
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index 6af07f4ea2..780f857479 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,7 +61,7 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
void NeonPooling2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonPooling2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPooling2dWorkload_Execute");
m_PoolingLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonPooling3dWorkload.cpp b/src/backends/neon/workloads/NeonPooling3dWorkload.cpp
index 4bc4f986c3..1436885be7 100644
--- a/src/backends/neon/workloads/NeonPooling3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonPooling3dWorkload.hpp"
@@ -58,7 +58,7 @@ namespace armnn
}
void NeonPooling3dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonPooling3dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPooling3dWorkload_Execute");
m_PoolingLayer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonPreluWorkload.cpp b/src/backends/neon/workloads/NeonPreluWorkload.cpp
index ee680dd090..ae69d2d420 100644
--- a/src/backends/neon/workloads/NeonPreluWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPreluWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,7 +45,7 @@ NeonPreluWorkload::NeonPreluWorkload(const PreluQueueDescriptor& descriptor,
void NeonPreluWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonPreluWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonPreluWorkload_Execute");
m_PreluLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonQLstmWorkload.cpp b/src/backends/neon/workloads/NeonQLstmWorkload.cpp
index 37f9578360..1d470c9569 100644
--- a/src/backends/neon/workloads/NeonQLstmWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -236,7 +236,7 @@ NeonQLstmWorkload::NeonQLstmWorkload(const QLstmQueueDescriptor& descriptor, con
void NeonQLstmWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonQuantizedLstmWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQLstmWorkload_Execute");
m_QLstmLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
index 713126f1b3..707236777e 100644
--- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,7 @@ void NeonQuantizeWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonQuantizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizeWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
index 1872b1f328..5fd44265ca 100644
--- a/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizedLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -124,7 +124,7 @@ NeonQuantizedLstmWorkload::NeonQuantizedLstmWorkload(const QuantizedLstmQueueDes
void NeonQuantizedLstmWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonQuantizedLstmWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizedLstmWorkload_Execute");
m_QuantizedLstmLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonRankWorkload.hpp b/src/backends/neon/workloads/NeonRankWorkload.hpp
index 22bf7af5e5..bd47db85a3 100644
--- a/src/backends/neon/workloads/NeonRankWorkload.hpp
+++ b/src/backends/neon/workloads/NeonRankWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,6 +19,8 @@ public:
using NeonBaseWorkload<RankQueueDescriptor>::NeonBaseWorkload;
virtual void Execute() const override
{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonRankWorkload_Execute");
+
const NeonTensorHandle* neonTensorHandle = PolymorphicDowncast<const NeonTensorHandle*>(m_Data.m_Inputs[0]);
const int32_t rank = static_cast<int32_t>(neonTensorHandle->GetShape().GetNumDimensions());
diff --git a/src/backends/neon/workloads/NeonReduceWorkload.cpp b/src/backends/neon/workloads/NeonReduceWorkload.cpp
index 45166707fd..0dcbf76d57 100644
--- a/src/backends/neon/workloads/NeonReduceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReduceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,7 +71,7 @@ NeonReduceWorkload::NeonReduceWorkload(const ReduceQueueDescriptor& descriptor,
void NeonReduceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonReduceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonReduceWorkload_Execute");
m_Layer.run();
}
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 5e9443cb64..9da6f19e2b 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,7 +39,7 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto
void NeonReshapeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonReshapeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonReshapeWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
index f51d501508..53b0bbb32f 100644
--- a/src/backends/neon/workloads/NeonResizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -89,7 +89,7 @@ NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
void NeonResizeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonResizeWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonResizeWorkload_Execute");
m_ResizeLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
index a5146ca4c6..df6c39a10b 100644
--- a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,7 @@ NeonRsqrtWorkload::NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, con
void NeonRsqrtWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonRsqrtWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonRsqrtWorkload_Execute");
m_RsqrtLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonSinWorkload.cpp b/src/backends/neon/workloads/NeonSinWorkload.cpp
index cba348d789..619bccccc7 100644
--- a/src/backends/neon/workloads/NeonSinWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSinWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ NeonSinWorkload::NeonSinWorkload(const ElementwiseUnaryQueueDescriptor& descript
void NeonSinWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSinWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSinWorkload_Execute");
m_SinLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonSliceWorkload.cpp b/src/backends/neon/workloads/NeonSliceWorkload.cpp
index f8b2e22773..c5bca560b1 100644
--- a/src/backends/neon/workloads/NeonSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -59,7 +59,7 @@ NeonSliceWorkload::NeonSliceWorkload(const SliceQueueDescriptor& descriptor,
void NeonSliceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSliceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSliceWorkload_Execute");
m_SliceFunction.run();
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index f2bc084913..669b24cc1d 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -54,7 +54,7 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
void NeonSoftmaxWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSoftmaxWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSoftmaxWorkload_Execute");
m_SoftmaxLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
index 291fa8110e..a8439a5e96 100644
--- a/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -173,7 +173,7 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
void NeonSpaceToBatchNdWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToBatchNdWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSpaceToBatchNdWorkload_Execute");
if (m_LayerReshapeInput)
{
m_LayerReshapeInput->run();
diff --git a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
index b4eca46188..fd031c047c 100644
--- a/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -60,7 +60,7 @@ void NeonSpaceToDepthWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSpaceToDepthWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSpaceToDepthWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index b5f019105b..c307822325 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -112,7 +112,7 @@ void NeonSplitterWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSplitterWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSplitterWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonSqrtWorkload.cpp b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
index dc6e3a3759..9c3d8a0a99 100644
--- a/src/backends/neon/workloads/NeonSqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -53,7 +53,7 @@ NeonSqrtWorkload::NeonSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descri
void NeonSqrtWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSqrtWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSqrtWorkload_Execute");
m_SqrtLayer.run();
}
diff --git a/src/backends/neon/workloads/NeonStackWorkload.cpp b/src/backends/neon/workloads/NeonStackWorkload.cpp
index 5b4cfbcadd..6dcf073e1c 100644
--- a/src/backends/neon/workloads/NeonStackWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStackWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonStackWorkload.hpp"
@@ -73,7 +73,7 @@ void NeonStackWorkload::Execute() const
{
if (m_Layer)
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonStackWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonStackWorkload_Execute");
m_Layer->run();
}
}
diff --git a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
index 219c40c302..114281b377 100644
--- a/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
+++ b/src/backends/neon/workloads/NeonStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,7 +95,7 @@ NeonStridedSliceWorkload::NeonStridedSliceWorkload(const StridedSliceQueueDescri
void NeonStridedSliceWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonStridedSliceWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonStridedSliceWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index 10716021ab..eb79380b99 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,7 +57,7 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescripto
void NeonSubtractionWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSubtractionWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonSubtractionWorkload_Execute");
m_SubLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonTileWorkload.cpp b/src/backends/neon/workloads/NeonTileWorkload.cpp
index 9b699ef4f1..a43e082ff1 100644
--- a/src/backends/neon/workloads/NeonTileWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTileWorkload.cpp
@@ -40,7 +40,7 @@ NeonTileWorkload::NeonTileWorkload(const armnn::TileQueueDescriptor& descriptor,
void NeonTileWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonTileWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonTileWorkload_Execute");
m_Layer.run();
}
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 310a5dcd23..2fa118b679 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTransposeConvolution2dWorkload.hpp"
@@ -112,7 +112,7 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
void NeonTransposeConvolution2dWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonTransposeConvolution2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonTransposeConvolution2dWorkload_Execute");
m_Layer->run();
}
diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.cpp b/src/backends/neon/workloads/NeonTransposeWorkload.cpp
index c2960c0de5..c71217ce73 100644
--- a/src/backends/neon/workloads/NeonTransposeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ NeonTransposeWorkload::NeonTransposeWorkload(const TransposeQueueDescriptor& des
void NeonTransposeWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(GetName() + "_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonTransposeWorkload_Execute");
m_PermuteFunction.run();
}
diff --git a/src/backends/neon/workloads/NeonTransposeWorkload.hpp b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
index 4d5e6c2633..3769f860c9 100644
--- a/src/backends/neon/workloads/NeonTransposeWorkload.hpp
+++ b/src/backends/neon/workloads/NeonTransposeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,12 +22,6 @@ arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo& input, const
class NeonTransposeWorkload : public NeonBaseWorkload<TransposeQueueDescriptor>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("NeonTransposeWorkload");
- return name;
- }
-
NeonTransposeWorkload(const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
diff --git a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
index 1905bcb659..e48425e3ee 100644
--- a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmFloatWorkload.cpp
@@ -483,7 +483,7 @@ NeonUnidirectionalSequenceLstmFloatWorkload::NeonUnidirectionalSequenceLstmFloat
void NeonUnidirectionalSequenceLstmFloatWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonUnidirectionalSequenceLstmFloatWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonUnidirectionalSequenceLstmFloatWorkload_Execute");
if (m_Permute1)
{
m_Permute1->run();
diff --git a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmWorkload.cpp b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmWorkload.cpp
index dfbbb3c879..8a1747edd1 100644
--- a/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmWorkload.cpp
+++ b/src/backends/neon/workloads/NeonUnidirectionalSequenceLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -464,7 +464,7 @@ NeonUnidirectionalSequenceLstmWorkload::NeonUnidirectionalSequenceLstmWorkload
void NeonUnidirectionalSequenceLstmWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonUnidirectionalSequenceLstmWorkload_Execute", GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonUnidirectionalSequenceLstmWorkload_Execute");
if (m_Permute1)
{
m_Permute1->run();
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 9f8bb9540e..694c3ab928 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -24,7 +24,15 @@
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
guid, \
- name, \
+ GetName() + "_" + name, \
+                                                  armnn::NeonTimer(), \
+                                                  armnn::WallClockTimer())
+
+/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+ this->GetGuid(), \
+ this->GetName() + "_" + label, \
armnn::NeonTimer(), \
armnn::WallClockTimer())
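The new NAME_GUID macro expands to the same instrumented event as the GUID variant, but it pulls both the GUID and the name prefix from the calling workload, so call sites pass only a label string. A minimal sketch of a call site, following the pattern used throughout this patch; NeonExampleWorkload and m_Layer are illustrative names, not a real workload:

    void NeonExampleWorkload::Execute() const
    {
        // Event label expands to this->GetName() + "_" + "NeonExampleWorkload_Execute",
        // tagged with this->GetGuid() and timed with NeonTimer and WallClockTimer.
        ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonExampleWorkload_Execute");
        m_Layer.run();
    }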
diff --git a/src/backends/reference/workloads/RefActivationWorkload.cpp b/src/backends/reference/workloads/RefActivationWorkload.cpp
index bdc637aa5e..5d95dfc212 100644
--- a/src/backends/reference/workloads/RefActivationWorkload.cpp
+++ b/src/backends/reference/workloads/RefActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -28,7 +28,7 @@ void RefActivationWorkload::ExecuteAsync(ExecutionData& executionData)
void RefActivationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefActivationWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefActivationWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
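The Ref workloads switch from the plain CpuRef event, which carried neither a GUID nor the workload name, to a REF_NAME_GUID macro. Its definition is not shown in this part of the patch; assuming it mirrors the Neon macro above with armnn::Compute::CpuRef and without the Neon-specific timer, it would look roughly like this (a sketch, not the actual RefWorkloadUtils.hpp contents):

    #define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
                                                      this->GetGuid(), \
                                                      this->GetName() + "_" + label, \
                                                      armnn::WallClockTimer())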
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index 910ea73644..bf5b4708a3 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefArgMinMaxWorkload::ExecuteAsync(ExecutionData& executionData)
void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefArgMinMaxWorkload_Execute");
const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp b/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
index 027b93b5d9..9a981023a7 100644
--- a/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,7 +29,7 @@ void RefBatchMatMulWorkload::ExecuteAsync(ExecutionData& executionData)
void RefBatchMatMulWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchMatMulWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchMatMulWorkload_Execute");
const TensorInfo& inputXInfo = GetTensorInfo(inputs[0]);
const TensorInfo& inputYInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
index ed99c63b64..ee24bbc4b5 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,7 @@ void RefBatchNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
void RefBatchNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchNormalizationWorkload_Execute");
std::unique_ptr<Decoder<float>> meanDecoder = MakeDecoder<float>(m_Mean->GetTensorInfo(),
m_Mean->Map(true));
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
index 6bb8aff72c..2a2a6a9701 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,7 +24,7 @@ void RefBatchToSpaceNdWorkload::ExecuteAsync(ExecutionData& executionData)
void RefBatchToSpaceNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchToSpaceNdWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefCastWorkload.cpp b/src/backends/reference/workloads/RefCastWorkload.cpp
index 5dce5d9a86..40fbce6f4e 100644
--- a/src/backends/reference/workloads/RefCastWorkload.cpp
+++ b/src/backends/reference/workloads/RefCastWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,7 +39,7 @@ void RefCastWorkload::ExecuteAsync(ExecutionData& executionData)
void RefCastWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefCastWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefCastWorkload_Execute");
TensorInfo inputTensorInfo(GetTensorInfo(inputs[0]));
TensorInfo outputTensorInfo(GetTensorInfo(outputs[0]));
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
index 8d317ba333..c23291d06a 100644
--- a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,7 +29,7 @@ void RefChannelShuffleWorkload::ExecuteAsync(ExecutionData& executionData)
void RefChannelShuffleWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefChannelShuffleWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefChannelShuffleWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp
index 0ce83a99f3..d47efed020 100644
--- a/src/backends/reference/workloads/RefComparisonWorkload.cpp
+++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,7 +57,7 @@ void RefComparisonWorkload::ExecuteAsync(ExecutionData& executionData)
void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefComparisonWorkload_Execute");
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp
index 5aa8f037e5..9f4a999180 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.cpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.cpp
@@ -1,13 +1,12 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefConcatWorkload.hpp"
-
#include "Concatenate.hpp"
-
#include "Profiling.hpp"
+#include "RefWorkloadUtils.hpp"
namespace armnn
{
@@ -25,7 +24,7 @@ void RefConcatWorkload::ExecuteAsync(ExecutionData& executionData)
void RefConcatWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConcatWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConcatWorkload_Execute");
Concatenate(m_Data, inputs, outputs);
}
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 937e5178bb..64b01e2b49 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,9 +33,8 @@ void RefConstantWorkload::ExecuteAsync(ExecutionData& executionData)
void RefConstantWorkload::Execute(std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConstantWorkload_Execute");
memcpy(outputs[0]->Map(), m_Data.m_LayerOutput->GetConstTensor<void>(), GetTensorInfo(outputs[0]).GetNumBytes());
-
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
}
} //namespace armnn
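Besides renaming the event, the RefConstantWorkload hunk moves it above the memcpy. The scoped event is an RAII object that measures from its construction to the end of the enclosing scope, so constructing it after the copy meant the copy itself fell outside the timed region. A minimal illustration of that scoping behaviour; ScopeTimerSketch is an illustrative stand-in, not an Arm NN type:

    #include <chrono>
    #include <cstdio>
    #include <cstring>

    struct ScopeTimerSketch
    {
        std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
        ~ScopeTimerSketch()
        {
            const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - start).count();
            std::printf("scope took %lld us\n", static_cast<long long>(us));
        }
    };

    void CopyWithTiming(char* dst, const char* src, std::size_t n)
    {
        ScopeTimerSketch timer; // constructed before the work, so the copy below is timed
        std::memcpy(dst, src, n);
    }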
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
index fa811e1a32..4bdcfffefa 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,7 +27,7 @@ void RefConvertFp16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
void RefConvertFp16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp16ToFp32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvertFp16ToFp32Workload_Execute");
const Half* const input = reinterpret_cast<const Half*>(inputs[0]->Map());
float* const output = reinterpret_cast<float*>(outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
index 4992e9c07a..3c7c84a950 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -28,7 +28,7 @@ void RefConvertFp32ToFp16Workload::ExecuteAsync(ExecutionData& executionData)
void RefConvertFp32ToFp16Workload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToFp16Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvertFp32ToFp16Workload_Execute");
const float* const input = reinterpret_cast<const float*>(inputs[0]->Map());
Half* const output = reinterpret_cast<Half*>(outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 355d5262df..1adeb6dd93 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,7 @@ void RefConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution2dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvolution2dWorkload_Execute");
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index 3ac7cd7286..0953718a85 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,7 +45,7 @@ void RefConvolution3dWorkload::ExecuteAsync(ExecutionData& executionData)
void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution3dWorkload_Execute", this->GetGuid());
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvolution3dWorkload_Execute");
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index db67b3a782..3653bb6c13 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefDebugWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs) con
{
using T = ResolveType<DataType>;
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 91bc322048..0dd98d2ef3 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
: TypedWorkload<DebugQueueDescriptor, DataType>(descriptor, info)
, m_Callback(nullptr) {}
- static const std::string& GetName()
+ virtual const std::string& GetName() const override
{
static const std::string name = std::string("RefDebug") + GetDataTypeName(DataType) + "Workload";
return name;
@@ -39,7 +39,7 @@ private:
DebugCallbackFunction m_Callback;
};
-using RefDebugBFloat16Workload = RefDebugWorkload<DataType::BFloat16>;
+using RefDebugBFloat16Workload = RefDebugWorkload<DataType::BFloat16>;
using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
using RefDebugQAsymmU8Workload = RefDebugWorkload<DataType::QAsymmU8>;
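[Editor's note, not part of the patch: RefDebugWorkload now overrides the workload's virtual GetName() instead of exposing a static helper, so the new profiling macro can build its event label per instance. A minimal sketch of how the pieces combine, assuming the macro added to RefWorkloadUtils.hpp later in this patch; the expansion shown is illustrative, not preprocessor output.]

    // Sketch only: how the label is composed for the debug workload.
    #include <iostream>
    #include <string>

    int main()
    {
        const std::string workloadName = "RefDebugFloat32Workload"; // what this->GetName() returns
        const std::string label        = "Execute";                 // the macro argument
        // The macro concatenates this->GetName() + "_" + label:
        std::cout << workloadName + "_" + label << std::endl;       // prints "RefDebugFloat32Workload_Execute"
        return 0;
    }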
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
index cb1137847b..2dddbd7de5 100644
--- a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,8 +24,7 @@ void RefDepthToSpaceWorkload::ExecuteAsync(ExecutionData& executionData)
void RefDepthToSpaceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthToSpaceWorkload_Execute");
-
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDepthToSpaceWorkload_Execute");
const TensorInfo inputInfo = GetTensorInfo(inputs[0]);
DepthToSpace(inputInfo,
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index c0677c9bf1..8779c2ed5b 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -50,7 +50,7 @@ void RefDepthwiseConvolution2dWorkload::ExecuteAsync(ExecutionData& executionDat
void RefDepthwiseConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDepthwiseConvolution2dWorkload_Execute");
const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index aa5ff6224a..96a8f66409 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,7 +25,7 @@ void RefDequantizeWorkload::ExecuteAsync(ExecutionData& executionData)
void RefDequantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDequantizeWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
index ba7933b177..b971b5f0c9 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefDetectionPostProcessWorkload::ExecuteAsync(ExecutionData& executionData)
void RefDetectionPostProcessWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDetectionPostProcessWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDetectionPostProcessWorkload_Execute");
const TensorInfo& boxEncodingsInfo = GetTensorInfo(inputs[0]);
const TensorInfo& scoresInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
index e71cdd4e3c..2f30dff211 100644
--- a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
@@ -119,7 +119,7 @@ void RefElementwiseBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
void RefElementwiseBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseBinaryWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefElementwiseBinaryWorkload_Execute");
if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32)
{
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
index f4775e0c19..fa277c6a2e 100644
--- a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
@@ -45,7 +45,7 @@ void RefElementwiseUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
void RefElementwiseUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseUnaryWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefElementwiseUnaryWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 344ca344e3..0cf20985f0 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,7 @@ template <typename Functor, typename ParentDescriptor, typename armnn::StringMap
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute(
std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, StringMapping::Instance().Get(DebugString));
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(StringMapping::Instance().Get(DebugString));
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
index 828204fe07..7780841766 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,7 @@ void RefFakeQuantizationFloat32Workload::ExecuteAsync(ExecutionData& executionDa
void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFakeQuantizationFloat32Workload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefFillWorkload.cpp b/src/backends/reference/workloads/RefFillWorkload.cpp
index a0f0c6b30e..cb431fe64c 100644
--- a/src/backends/reference/workloads/RefFillWorkload.cpp
+++ b/src/backends/reference/workloads/RefFillWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,7 +27,7 @@ void RefFillWorkload::ExecuteAsync(ExecutionData& executionData)
void RefFillWorkload::Execute(std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFillWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFillWorkload_Execute");
const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefFloorWorkload.cpp b/src/backends/reference/workloads/RefFloorWorkload.cpp
index d02e529d04..300c9070c4 100644
--- a/src/backends/reference/workloads/RefFloorWorkload.cpp
+++ b/src/backends/reference/workloads/RefFloorWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefFloorWorkload::ExecuteAsync(ExecutionData& executionData)
void RefFloorWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFloorFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFloatWorkload_Execute");
const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index 734d7f3503..42737e2af6 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ void RefFullyConnectedWorkload::ExecuteAsync(ExecutionData& executionData)
void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFullyConnectedWorkload_Execute");
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefGatherNdWorkload.cpp b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
index 9a9478c3dc..6d98d54a77 100644
--- a/src/backends/reference/workloads/RefGatherNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefGatherNdWorkload::ExecuteAsync(ExecutionData& executionData)
void RefGatherNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefGatherNdWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefGatherNdWorkload_Execute");
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefGatherWorkload.cpp b/src/backends/reference/workloads/RefGatherWorkload.cpp
index 55a4c0961d..129dcf1b27 100644
--- a/src/backends/reference/workloads/RefGatherWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefGatherWorkload::ExecuteAsync(ExecutionData& executionData)
void RefGatherWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefGatherWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefGatherWorkload_Execute");
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
index dd4fbf3ccd..16d0547d4d 100644
--- a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefInstanceNormalizationWorkload::ExecuteAsync(ExecutionData& executionData
void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefInstanceNormalizationWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefInstanceNormalizationWorkload_Execute");
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]),
inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index bce8f245f5..2b64becb26 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,7 @@ void RefL2NormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
void RefL2NormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefL2NormalizationWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a21eb459a7..e45d24a0bd 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,7 @@ void RefLogSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
void RefLogSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogSoftmaxWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogSoftmaxWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
index b132061008..4a67832f3d 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ void RefLogicalBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
void RefLogicalBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogicalBinaryWorkload_Execute");
const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
index a84af442ab..90f306a175 100644
--- a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,7 @@ void RefLogicalUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
void RefLogicalUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalUnaryWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogicalUnaryWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLstmWorkload.cpp b/src/backends/reference/workloads/RefLstmWorkload.cpp
index 3879051a5b..075aa80419 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,6 +52,8 @@ void RefLstmWorkload::ExecuteAsync(ExecutionData& executionData)
void RefLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLstmWorkload_Execute");
+
// This is a porting of the LSTM::Eval() method in the Android code base
// Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp
diff --git a/src/backends/reference/workloads/RefMeanWorkload.cpp b/src/backends/reference/workloads/RefMeanWorkload.cpp
index 5d73a43a80..38c6017a58 100644
--- a/src/backends/reference/workloads/RefMeanWorkload.cpp
+++ b/src/backends/reference/workloads/RefMeanWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,7 @@ void RefMeanWorkload::ExecuteAsync(ExecutionData& executionData)
void RefMeanWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefMeanWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 40c9a6f449..170b1bdfa5 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -174,7 +174,7 @@ void RefNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
void RefNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefNormalizationWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 9bc4efa919..c515942b30 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefPadWorkload::ExecuteAsync(ExecutionData& executionData)
void RefPadWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPadWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPadWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index e0e3b4fbd8..a2069a8ffe 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefPermuteWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs,
{
using T = ResolveType<DataType>;
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPermuteWorkload_Execute");
const ITensorHandle* src = inputs[0];
ITensorHandle* dst = outputs[0];
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index c6b8e3b12d..58f7c8efb5 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,12 +16,6 @@ template <armnn::DataType DataType>
class RefPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataType>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("RefPermute") + GetDataTypeName(DataType) + "Workload";
- return name;
- }
-
using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data;
using TypedWorkload<PermuteQueueDescriptor, DataType>::TypedWorkload;
void Execute() const override;
diff --git a/src/backends/reference/workloads/RefPooling2dWorkload.cpp b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
index 9dc9a3568a..e4870567de 100644
--- a/src/backends/reference/workloads/RefPooling2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefPooling2dWorkload::ExecuteAsync(ExecutionData& executionData)
void RefPooling2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling2dWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPooling2dWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPooling3dWorkload.cpp b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
index 5f1eda2dab..4108b883cf 100644
--- a/src/backends/reference/workloads/RefPooling3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefPooling3dWorkload::ExecuteAsync(ExecutionData& executionData)
void RefPooling3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling3dWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPooling3dWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPreluWorkload.cpp b/src/backends/reference/workloads/RefPreluWorkload.cpp
index efe7a4c239..a99e2d3b04 100644
--- a/src/backends/reference/workloads/RefPreluWorkload.cpp
+++ b/src/backends/reference/workloads/RefPreluWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,8 +31,7 @@ void RefPreluWorkload::ExecuteAsync(ExecutionData& executionData)
void RefPreluWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPreluWorkload_Execute");
-
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPreluWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& alphaInfo = GetTensorInfo(inputs[1]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.cpp b/src/backends/reference/workloads/RefQLstmWorkload.cpp
index 398faa9074..a5f939668b 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -56,6 +56,8 @@ void RefQLstmWorkload::ExecuteAsync(ExecutionData& executionData)
void RefQLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefQLstmWorkload_Execute");
+
// This is a porting of the QLSTM::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs)
// method in the Android code base
// Note: this implementation wraps the arithmetic functions of the LSTM cell in Quantize/Dequantize ops, so all
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index e54ab456cd..4468cd7a94 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,6 +47,8 @@ void RefQuantizeWorkload::ExecuteAsync(ExecutionData& executionData)
void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefQuantizeWorkload_Execute");
+
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefRankWorkload.hpp b/src/backends/reference/workloads/RefRankWorkload.hpp
index 48109529f0..a806fe0df4 100644
--- a/src/backends/reference/workloads/RefRankWorkload.hpp
+++ b/src/backends/reference/workloads/RefRankWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,12 +25,13 @@ public:
void ExecuteAsync(ExecutionData& executionData) override
{
WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
- Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefRankWorkload_Execute");
const int32_t rank = static_cast<int32_t>(GetTensorInfo(inputs[0]).GetNumDimensions());
std::memcpy(outputs[0]->Map(), &rank, sizeof(int32_t));
diff --git a/src/backends/reference/workloads/RefReduceWorkload.cpp b/src/backends/reference/workloads/RefReduceWorkload.cpp
index b4b8952923..05f4cc5c7a 100644
--- a/src/backends/reference/workloads/RefReduceWorkload.cpp
+++ b/src/backends/reference/workloads/RefReduceWorkload.cpp
@@ -1,6 +1,6 @@
//
// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
-// Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefReduceWorkload::ExecuteAsync(ExecutionData& executionData)
void RefReduceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReduceWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReduceWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefReshapeWorkload.cpp b/src/backends/reference/workloads/RefReshapeWorkload.cpp
index a93645e2ea..213bd10cfd 100644
--- a/src/backends/reference/workloads/RefReshapeWorkload.cpp
+++ b/src/backends/reference/workloads/RefReshapeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,7 +25,7 @@ void RefReshapeWorkload::ExecuteAsync(ExecutionData& executionData)
void RefReshapeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReshapeWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReshapeWorkload_Execute");
void* output = outputs[0]->Map();
const void* input = inputs[0]->Map();
diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp
index 39a2a29878..284f9d14f3 100644
--- a/src/backends/reference/workloads/RefResizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefResizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,7 @@ void RefResizeWorkload::ExecuteAsync(ExecutionData& executionData)
void RefResizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefResizeWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp
index 22d5449466..b0d2f445b5 100644
--- a/src/backends/reference/workloads/RefReverseV2Workload.cpp
+++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp
@@ -29,7 +29,7 @@ namespace armnn
void RefReverseV2Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReverseV2Workload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& axisInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefShapeWorkload.hpp b/src/backends/reference/workloads/RefShapeWorkload.hpp
index bc4d50ac92..fa36f49003 100644
--- a/src/backends/reference/workloads/RefShapeWorkload.hpp
+++ b/src/backends/reference/workloads/RefShapeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,6 +30,8 @@ public:
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefShapeWorkload_Execute");
+
const TensorShape Shape = GetTensorInfo(inputs[0]).GetShape();
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
index 60c3950c32..ca8c2a0169 100644
--- a/src/backends/reference/workloads/RefSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ void RefSliceWorkload::ExecuteAsync(ExecutionData& executionData)
void RefSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSliceWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
index f2579ce388..f8034b5b01 100644
--- a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,7 @@ void RefSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
void RefSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSoftmaxWorkload_Execute");
const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
index d29c2c801e..5cb387207a 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
@@ -24,7 +24,7 @@ void RefSpaceToBatchNdWorkload::ExecuteAsync(ExecutionData& executionData)
void RefSpaceToBatchNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToBatchNdWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSpaceToBatchNdWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
index e8dd052e94..1b1afca11a 100644
--- a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,7 +25,7 @@ void RefSpaceToDepthWorkload::ExecuteAsync(ExecutionData& executionData)
void RefSpaceToDepthWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToDepthWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSpaceToDepthWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefSplitterWorkload.cpp b/src/backends/reference/workloads/RefSplitterWorkload.cpp
index 93b393b243..dcd7d6cb39 100644
--- a/src/backends/reference/workloads/RefSplitterWorkload.cpp
+++ b/src/backends/reference/workloads/RefSplitterWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,7 +24,8 @@ void RefSplitterWorkload::ExecuteAsync(ExecutionData& executionData)
void RefSplitterWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSplitterWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSplitterWorkload_Execute");
+
Split(m_Data, inputs, outputs);
}
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index e35c2d52c6..f24c6e6e2e 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,7 @@ void RefStackWorkload::ExecuteAsync(ExecutionData& executionData)
void RefStackWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStackWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStackWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index f5ca0c18d7..c4a4f7f593 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -28,7 +28,7 @@ void RefStridedSliceWorkload::ExecuteAsync(ExecutionData& executionData)
void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStridedSliceWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefTileWorkload.cpp b/src/backends/reference/workloads/RefTileWorkload.cpp
index 9fa8c8c3d3..9062f49c26 100644
--- a/src/backends/reference/workloads/RefTileWorkload.cpp
+++ b/src/backends/reference/workloads/RefTileWorkload.cpp
@@ -28,7 +28,7 @@ namespace armnn
void RefTileWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTileWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTileWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefTileWorkload.hpp b/src/backends/reference/workloads/RefTileWorkload.hpp
index 2fb8eab05e..f168192a18 100644
--- a/src/backends/reference/workloads/RefTileWorkload.hpp
+++ b/src/backends/reference/workloads/RefTileWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
index 1269b3ff04..16ecab1878 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ void RefTransposeConvolution2dWorkload::ExecuteAsync(ExecutionData& executionDat
void RefTransposeConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTransposeConvolution2dWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTransposeConvolution2dWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 6c94e7d2c8..3c679e85eb 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ void RefTransposeWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs,
{
using T = ResolveType<DataType>;
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTransposeWorkload_Execute");
const ITensorHandle* src = inputs[0];
ITensorHandle* dst = outputs[0];
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index db4f683699..97ebbd80ae 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -16,12 +16,6 @@ template <armnn::DataType DataType>
class RefTransposeWorkload : public TypedWorkload<TransposeQueueDescriptor, DataType>
{
public:
- static const std::string& GetName()
- {
- static const std::string name = std::string("RefTranspose") + GetDataTypeName(DataType) + "Workload";
- return name;
- }
-
using TypedWorkload<TransposeQueueDescriptor, DataType>::m_Data;
using TypedWorkload<TransposeQueueDescriptor, DataType>::TypedWorkload;
void Execute() const override;
diff --git a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
index 23022d076c..c7a4b76964 100644
--- a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,6 +57,8 @@ void RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(ExecutionData& executio
void RefUnidirectionalSequenceLstmWorkload::Execute(std::vector<ITensorHandle*> inputs,
std::vector<ITensorHandle*> outputs) const
{
+ ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefUnidirectionalSequenceLstmWorkload_Execute");
+
TensorInfo inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputStateInfo = GetTensorInfo(inputs[1]);
const TensorInfo& cellStateInfo = GetTensorInfo(inputs[2]);
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index 7c35966f0f..c840887fc0 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,6 +18,12 @@
namespace armnn
{
+/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
+#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
+ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
+ this->GetGuid(), \
+ this->GetName() + "_" + label, \
+ armnn::WallClockTimer())
////////////////////////////////////////////
/// float32 helpers
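[Editor's note, not part of the patch: the helper macro added above builds the event label from the workload's own name plus a literal suffix, and attributes it to CpuRef with the workload's GUID and a WallClockTimer. A minimal usage sketch, assuming (per the commit message) that GetName() returns the name of the layer that created the workload; the layer name "conv1" below is hypothetical.]

    // Illustrative only. Assuming this->GetName() == "conv1", the call below
    // expands (roughly) to
    //   ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef,
    //                                                 this->GetGuid(),
    //                                                 std::string("conv1") + "_" + "RefSoftmaxWorkload_Execute",
    //                                                 armnn::WallClockTimer());
    // so the profiling timeline shows "conv1_RefSoftmaxWorkload_Execute" on CpuRef.
    void RefSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                     std::vector<ITensorHandle*> outputs) const
    {
        ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSoftmaxWorkload_Execute");
        // ... existing implementation unchanged ...
    }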