From 7cbe78140a274cec783049051df7c7298b974f13 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Tue, 25 Jul 2023 17:37:33 +0100
Subject: MLCE-1092 Add Names to Workloads

* Added names to Workloads.
* Workloads will be given the name of the Layer that created them.
* Added new profiling macros to CL, Neon and Ref that add the workload name
  to the event label.
* Updated workloads to use the new macros.
* Added missing profiling to Rank Workloads.
* Fixed an issue where ClConvolution2dWorkload was reported as Undefined
  rather than GpuAcc.

Signed-off-by: Mike Kelly
Change-Id: I0a55eab6c2f455b73943aca8e99a247c3cb2a906
---
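[Editor's note: the wiring the first two bullets describe can be pictured as
below. This is a minimal sketch only, not the verbatim ArmNN change; the
member and parameter names are assumptions made for illustration.]

    // Sketch: the layer's name travels to the workload through the
    // WorkloadInfo passed at workload-creation time, so profiling events
    // can label themselves with the originating layer.
    #include <string>

    struct WorkloadInfo
    {
        // existing fields elided...
        std::string m_Name;    // assumed: populated from Layer::GetNameStr()
    };

    class BaseWorkload
    {
    public:
        explicit BaseWorkload(const WorkloadInfo& info)
            : m_Name(info.m_Name) {}

        // The new profiling macros combine this with the event label.
        const std::string& GetName() const { return m_Name; }

    private:
        std::string m_Name;
    };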
 src/backends/reference/workloads/RefActivationWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefArgMinMaxWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefBatchMatMulWorkload.cpp | 4 ++--
 .../reference/workloads/RefBatchNormalizationWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefCastWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefChannelShuffleWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefComparisonWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefConcatWorkload.cpp | 7 +++----
 src/backends/reference/workloads/RefConstantWorkload.cpp | 5 ++---
 src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp | 4 ++--
 src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp | 4 ++--
 src/backends/reference/workloads/RefConvolution2dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefConvolution3dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefDebugWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefDebugWorkload.hpp | 6 +++---
 src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp | 5 ++---
 .../reference/workloads/RefDepthwiseConvolution2dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefDequantizeWorkload.cpp | 4 ++--
 .../reference/workloads/RefDetectionPostProcessWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp | 2 +-
 src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp | 2 +-
 src/backends/reference/workloads/RefElementwiseWorkload.cpp | 4 ++--
 .../reference/workloads/RefFakeQuantizationFloat32Workload.cpp | 4 ++--
 src/backends/reference/workloads/RefFillWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefFloorWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefFullyConnectedWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefGatherNdWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefGatherWorkload.cpp | 4 ++--
 .../reference/workloads/RefInstanceNormalizationWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefL2NormalizationWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefLstmWorkload.cpp | 4 +++-
 src/backends/reference/workloads/RefMeanWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefNormalizationWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefPadWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefPermuteWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefPermuteWorkload.hpp | 8 +-------
 src/backends/reference/workloads/RefPooling2dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefPooling3dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefPreluWorkload.cpp | 5 ++---
 src/backends/reference/workloads/RefQLstmWorkload.cpp | 4 +++-
 src/backends/reference/workloads/RefQuantizeWorkload.cpp | 4 +++-
 src/backends/reference/workloads/RefRankWorkload.hpp | 5 +++--
 src/backends/reference/workloads/RefReduceWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefReshapeWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefResizeWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefReverseV2Workload.cpp | 2 +-
 src/backends/reference/workloads/RefShapeWorkload.hpp | 4 +++-
 src/backends/reference/workloads/RefSliceWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefSoftmaxWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp | 2 +-
 src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefSplitterWorkload.cpp | 5 +++--
 src/backends/reference/workloads/RefStackWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefTileWorkload.cpp | 2 +-
 src/backends/reference/workloads/RefTileWorkload.hpp | 2 +-
 .../reference/workloads/RefTransposeConvolution2dWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefTransposeWorkload.cpp | 4 ++--
 src/backends/reference/workloads/RefTransposeWorkload.hpp | 6 ------
 .../reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp | 4 +++-
 src/backends/reference/workloads/RefWorkloadUtils.hpp | 8 +++++++-
 65 files changed, 135 insertions(+), 133 deletions(-)

(limited to 'src/backends/reference/workloads')

diff --git a/src/backends/reference/workloads/RefActivationWorkload.cpp b/src/backends/reference/workloads/RefActivationWorkload.cpp
index bdc637aa5e..5d95dfc212 100644
--- a/src/backends/reference/workloads/RefActivationWorkload.cpp
+++ b/src/backends/reference/workloads/RefActivationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -28,7 +28,7 @@ void RefActivationWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefActivationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefActivationWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefActivationWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index 910ea73644..bf5b4708a3 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefArgMinMaxWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefArgMinMaxWorkload_Execute");
 
     const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp b/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
index 027b93b5d9..9a981023a7 100644
--- a/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchMatMulWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -29,7 +29,7 @@ void RefBatchMatMulWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefBatchMatMulWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchMatMulWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchMatMulWorkload_Execute");
 
     const TensorInfo& inputXInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& inputYInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
index ed99c63b64..ee24bbc4b5 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -36,7 +36,7 @@ void RefBatchNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefBatchNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchNormalizationWorkload_Execute");
 
     std::unique_ptr<Decoder<float>> meanDecoder = MakeDecoder<float>(m_Mean->GetTensorInfo(), m_Mean->Map(true));
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
index 6bb8aff72c..2a2a6a9701 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -24,7 +24,7 @@ void RefBatchToSpaceNdWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefBatchToSpaceNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchToSpaceNdWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBatchToSpaceNdWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefCastWorkload.cpp b/src/backends/reference/workloads/RefCastWorkload.cpp
index 5dce5d9a86..40fbce6f4e 100644
--- a/src/backends/reference/workloads/RefCastWorkload.cpp
+++ b/src/backends/reference/workloads/RefCastWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -39,7 +39,7 @@ void RefCastWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefCastWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefCastWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefCastWorkload_Execute");
 
     TensorInfo inputTensorInfo(GetTensorInfo(inputs[0]));
     TensorInfo outputTensorInfo(GetTensorInfo(outputs[0]));
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
index 8d317ba333..c23291d06a 100644
--- a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -29,7 +29,7 @@ void RefChannelShuffleWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefChannelShuffleWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefChannelShuffleWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefChannelShuffleWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp
index 0ce83a99f3..d47efed020 100644
--- a/src/backends/reference/workloads/RefComparisonWorkload.cpp
+++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -57,7 +57,7 @@ void RefComparisonWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefComparisonWorkload_Execute");
 
     const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
     const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp
index 5aa8f037e5..9f4a999180 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.cpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.cpp
@@ -1,13 +1,12 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "RefConcatWorkload.hpp"
-
 #include "Concatenate.hpp"
-
 #include "Profiling.hpp"
+#include "RefWorkloadUtils.hpp"
 
 namespace armnn
 {
@@ -25,7 +24,7 @@ void RefConcatWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefConcatWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConcatWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConcatWorkload_Execute");
 
     Concatenate(m_Data, inputs, outputs);
 }
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 937e5178bb..64b01e2b49 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -33,9 +33,8 @@ void RefConstantWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefConstantWorkload::Execute(std::vector<ITensorHandle*> outputs) const
 {
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConstantWorkload_Execute");
     memcpy(outputs[0]->Map(), m_Data.m_LayerOutput->GetConstTensor(), GetTensorInfo(outputs[0]).GetNumBytes());
-
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantWorkload_Execute");
 }
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
index fa811e1a32..4bdcfffefa 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -27,7 +27,7 @@ void RefConvertFp16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
 void RefConvertFp16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp16ToFp32Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvertFp16ToFp32Workload_Execute");
 
     const Half* const input = reinterpret_cast<const Half*>(inputs[0]->Map());
     float* const output = reinterpret_cast<float*>(outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
index 4992e9c07a..3c7c84a950 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -28,7 +28,7 @@ void RefConvertFp32ToFp16Workload::ExecuteAsync(ExecutionData& executionData)
 void RefConvertFp32ToFp16Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToFp16Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvertFp32ToFp16Workload_Execute");
 
     const float* const input = reinterpret_cast<const float*>(inputs[0]->Map());
     Half* const output = reinterpret_cast<Half*>(outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 355d5262df..1adeb6dd93 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -43,7 +43,7 @@ void RefConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution2dWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvolution2dWorkload_Execute");
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index 3ac7cd7286..0953718a85 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -45,7 +45,7 @@ void RefConvolution3dWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution3dWorkload_Execute", this->GetGuid());
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefConvolution3dWorkload_Execute");
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index db67b3a782..3653bb6c13 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefDebugWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs) con
 {
     using T = ResolveType<DataType>;
 
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 91bc322048..0dd98d2ef3 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -20,7 +20,7 @@ public:
     : TypedWorkload<DebugQueueDescriptor, DataType>(descriptor, info)
     , m_Callback(nullptr)
     {}
-    static const std::string& GetName()
+    virtual const std::string& GetName() const override
     {
         static const std::string name = std::string("RefDebug") + GetDataTypeName(DataType) + "Workload";
         return name;
@@ -39,7 +39,7 @@ private:
     DebugCallbackFunction m_Callback;
 };
 
-using RefDebugBFloat16Workload = RefDebugWorkload<DataType::BFloat16>;
+using RefDebugBFloat16Workload = RefDebugWorkload<DataType::BFloat16>;
 using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
 using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
 using RefDebugQAsymmU8Workload = RefDebugWorkload<DataType::QAsymmU8>;
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
index cb1137847b..2dddbd7de5 100644
--- a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -24,8 +24,7 @@ void RefDepthToSpaceWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefDepthToSpaceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthToSpaceWorkload_Execute");
-
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDepthToSpaceWorkload_Execute");
     const TensorInfo inputInfo = GetTensorInfo(inputs[0]);
 
     DepthToSpace(inputInfo,
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index c0677c9bf1..8779c2ed5b 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -50,7 +50,7 @@ void RefDepthwiseConvolution2dWorkload::ExecuteAsync(ExecutionData& executionDat
 void RefDepthwiseConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDepthwiseConvolution2dWorkload_Execute");
 
     const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
     const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index aa5ff6224a..96a8f66409 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -25,7 +25,7 @@ void RefDequantizeWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefDequantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDequantizeWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
index ba7933b177..b971b5f0c9 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefDetectionPostProcessWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefDetectionPostProcessWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDetectionPostProcessWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefDetectionPostProcessWorkload_Execute");
 
     const TensorInfo& boxEncodingsInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& scoresInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
index e71cdd4e3c..2f30dff211 100644
--- a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
@@ -119,7 +119,7 @@ void RefElementwiseBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefElementwiseBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseBinaryWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefElementwiseBinaryWorkload_Execute");
 
     if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32)
     {
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
index f4775e0c19..fa277c6a2e 100644
--- a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
@@ -45,7 +45,7 @@ void RefElementwiseUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefElementwiseUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseUnaryWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefElementwiseUnaryWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 344ca344e3..0cf20985f0 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -43,7 +43,7 @@ template <typename Functor, typename ParentDescriptor, typename armnn::StringMa
 void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute(
     std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, StringMapping::Instance().Get(DebugString));
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(StringMapping::Instance().Get(DebugString));
     const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
     const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
index 828204fe07..7780841766 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -40,7 +40,7 @@ void RefFakeQuantizationFloat32Workload::ExecuteAsync(ExecutionData& executionDa
 void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFakeQuantizationFloat32Workload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefFillWorkload.cpp b/src/backends/reference/workloads/RefFillWorkload.cpp
index a0f0c6b30e..cb431fe64c 100644
--- a/src/backends/reference/workloads/RefFillWorkload.cpp
+++ b/src/backends/reference/workloads/RefFillWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -27,7 +27,7 @@ void RefFillWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefFillWorkload::Execute(std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFillWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFillWorkload_Execute");
 
     const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
 
diff --git a/src/backends/reference/workloads/RefFloorWorkload.cpp b/src/backends/reference/workloads/RefFloorWorkload.cpp
index d02e529d04..300c9070c4 100644
--- a/src/backends/reference/workloads/RefFloorWorkload.cpp
+++ b/src/backends/reference/workloads/RefFloorWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefFloorWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefFloorWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFloorFloat32Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFloorWorkload_Execute");
 
     const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
     std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index 734d7f3503..42737e2af6 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -47,7 +47,7 @@ void RefFullyConnectedWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFullyConnectedWorkload_Execute");
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefGatherNdWorkload.cpp b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
index 9a9478c3dc..6d98d54a77 100644
--- a/src/backends/reference/workloads/RefGatherNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefGatherNdWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefGatherNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefGatherNdWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefGatherNdWorkload_Execute");
 
     const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
     const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefGatherWorkload.cpp b/src/backends/reference/workloads/RefGatherWorkload.cpp
index 55a4c0961d..129dcf1b27 100644
--- a/src/backends/reference/workloads/RefGatherWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefGatherWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefGatherWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefGatherWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefGatherWorkload_Execute");
 
     const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
     const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
index dd4fbf3ccd..16d0547d4d 100644
--- a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefInstanceNormalizationWorkload::ExecuteAsync(ExecutionData& executionData
 void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefInstanceNormalizationWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefInstanceNormalizationWorkload_Execute");
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index bce8f245f5..2b64becb26 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -37,7 +37,7 @@ void RefL2NormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefL2NormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefL2NormalizationWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a21eb459a7..e45d24a0bd 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -30,7 +30,7 @@ void RefLogSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefLogSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogSoftmaxWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogSoftmaxWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
index b132061008..4a67832f3d 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -35,7 +35,7 @@ void RefLogicalBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefLogicalBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogicalBinaryWorkload_Execute");
 
     const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
     const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
index a84af442ab..90f306a175 100644
--- a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -35,7 +35,7 @@ void RefLogicalUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefLogicalUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalUnaryWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLogicalUnaryWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefLstmWorkload.cpp b/src/backends/reference/workloads/RefLstmWorkload.cpp
index 3879051a5b..075aa80419 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -52,6 +52,8 @@ void RefLstmWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefLstmWorkload_Execute");
+
     // This is a porting of the LSTM::Eval() method in the Android code base
     // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp
 
diff --git a/src/backends/reference/workloads/RefMeanWorkload.cpp b/src/backends/reference/workloads/RefMeanWorkload.cpp
index 5d73a43a80..38c6017a58 100644
--- a/src/backends/reference/workloads/RefMeanWorkload.cpp
+++ b/src/backends/reference/workloads/RefMeanWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -31,7 +31,7 @@ void RefMeanWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefMeanWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMeanWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefMeanWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 40c9a6f449..170b1bdfa5 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -174,7 +174,7 @@ void RefNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefNormalizationWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 9bc4efa919..c515942b30 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefPadWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefPadWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPadWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPadWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index e0e3b4fbd8..a2069a8ffe 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefPermuteWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs,
 {
     using T = ResolveType<DataType>;
 
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPermuteWorkload_Execute");
 
     const ITensorHandle* src = inputs[0];
     ITensorHandle* dst = outputs[0];
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index c6b8e3b12d..58f7c8efb5 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -16,12 +16,6 @@ template <armnn::DataType DataType>
 class RefPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataType>
 {
 public:
-    static const std::string& GetName()
-    {
-        static const std::string name = std::string("RefPermute") + GetDataTypeName(DataType) + "Workload";
-        return name;
-    }
-
     using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data;
     using TypedWorkload<PermuteQueueDescriptor, DataType>::TypedWorkload;
     void Execute() const override;
diff --git a/src/backends/reference/workloads/RefPooling2dWorkload.cpp b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
index 9dc9a3568a..e4870567de 100644
--- a/src/backends/reference/workloads/RefPooling2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefPooling2dWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefPooling2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPooling2dWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPooling3dWorkload.cpp b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
index 5f1eda2dab..4108b883cf 100644
--- a/src/backends/reference/workloads/RefPooling3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefPooling3dWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefPooling3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling3dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPooling3dWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefPreluWorkload.cpp b/src/backends/reference/workloads/RefPreluWorkload.cpp
index efe7a4c239..a99e2d3b04 100644
--- a/src/backends/reference/workloads/RefPreluWorkload.cpp
+++ b/src/backends/reference/workloads/RefPreluWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -31,8 +31,7 @@ void RefPreluWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefPreluWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPreluWorkload_Execute");
-
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefPreluWorkload_Execute");
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& alphaInfo = GetTensorInfo(inputs[1]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.cpp b/src/backends/reference/workloads/RefQLstmWorkload.cpp
index 398faa9074..a5f939668b 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -56,6 +56,8 @@ void RefQLstmWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefQLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefQLstmWorkload_Execute");
+
     // This is a porting of the QLSTM::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs)
     // method in the Android code base
     // Note: this implementation wraps the arithmetic functions of the LSTM cell in Quantize/Dequantize ops, so all
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index e54ab456cd..4468cd7a94 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -47,6 +47,8 @@ void RefQuantizeWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefQuantizeWorkload_Execute");
+
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefRankWorkload.hpp b/src/backends/reference/workloads/RefRankWorkload.hpp
index 48109529f0..a806fe0df4 100644
--- a/src/backends/reference/workloads/RefRankWorkload.hpp
+++ b/src/backends/reference/workloads/RefRankWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -25,12 +25,13 @@ public:
     void ExecuteAsync(ExecutionData& executionData) override
     {
         WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
-        Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+        Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
     }
 
 private:
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
     {
+        ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefRankWorkload_Execute");
         const int32_t rank = static_cast<int32_t>(GetTensorInfo(inputs[0]).GetNumDimensions());
 
         std::memcpy(outputs[0]->Map(), &rank, sizeof(int32_t));
diff --git a/src/backends/reference/workloads/RefReduceWorkload.cpp b/src/backends/reference/workloads/RefReduceWorkload.cpp
index b4b8952923..05f4cc5c7a 100644
--- a/src/backends/reference/workloads/RefReduceWorkload.cpp
+++ b/src/backends/reference/workloads/RefReduceWorkload.cpp
@@ -1,6 +1,6 @@
 //
 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
-// Copyright © 2021-2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefReduceWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefReduceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReduceWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReduceWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefReshapeWorkload.cpp b/src/backends/reference/workloads/RefReshapeWorkload.cpp
index a93645e2ea..213bd10cfd 100644
--- a/src/backends/reference/workloads/RefReshapeWorkload.cpp
+++ b/src/backends/reference/workloads/RefReshapeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -25,7 +25,7 @@ void RefReshapeWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefReshapeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReshapeWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReshapeWorkload_Execute");
 
     void* output = outputs[0]->Map();
     const void* input = inputs[0]->Map();
diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp
index 39a2a29878..284f9d14f3 100644
--- a/src/backends/reference/workloads/RefResizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefResizeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -30,7 +30,7 @@ void RefResizeWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefResizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefResizeWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefReverseV2Workload.cpp b/src/backends/reference/workloads/RefReverseV2Workload.cpp
index 22d5449466..b0d2f445b5 100644
--- a/src/backends/reference/workloads/RefReverseV2Workload.cpp
+++ b/src/backends/reference/workloads/RefReverseV2Workload.cpp
@@ -29,7 +29,7 @@ namespace armnn
 void RefReverseV2Workload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReverseV2Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefReverseV2Workload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& axisInfo = GetTensorInfo(inputs[1]);
diff --git a/src/backends/reference/workloads/RefShapeWorkload.hpp b/src/backends/reference/workloads/RefShapeWorkload.hpp
index bc4d50ac92..fa36f49003 100644
--- a/src/backends/reference/workloads/RefShapeWorkload.hpp
+++ b/src/backends/reference/workloads/RefShapeWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -30,6 +30,8 @@ public:
 private:
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
     {
+        ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefShapeWorkload_Execute");
+
        const TensorShape Shape = GetTensorInfo(inputs[0]).GetShape();
        const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
index 60c3950c32..ca8c2a0169 100644
--- a/src/backends/reference/workloads/RefSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -26,7 +26,7 @@ void RefSliceWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSliceWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
index f2579ce388..f8034b5b01 100644
--- a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -30,7 +30,7 @@ void RefSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSoftmaxWorkload_Execute");
 
     const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
 
diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
index d29c2c801e..5cb387207a 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
@@ -24,7 +24,7 @@ void RefSpaceToBatchNdWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefSpaceToBatchNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToBatchNdWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSpaceToBatchNdWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
index e8dd052e94..1b1afca11a 100644
--- a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -25,7 +25,7 @@ void RefSpaceToDepthWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefSpaceToDepthWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSpaceToDepthWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSpaceToDepthWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
diff --git a/src/backends/reference/workloads/RefSplitterWorkload.cpp b/src/backends/reference/workloads/RefSplitterWorkload.cpp
index 93b393b243..dcd7d6cb39 100644
--- a/src/backends/reference/workloads/RefSplitterWorkload.cpp
+++ b/src/backends/reference/workloads/RefSplitterWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -24,7 +24,8 @@ void RefSplitterWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefSplitterWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSplitterWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefSplitterWorkload_Execute");
+
     Split(m_Data, inputs, outputs);
 }
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index e35c2d52c6..f24c6e6e2e 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -31,7 +31,7 @@ void RefStackWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefStackWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStackWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStackWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index f5ca0c18d7..c4a4f7f593 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -28,7 +28,7 @@ void RefStridedSliceWorkload::ExecuteAsync(ExecutionData& executionData)
 void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStridedSliceWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefTileWorkload.cpp b/src/backends/reference/workloads/RefTileWorkload.cpp
index 9fa8c8c3d3..9062f49c26 100644
--- a/src/backends/reference/workloads/RefTileWorkload.cpp
+++ b/src/backends/reference/workloads/RefTileWorkload.cpp
@@ -28,7 +28,7 @@ namespace armnn
 void RefTileWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTileWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTileWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
diff --git a/src/backends/reference/workloads/RefTileWorkload.hpp b/src/backends/reference/workloads/RefTileWorkload.hpp
index 2fb8eab05e..f168192a18 100644
--- a/src/backends/reference/workloads/RefTileWorkload.hpp
+++ b/src/backends/reference/workloads/RefTileWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
index 1269b3ff04..16ecab1878 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -47,7 +47,7 @@ void RefTransposeConvolution2dWorkload::ExecuteAsync(ExecutionData& executionDat
 void RefTransposeConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefTransposeConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTransposeConvolution2dWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 6c94e7d2c8..3c679e85eb 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -32,7 +32,7 @@ void RefTransposeWorkload<DataType>::Execute(std::vector<ITensorHandle*> inputs,
 {
     using T = ResolveType<DataType>;
 
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefTransposeWorkload_Execute");
 
     const ITensorHandle* src = inputs[0];
     ITensorHandle* dst = outputs[0];
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index db4f683699..97ebbd80ae 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -16,12 +16,6 @@ template <armnn::DataType DataType>
 class RefTransposeWorkload : public TypedWorkload<TransposeQueueDescriptor, DataType>
 {
 public:
-    static const std::string& GetName()
-    {
-        static const std::string name = std::string("RefTranspose") + GetDataTypeName(DataType) + "Workload";
-        return name;
-    }
-
     using TypedWorkload<TransposeQueueDescriptor, DataType>::m_Data;
     using TypedWorkload<TransposeQueueDescriptor, DataType>::TypedWorkload;
     void Execute() const override;
diff --git a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
index 23022d076c..c7a4b76964 100644
--- a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -57,6 +57,8 @@ void RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(ExecutionData& executio
 void RefUnidirectionalSequenceLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefUnidirectionalSequenceLstmWorkload_Execute");
+
     TensorInfo inputInfo = GetTensorInfo(inputs[0]);
     const TensorInfo& outputStateInfo = GetTensorInfo(inputs[1]);
     const TensorInfo& cellStateInfo = GetTensorInfo(inputs[2]);
diff --git a/src/backends/reference/workloads/RefWorkloadUtils.hpp b/src/backends/reference/workloads/RefWorkloadUtils.hpp
index 7c35966f0f..c840887fc0 100644
--- a/src/backends/reference/workloads/RefWorkloadUtils.hpp
+++ b/src/backends/reference/workloads/RefWorkloadUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -18,6 +18,12 @@
 namespace armnn
 {
 
+/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
+#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
+ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
+                                              this->GetGuid(), \
+                                              this->GetName() + "_" + label, \
+                                              armnn::WallClockTimer())
 
 ////////////////////////////////////////////
 /// float32 helpers
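
For reference, here is a sketch of what the new REF macro does at a typical call site. It is illustrative only: the workload class and the layer name "conv1" are hypothetical, and it assumes GetName() returns the name of the Layer that created the workload, as introduced by this change.

    // Inside a workload member function such as RefActivationWorkload::Execute(),
    // ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefActivationWorkload_Execute")
    // expands, in effect, to:
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(
        armnn::Compute::CpuRef,                  // backend the event is reported against
        this->GetGuid(),                         // GUID of this workload
        this->GetName() + "_" + "RefActivationWorkload_Execute",
        armnn::WallClockTimer());                // wall-clock instrument
    // A workload created by a layer named "conv1" is therefore profiled as
    // "conv1_RefActivationWorkload_Execute" rather than under a generic label.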