Diffstat (limited to 'src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
index ba5c9000f4..8a9743298b 100644
--- a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
@@ -35,6 +35,12 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
                                                std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
     : BaseWorkload<LogSoftmaxQueueDescriptor>(descriptor, info)
 {
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonLogSoftmaxWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         info,
+                                         this->GetGuid());
+
     m_Data.ValidateInputsOutputs("NeonLogSoftmaxWorkload", 1, 1);
 
     arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
@@ -48,7 +54,7 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
 
 void NeonLogSoftmaxWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogSoftmaxWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLogSoftmaxWorkload_Execute", this->GetGuid());
     m_LogSoftmaxLayer->run();
 }