path: root/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
Diffstat (limited to 'src/backends/neon/workloads/NeonSoftmaxWorkload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxWorkload.cpp | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index 505844e24a..da20479d82 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -34,6 +34,12 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : BaseWorkload<SoftmaxQueueDescriptor>(descriptor, info)
{
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSoftmaxWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         info,
+                                         this->GetGuid());
+
    m_Data.ValidateInputsOutputs("NeonSoftmaxWorkload", 1, 1);

    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
@@ -48,7 +54,7 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
void NeonSoftmaxWorkload::Execute() const
{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonSoftmaxWorkload_Execute", this->GetGuid());
    m_SoftmaxLayer->run();
}
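
For reference, the two profiling macros introduced above follow the same pattern across the Neon workloads: ARMNN_REPORT_PROFILING_WORKLOAD_DESC records the descriptor parameters, the workload info and the workload GUID at construction time, and ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID ties the Execute() scope to that same GUID. A minimal sketch of the pattern, using a hypothetical NeonExampleWorkload that is not part of this change, might look like:

NeonExampleWorkload::NeonExampleWorkload(const ExampleQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<ExampleQueueDescriptor>(descriptor, info)
{
    // Hypothetical illustration: report descriptor parameters, workload info and GUID at construction.
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonExampleWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());
}

void NeonExampleWorkload::Execute() const
{
    // Hypothetical illustration: scope the execution profiling event with this workload's GUID.
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonExampleWorkload_Execute", this->GetGuid());
}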