Diffstat (limited to 'src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp')
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
index 5e2925ca02..027b508ad5 100644
--- a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp
@@ -10,12 +10,12 @@ namespace armnn
 NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor,
     const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : Float32Workload<SoftmaxQueueDescriptor>(descriptor, info)
+    : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
     , m_SoftmaxLayer(memoryManager)
 {
     m_Data.ValidateInputsOutputs("NeonSoftmaxFloat32Workload", 1, 1);
 
-    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions
+    // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
@@ -24,7 +24,7 @@ NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescrip
 void NeonSoftmaxFloat32Workload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSoftmaxFloat32Workload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSoftmaxFloat32Workload_Execute");
     m_SoftmaxLayer.run();
 }
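Notes on the two changes above (editorial context, not part of the commit):

The base class moves from Float32Workload to FloatWorkload. Elsewhere in ArmNN these are thin aliases over a variadic TypedWorkload; a minimal sketch of the likely distinction follows, where the alias definitions are assumptions rather than text taken from this commit:

    // Sketch only: Float32Workload validates tensors as FP32 exclusively,
    // while FloatWorkload widens validation to accept FP16 as well as FP32.
    template <typename QueueDescriptor>
    using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>;

    template <typename QueueDescriptor>
    using FloatWorkload = TypedWorkload<QueueDescriptor,
                                        armnn::DataType::Float16,
                                        armnn::DataType::Float32>;

The profiling change swaps the generic ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, ...) for the backend-specific ARMNN_SCOPED_PROFILING_EVENT_NEON(...). Presumably the NEON variant is a convenience wrapper that pins the compute device, along these lines (assumed definition, not verified against this commit):

    // Assumed expansion: behaviour at the call site is unchanged.
    #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
        ARMNN_SCOPED_PROFILING_EVENT(armnn::Compute::CpuAcc, name)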