ArmNN 24.02
NeonComparisonWorkload.cpp
Go to the documentation of this file.
//
// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonComparisonWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/TensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

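// Checks support ahead of workload creation: converts the ArmNN tensor infos and
// descriptor to their Compute Library equivalents and defers the decision to
// arm_compute::NEElementwiseComparison::validate().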
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   const ComparisonDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput0 = BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput1 = BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);

    const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(descriptor);

    const arm_compute::Status aclStatus = arm_compute::NEElementwiseComparison::validate(&aclInput0,
                                                                                         &aclInput1,
                                                                                         &aclOutput,
                                                                                         comparisonOperation);
    return aclStatus;
}

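// The constructor does the one-off work: it validates the expected input/output counts,
// extracts the underlying ACL tensors from the ArmNN tensor handles and configures the
// NEElementwiseComparison layer. Execute() then only has to run the configured layer.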
NeonComparisonWorkload::NeonComparisonWorkload(const ComparisonQueueDescriptor& descriptor, const WorkloadInfo& info)
    : NeonBaseWorkload<ComparisonQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonComparisonWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Data.ValidateInputsOutputs("NeonComparisonWorkload", 2, 1);

    arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters);

    m_ComparisonLayer.configure(&input0, &input1, &output, comparisonOperation);
}

void NeonComparisonWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonComparisonWorkload_Execute");
    m_ComparisonLayer.run();
}

} //namespace armnn
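A minimal usage sketch, not part of the file above: a caller such as a backend layer-support check would build the TensorInfos and a ComparisonDescriptor and ask NeonComparisonWorkloadValidate whether the NEON backend can run the comparison before creating the workload. The function name, shapes, data types and local variable names below are illustrative assumptions.

#include "NeonComparisonWorkload.hpp"   // declares NeonComparisonWorkloadValidate
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: returns true if a Greater comparison of two 2x4 FP32
// tensors is supported by the Compute Library NEON backend.
bool IsGreaterComparisonSupportedOnNeon()
{
    armnn::TensorInfo inputInfo({ 2, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({ 2, 4 }, armnn::DataType::Boolean);
    armnn::ComparisonDescriptor comparisonDescriptor(armnn::ComparisonOperation::Greater);

    arm_compute::Status status = armnn::NeonComparisonWorkloadValidate(inputInfo,
                                                                       inputInfo,
                                                                       outputInfo,
                                                                       comparisonDescriptor);

    // Compute Library reports support through the returned Status object.
    return status.error_code() == arm_compute::ErrorCode::OK;
}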