ArmNN 23.11
RefComparisonWorkload.cpp
//
// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefComparisonWorkload.hpp"

#include "Decoders.hpp"
#include "ElementwiseFunction.hpp"
#include "Encoders.hpp"
#include "RefWorkloadUtils.hpp"

#include <Profiling.hpp>

#include <armnn/TypesUtils.hpp>

#include <functional>

namespace armnn
{

RefComparisonWorkload::RefComparisonWorkload(const ComparisonQueueDescriptor& descriptor,
                                             const WorkloadInfo& info)
    : RefBaseWorkload<ComparisonQueueDescriptor>(descriptor, info)
{}

void RefComparisonWorkload::PostAllocationConfigure()
{
    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefComparisonWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
                                                    std::vector<ITensorHandle*> outputs)
{
    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    m_Input0 = MakeDecoder<InType>(inputInfo0);
    m_Input1 = MakeDecoder<InType>(inputInfo1);

    m_Output = MakeEncoder<OutType>(outputInfo);
}

void RefComparisonWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefComparisonWorkload::ExecuteAsync(ExecutionData& executionData)
{
    // The asynchronous path takes its tensor handles from ExecutionData rather than m_Data.
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);

    PostAllocationConfigure(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefComparisonWorkload_Execute");

    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    m_Input0->Reset(inputs[0]->Map());
    m_Input1->Reset(inputs[1]->Map());
    m_Output->Reset(outputs[0]->Map());

    using EqualFunction          = ElementwiseBinaryFunction<std::equal_to<InType>>;
    using GreaterFunction        = ElementwiseBinaryFunction<std::greater<InType>>;
    using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
    using LessFunction           = ElementwiseBinaryFunction<std::less<InType>>;
    using LessOrEqualFunction    = ElementwiseBinaryFunction<std::less_equal<InType>>;
    using NotEqualFunction       = ElementwiseBinaryFunction<std::not_equal_to<InType>>;

    switch (m_Data.m_Parameters.m_Operation)
    {
        case ComparisonOperation::Equal:
        {
            EqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Greater:
        {
            GreaterFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::GreaterOrEqual:
        {
            GreaterOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Less:
        {
            LessFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::LessOrEqual:
        {
            LessOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::NotEqual:
        {
            NotEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        default:
        {
            throw InvalidArgumentException(std::string("Unsupported comparison operation ") +
                                           GetComparisonOperationAsCString(m_Data.m_Parameters.m_Operation),
                                           CHECK_LOCATION());
        }
    }
}

} // namespace armnn
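
For context, each ComparisonOperation above is paired with a standard functor from <functional> and executed through a single templated ElementwiseBinaryFunction, with the decoders and encoder handling the per-element reads and writes and the input/output shapes passed so the implementation can apply broadcasting. The standalone sketch below mirrors only the functor-dispatch part of that pattern for same-sized inputs; the CompareOp enum and elementwiseCompare helper are illustrative names, not part of the ArmNN API.

// Standalone sketch of the functor-dispatch pattern; not ArmNN code.
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <vector>

enum class CompareOp { Equal, Greater, GreaterOrEqual, Less, LessOrEqual, NotEqual };

// Applies a binary comparison functor element by element over two same-sized
// inputs, storing each result as 0/1.
template <typename Comparison>
std::vector<uint8_t> elementwiseCompare(const std::vector<float>& in0,
                                        const std::vector<float>& in1)
{
    if (in0.size() != in1.size())
    {
        throw std::invalid_argument("Inputs must have the same number of elements");
    }
    Comparison comp;
    std::vector<uint8_t> out(in0.size());
    for (std::size_t i = 0; i < in0.size(); ++i)
    {
        out[i] = comp(in0[i], in1[i]) ? 1 : 0;
    }
    return out;
}

// Dispatches on the requested operation, as the switch in Execute() does.
std::vector<uint8_t> compare(CompareOp op,
                             const std::vector<float>& in0,
                             const std::vector<float>& in1)
{
    switch (op)
    {
        case CompareOp::Equal:          return elementwiseCompare<std::equal_to<float>>(in0, in1);
        case CompareOp::Greater:        return elementwiseCompare<std::greater<float>>(in0, in1);
        case CompareOp::GreaterOrEqual: return elementwiseCompare<std::greater_equal<float>>(in0, in1);
        case CompareOp::Less:           return elementwiseCompare<std::less<float>>(in0, in1);
        case CompareOp::LessOrEqual:    return elementwiseCompare<std::less_equal<float>>(in0, in1);
        case CompareOp::NotEqual:       return elementwiseCompare<std::not_equal_to<float>>(in0, in1);
        default: throw std::invalid_argument("Unsupported comparison operation");
    }
}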