ArmNN
 22.08
RefComparisonWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include "Decoders.hpp"
10 #include "Encoders.hpp"
11 #include "RefWorkloadUtils.hpp"
12 
13 #include <Profiling.hpp>
14 
15 #include <armnn/TypesUtils.hpp>
16 
17 #include <functional>
18 
19 namespace armnn
20 {
21 
// NOTE(review): extraction gap — the opening line of the constructor is
// missing here. The cross-reference list at the bottom of this page gives
// the declared signature as:
//   RefComparisonWorkload(const ComparisonQueueDescriptor& descriptor,
//                         const WorkloadInfo& info)
// The member-initializer list (base-class forwarding) is not visible in
// this listing — TODO confirm against the repository before relying on it.
23  const WorkloadInfo& info)
25 {}
26 
// NOTE(review): the signature and body line of this member function were
// also lost in extraction. Presumably this is the parameterless
// PostAllocationConfigure() overload forwarding to the two-argument
// overload below — verify against the repository.
28 {
30 }
31 
32 void RefComparisonWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
33  std::vector<ITensorHandle*> outputs)
34 {
35  const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
36  const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
37  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
38 
39  m_Input0 = MakeDecoder<InType>(inputInfo0);
40  m_Input1 = MakeDecoder<InType>(inputInfo1);
41 
42  m_Output = MakeEncoder<OutType>(outputInfo);
43 }
44 
// NOTE(review): extraction gap — the signature line and the single body
// line of this member function are missing from the listing. Presumably
// this is the parameterless Execute() const override forwarding the
// queue descriptor's own input/output handles to the two-argument
// Execute() below — TODO confirm against the repository.
46 {
48 }
49 
51 {
52  WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
53 
54  PostAllocationConfigure(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
55  Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
56 }
57 
58 void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
59 {
60  ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");
61 
62  const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
63  const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
64  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
65 
66  const TensorShape& inShape0 = inputInfo0.GetShape();
67  const TensorShape& inShape1 = inputInfo1.GetShape();
68  const TensorShape& outShape = outputInfo.GetShape();
69 
70  m_Input0->Reset(inputs[0]->Map());
71  m_Input1->Reset(inputs[1]->Map());
72  m_Output->Reset(outputs[0]->Map());
73 
75  using GreaterFunction = ElementwiseBinaryFunction<std::greater<InType>>;
76  using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
77  using LessFunction = ElementwiseBinaryFunction<std::less<InType>>;
78  using LessOrEqualFunction = ElementwiseBinaryFunction<std::less_equal<InType>>;
80 
82  {
84  {
85  EqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
86  break;
87  }
89  {
90  GreaterFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
91  break;
92  }
94  {
95  GreaterOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
96  break;
97  }
99  {
100  LessFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
101  break;
102  }
104  {
105  LessOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
106  break;
107  }
109  {
110  NotEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
111  break;
112  }
113  default:
114  {
115  throw InvalidArgumentException(std::string("Unsupported comparison operation ") +
117  }
118  }
119 }
120 
121 } // namespace armnn
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
CPU Execution: Reference C++ kernels.
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
void ExecuteAsync(ExecutionData &executionData) override
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
RefComparisonWorkload(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:57
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers