ArmNN 21.02
RefComparisonWorkload.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefComparisonWorkload.hpp"

#include "Decoders.hpp"
#include "ElementwiseFunction.hpp"
#include "Encoders.hpp"
#include "RefWorkloadUtils.hpp"

#include <Profiling.hpp>

#include <armnn/TypesUtils.hpp>

#include <functional>

namespace armnn
{

RefComparisonWorkload::RefComparisonWorkload(const ComparisonQueueDescriptor& desc,
                                             const WorkloadInfo& info)
    : BaseWorkload<ComparisonQueueDescriptor>(desc, info)
{}

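// PostAllocationConfigure() binds a typed Decoder to each input and an Encoder
// to the output once the tensor infos are known; Execute() then only has to
// point them at the mapped tensor memory.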
void RefComparisonWorkload::PostAllocationConfigure()
{
    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    m_Input0 = MakeDecoder<InType>(inputInfo0);
    m_Input1 = MakeDecoder<InType>(inputInfo1);

    m_Output = MakeEncoder<OutType>(outputInfo);
}

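// Execute() selects the ElementwiseBinaryFunction specialisation that matches
// the requested ComparisonOperation and runs it over the mapped input and
// output tensors.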
void RefComparisonWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");

    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
    m_Output->Reset(m_Data.m_Outputs[0]->Map());

    using EqualFunction          = ElementwiseBinaryFunction<std::equal_to<InType>>;
    using GreaterFunction        = ElementwiseBinaryFunction<std::greater<InType>>;
    using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
    using LessFunction           = ElementwiseBinaryFunction<std::less<InType>>;
    using LessOrEqualFunction    = ElementwiseBinaryFunction<std::less_equal<InType>>;
    using NotEqualFunction       = ElementwiseBinaryFunction<std::not_equal_to<InType>>;

    switch (m_Data.m_Parameters.m_Operation)
    {
        case ComparisonOperation::Equal:
        {
            EqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Greater:
        {
            GreaterFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::GreaterOrEqual:
        {
            GreaterOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Less:
        {
            LessFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::LessOrEqual:
        {
            LessOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::NotEqual:
        {
            NotEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        default:
        {
            throw InvalidArgumentException(std::string("Unsupported comparison operation ") +
                                           GetComparisonOperationAsCString(m_Data.m_Parameters.m_Operation),
                                           CHECK_LOCATION());
        }
    }
}

} // namespace armnn
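ElementwiseBinaryFunction (from ElementwiseFunction.hpp) walks the input and output shapes and applies the given comparison functor element by element, broadcasting between the two input shapes where needed. The following is a minimal standalone sketch of that dispatch pattern; the helper and its names are illustrative only and omit ArmNN's Decoder/Encoder and broadcasting machinery.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Simplified stand-in for ElementwiseBinaryFunction: applies a binary
// comparison functor element by element over two equal-length inputs.
// The real ArmNN helper also handles shape broadcasting and reads and
// writes through Decoder/Encoder objects rather than raw vectors.
template <typename Functor>
void ElementwiseCompare(const std::vector<float>& in0,
                        const std::vector<float>& in1,
                        std::vector<uint8_t>& out)
{
    Functor f;
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        out[i] = f(in0[i], in1[i]) ? 1 : 0;
    }
}

int main()
{
    std::vector<float> a = { 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<float> b = { 4.0f, 2.0f, 1.0f, 5.0f };
    std::vector<uint8_t> result(a.size());

    // Equivalent of the ComparisonOperation::Greater branch above.
    ElementwiseCompare<std::greater<float>>(a, b, result); // 0, 0, 1, 0
    return 0;
}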
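For context, this reference workload is what ultimately runs when a comparison layer executes on the CpuRef backend. Below is a rough usage sketch against the public ArmNN 21.02 API; it assumes the usual INetwork/IRuntime flow (AddComparisonLayer, Optimize, LoadNetwork, EnqueueWorkload), and the exact signatures should be checked against the installed headers.

#include <armnn/ArmNN.hpp>

#include <cstdint>
#include <vector>

int main()
{
    using namespace armnn;

    // Build a network with a single Greater comparison layer.
    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input0 = network->AddInputLayer(0);
    IConnectableLayer* input1 = network->AddInputLayer(1);

    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
    IConnectableLayer* comparison = network->AddComparisonLayer(descriptor, "greater");

    IConnectableLayer* output = network->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(comparison->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(comparison->GetInputSlot(1));
    comparison->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    TensorInfo inputInfo({ 1, 4 }, DataType::Float32);
    TensorInfo outputInfo({ 1, 4 }, DataType::Boolean);
    input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
    input1->GetOutputSlot(0).SetTensorInfo(inputInfo);
    comparison->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Optimize for the reference backend and load it into a runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optimized = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optimized));

    // Run one inference: result[i] = (a[i] > b[i]).
    std::vector<float>   a = { 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<float>   b = { 4.0f, 2.0f, 1.0f, 5.0f };
    std::vector<uint8_t> result(4);

    InputTensors inputs
    {
        { 0, ConstTensor(runtime->GetInputTensorInfo(networkId, 0), a.data()) },
        { 1, ConstTensor(runtime->GetInputTensorInfo(networkId, 1), b.data()) }
    };
    OutputTensors outputs
    {
        { 0, Tensor(runtime->GetOutputTensorInfo(networkId, 0), result.data()) }
    };

    runtime->EnqueueWorkload(networkId, inputs, outputs);

    return 0;
}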