ArmNN
 22.05.01
RefComparisonWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
#include "RefComparisonWorkload.hpp"

#include "Decoders.hpp"
#include "Encoders.hpp"
#include "RefWorkloadUtils.hpp"

#include <Profiling.hpp>

#include <armnn/TypesUtils.hpp>

#include <functional>
18 
19 namespace armnn
20 {
21 
23  const WorkloadInfo& info)
25 {}
26 
28 {
30 }
31 
32 void RefComparisonWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
33  std::vector<ITensorHandle*> outputs)
34 {
35  const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
36  const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
37  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
38 
39  m_Input0 = MakeDecoder<InType>(inputInfo0);
40  m_Input1 = MakeDecoder<InType>(inputInfo1);
41 
42  m_Output = MakeEncoder<OutType>(outputInfo);
43 }
44 
46 {
48 }
49 
51 {
52  PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
53 
54  Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
55 }
56 
57 void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
58 {
59  ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");
60 
61  const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
62  const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
63  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
64 
65  const TensorShape& inShape0 = inputInfo0.GetShape();
66  const TensorShape& inShape1 = inputInfo1.GetShape();
67  const TensorShape& outShape = outputInfo.GetShape();
68 
69  m_Input0->Reset(inputs[0]->Map());
70  m_Input1->Reset(inputs[1]->Map());
71  m_Output->Reset(outputs[0]->Map());
72 
74  using GreaterFunction = ElementwiseBinaryFunction<std::greater<InType>>;
75  using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
76  using LessFunction = ElementwiseBinaryFunction<std::less<InType>>;
77  using LessOrEqualFunction = ElementwiseBinaryFunction<std::less_equal<InType>>;
79 
81  {
83  {
84  EqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
85  break;
86  }
88  {
89  GreaterFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
90  break;
91  }
93  {
94  GreaterOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
95  break;
96  }
98  {
99  LessFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
100  break;
101  }
103  {
104  LessOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
105  break;
106  }
108  {
109  NotEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
110  break;
111  }
112  default:
113  {
114  throw InvalidArgumentException(std::string("Unsupported comparison operation ") +
116  }
117  }
118 }
119 
120 } // namespace armnn
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
CPU Execution: Reference C++ kernels.
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
RefComparisonWorkload(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &info)
std::vector< ITensorHandle * > m_Outputs
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:57
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers