From ae050524109f1ce827962665436ef7430f2ac479 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Wed, 22 Mar 2023 16:48:58 +0000
Subject: IVGCVSW-7255 Update Doxygen Documentation and publish on GitHub.

* Updating Doxygen documentation for 23.02 release.

Signed-off-by: David Monahan
Change-Id: I545574ff7664b4595d2fe6a91a3c35d2ad55df82
---
 23.02/_ref_comparison_workload_8cpp_source.xhtml | 228 +++++++++++++++++------
 1 file changed, 176 insertions(+), 52 deletions(-)
(limited to '23.02/_ref_comparison_workload_8cpp_source.xhtml')

diff --git a/23.02/_ref_comparison_workload_8cpp_source.xhtml b/23.02/_ref_comparison_workload_8cpp_source.xhtml
index da4f138c0b..9df03cb1fb 100644
--- a/23.02/_ref_comparison_workload_8cpp_source.xhtml
+++ b/23.02/_ref_comparison_workload_8cpp_source.xhtml
RefComparisonWorkload.cpp
Go to the documentation of this file.

//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefComparisonWorkload.hpp"

#include "Decoders.hpp"
#include "ElementwiseFunction.hpp"
#include "Encoders.hpp"
#include "RefWorkloadUtils.hpp"

#include <Profiling.hpp>

#include <armnn/TypesUtils.hpp>

#include <functional>

namespace armnn
{

RefComparisonWorkload::RefComparisonWorkload(const ComparisonQueueDescriptor& desc,
                                             const WorkloadInfo& info)
    : RefBaseWorkload<ComparisonQueueDescriptor>(desc, info)
{}

void RefComparisonWorkload::PostAllocationConfigure()
{
    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefComparisonWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
                                                    std::vector<ITensorHandle*> outputs)
{
    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    // Decoders present the two inputs as float values; the encoder writes the
    // boolean output tensor.
    m_Input0 = MakeDecoder<InType>(inputInfo0);
    m_Input1 = MakeDecoder<InType>(inputInfo1);

    m_Output = MakeEncoder<OutType>(outputInfo);
}

void RefComparisonWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefComparisonWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);

    PostAllocationConfigure(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefComparisonWorkload_Execute");

    const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]);
    const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    const TensorShape& inShape0 = inputInfo0.GetShape();
    const TensorShape& inShape1 = inputInfo1.GetShape();
    const TensorShape& outShape = outputInfo.GetShape();

    m_Input0->Reset(inputs[0]->Map());
    m_Input1->Reset(inputs[1]->Map());
    m_Output->Reset(outputs[0]->Map());

    // Each comparison is an ElementwiseBinaryFunction specialised with the
    // matching standard-library functor.
    using EqualFunction          = ElementwiseBinaryFunction<std::equal_to<InType>>;
    using GreaterFunction        = ElementwiseBinaryFunction<std::greater<InType>>;
    using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>;
    using LessFunction           = ElementwiseBinaryFunction<std::less<InType>>;
    using LessOrEqualFunction    = ElementwiseBinaryFunction<std::less_equal<InType>>;
    using NotEqualFunction       = ElementwiseBinaryFunction<std::not_equal_to<InType>>;

    switch (m_Data.m_Parameters.m_Operation)
    {
        case ComparisonOperation::Equal:
        {
            EqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Greater:
        {
            GreaterFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::GreaterOrEqual:
        {
            GreaterOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::Less:
        {
            LessFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::LessOrEqual:
        {
            LessOrEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        case ComparisonOperation::NotEqual:
        {
            NotEqualFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
            break;
        }
        default:
        {
            throw InvalidArgumentException(std::string("Unsupported comparison operation ") +
                                           GetComparisonOperationAsCString(m_Data.m_Parameters.m_Operation),
                                           CHECK_LOCATION());
        }
    }
}

} // namespace armnn
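The listing above is essentially a thin dispatch layer: each ComparisonOperation value selects an ElementwiseBinaryFunction instantiated with the matching standard-library functor, which then walks the input shapes and writes a boolean result through the encoder. A rough, self-contained sketch of the same idea follows; Comparison, ApplyComparison and Compare are illustrative names only (not ArmNN API), and broadcasting plus the decoder/encoder plumbing are deliberately omitted.

// Self-contained illustration of the functor-dispatch pattern; not ArmNN API.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <vector>

enum class Comparison { Equal, Greater, GreaterOrEqual, Less, LessOrEqual, NotEqual };

// Apply a binary comparison functor element by element.
// (ArmNN's ElementwiseBinaryFunction additionally broadcasts mismatched shapes.)
template <typename Functor>
void ApplyComparison(const std::vector<float>& in0,
                     const std::vector<float>& in1,
                     std::vector<std::uint8_t>& out)
{
    Functor op;
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        out[i] = op(in0[i], in1[i]) ? 1 : 0;
    }
}

// Mirror of the switch in RefComparisonWorkload::Execute(): pick the functor
// that matches the requested comparison and run it over the whole tensor.
void Compare(Comparison operation,
             const std::vector<float>& in0,
             const std::vector<float>& in1,
             std::vector<std::uint8_t>& out)
{
    switch (operation)
    {
        case Comparison::Equal:          ApplyComparison<std::equal_to<float>>(in0, in1, out);      break;
        case Comparison::Greater:        ApplyComparison<std::greater<float>>(in0, in1, out);       break;
        case Comparison::GreaterOrEqual: ApplyComparison<std::greater_equal<float>>(in0, in1, out); break;
        case Comparison::Less:           ApplyComparison<std::less<float>>(in0, in1, out);          break;
        case Comparison::LessOrEqual:    ApplyComparison<std::less_equal<float>>(in0, in1, out);    break;
        case Comparison::NotEqual:       ApplyComparison<std::not_equal_to<float>>(in0, in1, out);  break;
        default: throw std::invalid_argument("Unsupported comparison operation");
    }
}

int main()
{
    std::vector<float>        a = {1.0f, 5.0f, 3.0f};
    std::vector<float>        b = {2.0f, 2.0f, 3.0f};
    std::vector<std::uint8_t> out(a.size());

    Compare(Comparison::Greater, a, b, out); // out now holds {0, 1, 0}
    return 0;
}

In the real workload the same role is played by ElementwiseBinaryFunction together with MakeDecoder/MakeEncoder, which also handle quantized tensors and broadcasting between mismatched input shapes.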
-- cgit v1.2.1