From 549b9600a6eaf0727fa084465a75f173edf8f381 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Tue, 24 May 2022 11:32:07 +0100
Subject: Update 22.05 Doxygen Docs after updates to main Readme

Signed-off-by: Nikhil Raj
Change-Id: I56711772406a41ff81fa136a5fb6c59c9b9cf504
---
 22.05/_arg_min_max_test_impl_8hpp.xhtml | 396 ++++++++++++++++++++++++++++++++
 1 file changed, 396 insertions(+)
 create mode 100644 22.05/_arg_min_max_test_impl_8hpp.xhtml

(limited to '22.05/_arg_min_max_test_impl_8hpp.xhtml')

diff --git a/22.05/_arg_min_max_test_impl_8hpp.xhtml b/22.05/_arg_min_max_test_impl_8hpp.xhtml
new file mode 100644
index 0000000000..0a4434f438
--- /dev/null
+++ b/22.05/_arg_min_max_test_impl_8hpp.xhtml
@@ -0,0 +1,396 @@

ArmNN: src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp File Reference
ArgMinMaxTestImpl.hpp File Reference

Go to the source code of this file.
Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMinSimpleTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMinChannelTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMaxChannelTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMaxHeightTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< int32_t, 3 > ArgMinWidthTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
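These helpers all exercise the ArgMinMax layer: each builds a small input tensor, reduces it with Arg Min/Max along one axis, and compares the resulting int32 index tensor against hand-computed expectations. As a standalone illustration of that semantics, the sketch below (plain C++, not part of the Arm NN sources; the name NaiveArgMinMax is hypothetical) computes the index of the largest or smallest element along one axis of an NCHW tensor and reproduces the expected outputs of ArgMaxSimpleTest and ArgMaxChannelTest documented below.

// Standalone sketch: naive arg-min/arg-max along one axis of a 4D (NCHW) tensor.
// Illustrative only; it mirrors the semantics verified on this page and uses no Arm NN code.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int32_t> NaiveArgMinMax(const std::vector<float>& data,
                                    const std::vector<unsigned>& shape, // e.g. {1, 3, 2, 4}
                                    unsigned axis,
                                    bool takeMax)
{
    // Strides for a densely packed NCHW tensor.
    std::vector<unsigned> strides(4, 1);
    for (int i = 2; i >= 0; --i) { strides[i] = strides[i + 1] * shape[i + 1]; }

    // The output drops the reduced axis.
    std::vector<unsigned> outShape;
    for (unsigned d = 0; d < 4; ++d) { if (d != axis) { outShape.push_back(shape[d]); } }

    std::vector<int32_t> result;
    for (unsigned a = 0; a < outShape[0]; ++a)
    for (unsigned b = 0; b < outShape[1]; ++b)
    for (unsigned c = 0; c < outShape[2]; ++c)
    {
        unsigned idx[4];
        unsigned out[3] = { a, b, c };
        for (unsigned d = 0, o = 0; d < 4; ++d) { idx[d] = (d == axis) ? 0 : out[o++]; }

        // Scan along the reduced axis, remembering the winning index.
        int32_t best = 0;
        float bestVal = data[idx[0] * strides[0] + idx[1] * strides[1] +
                             idx[2] * strides[2] + idx[3] * strides[3]];
        for (unsigned k = 1; k < shape[axis]; ++k)
        {
            idx[axis] = k;
            float v = data[idx[0] * strides[0] + idx[1] * strides[1] +
                           idx[2] * strides[2] + idx[3] * strides[3]];
            if (takeMax ? (v > bestVal) : (v < bestVal))
            {
                bestVal = v;
                best = static_cast<int32_t>(k);
            }
        }
        result.push_back(best);
    }
    return result;
}

int main()
{
    // ArgMaxSimpleTest: shape {1,1,1,5}, axis 3 (axis -1 wraps to 3) -> { 3 }.
    assert(NaiveArgMinMax({5.f, 2.f, 8.f, 10.f, 9.f}, {1, 1, 1, 5}, 3, true)
           == std::vector<int32_t>({3}));

    // ArgMaxChannelTest: shape {1,3,2,4}, axis 1 -> all indices are 2, because the
    // third channel (values 100..800) dominates every (height, width) position.
    std::vector<float> in = {   1.f,   2.f,   3.f,   4.f,   5.f,   6.f,   7.f,   8.f,
                               10.f,  20.f,  30.f,  40.f,  50.f,  60.f,  70.f,  80.f,
                              100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f };
    assert(NaiveArgMinMax(in, {1, 3, 2, 4}, 1, true) == std::vector<int32_t>(8, 2));
    return 0;
}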

Function Documentation

◆ ArgMaxChannelTest()

LayerTestResult< int32_t, 3 > ArgMaxChannelTest (armnn::IWorkloadFactory & workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                 const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 158 of file ArgMinMaxTestImpl.cpp.

References armnn::Max, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
                                        2, 2, 2, 2 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}
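The expected output is eight 2s because the reduction runs over axis 1, the channel axis of the 1x3x2x4 input, and the third channel (values 100–800) is the largest at every (height, width) position. A minimal standalone check of that reasoning (plain C++, not Arm NN code):

#include <algorithm>
#include <cassert>
#include <cstddef>

int main()
{
    // The three channels of the 1x3x2x4 input, flattened per channel.
    const float c0[8] = {   1.f,   2.f,   3.f,   4.f,   5.f,   6.f,   7.f,   8.f };
    const float c1[8] = {  10.f,  20.f,  30.f,  40.f,  50.f,  60.f,  70.f,  80.f };
    const float c2[8] = { 100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f };

    for (std::size_t i = 0; i < 8; ++i)
    {
        const float candidates[3] = { c0[i], c1[i], c2[i] };
        // Index of the largest value across the channel axis; channel 2 wins everywhere.
        assert(std::max_element(candidates, candidates + 3) - candidates == 2);
    }
    return 0;
}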
◆ ArgMaxHeightTest()

LayerTestResult< int32_t, 3 > ArgMaxHeightTest (armnn::IWorkloadFactory & workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 194 of file ArgMinMaxTestImpl.cpp.

References armnn::Max, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 1, 1, 1, 1,
                                        1, 1, 1, 1,
                                        1, 1, 1, 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 2);
}
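Here the reduction axis is 2, the height axis of the 1x3x2x4 input. In every channel the second height row (5–8, 50–80, 500–800) exceeds the first, so the reduced 1x3x4 output is filled with index 1. A minimal standalone check (plain C++, not Arm NN code):

#include <cassert>

int main()
{
    // The 1x3x2x4 input laid out as [channel][height][width].
    const float in[3][2][4] = {
        { {   1.f,   2.f,   3.f,   4.f }, {   5.f,   6.f,   7.f,   8.f } },
        { {  10.f,  20.f,  30.f,  40.f }, {  50.f,  60.f,  70.f,  80.f } },
        { { 100.f, 200.f, 300.f, 400.f }, { 500.f, 600.f, 700.f, 800.f } } };

    // The second height row beats the first at every (channel, width) position,
    // so the arg-max along the height axis is 1 everywhere: a 1x3x4 output of ones.
    for (int c = 0; c < 3; ++c)
    {
        for (int w = 0; w < 4; ++w)
        {
            assert(in[c][1][w] > in[c][0][w]);
        }
    }
    return 0;
}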
◆ ArgMaxSimpleTest()

LayerTestResult< int32_t, 3 > ArgMaxSimpleTest (armnn::IWorkloadFactory & workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 66 of file ArgMinMaxTestImpl.cpp.

References armnn::Max, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 3 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Max,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, -1); // axis -1 === 3
}
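The axis argument is -1, which wraps to axis 3, the last (width) axis of the 1x1x1x5 input; the largest value, 10.0f, sits at index 3, matching outputValues { 3 }. A minimal standalone check (plain C++, not Arm NN code):

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    // The 1x1x1x5 input reduces along its last axis (axis -1, i.e. axis 3).
    const std::vector<float> in = { 5.f, 2.f, 8.f, 10.f, 9.f };
    // 10.0f is the largest value and sits at index 3.
    assert(std::max_element(in.begin(), in.end()) - in.begin() == 3);
    return 0;
}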
◆ ArgMinChannelTest()

LayerTestResult< int32_t, 3 > ArgMinChannelTest (armnn::IWorkloadFactory & workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                 const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 122 of file ArgMinMaxTestImpl.cpp.

References armnn::Min, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
                                        0, 0, 0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 1);
}
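This is the Min counterpart of ArgMaxChannelTest: at each of the eight (height, width) positions the three channel values scale as v, 10*v and 100*v, so the smallest always lives in channel 0 and the expected 1x2x4 output is all zeros. A minimal standalone check (plain C++, not Arm NN code):

#include <algorithm>
#include <cassert>
#include <cstddef>

int main()
{
    // Per-position base values of channel 0; channels 1 and 2 are 10x and 100x larger.
    const float base[8] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
    for (std::size_t i = 0; i < 8; ++i)
    {
        const float channels[3] = { base[i], 10.f * base[i], 100.f * base[i] };
        // The smallest value across the channel axis is always in channel 0.
        assert(std::min_element(channels, channels + 3) - channels == 0);
    }
    return 0;
}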
◆ ArgMinSimpleTest()

LayerTestResult< int32_t, 3 > ArgMinSimpleTest (armnn::IWorkloadFactory & workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 94 of file ArgMinMaxTestImpl.cpp.

References armnn::Min, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<int32_t> outputValues({ 1 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}
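The Min counterpart of ArgMaxSimpleTest, with the last axis given explicitly as 3 rather than -1; the smallest value, 2.0f, sits at index 1, matching outputValues { 1 }. A minimal standalone check (plain C++, not Arm NN code):

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    // Same 1x1x1x5 input as ArgMaxSimpleTest, reduced with Min along axis 3.
    const std::vector<float> in = { 5.f, 2.f, 8.f, 10.f, 9.f };
    // 2.0f is the smallest value and sits at index 1.
    assert(std::min_element(in.begin(), in.end()) - in.begin() == 1);
    return 0;
}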
◆ ArgMinWidthTest()

LayerTestResult< int32_t, 3 > ArgMinWidthTest (armnn::IWorkloadFactory & workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                               const armnn::ITensorHandleFactory & tensorHandleFactory)

Definition at line 230 of file ArgMinMaxTestImpl.cpp.

References armnn::Min, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Signed32.
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 3, 2 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<int32_t> outputValues({ 0, 0,
                                        0, 0,
                                        0, 0 });

    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
                                          armnn::ArgMinMaxFunction::Min,
                                          inputTensorInfo, outputTensorInfo,
                                          inputValues, outputValues, 3);
}
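The reduction here runs over axis 3, the width axis: each of the six width rows of the 1x3x2x4 input is strictly ascending, so the minimum is always at index 0 and the expected 1x3x2 output is all zeros. A minimal standalone check (plain C++, not Arm NN code):

#include <algorithm>
#include <cassert>

int main()
{
    // The six width rows of the 1x3x2x4 input; each one is strictly ascending.
    const float rows[6][4] = { {   1.f,   2.f,   3.f,   4.f }, {   5.f,   6.f,   7.f,   8.f },
                               {  10.f,  20.f,  30.f,  40.f }, {  50.f,  60.f,  70.f,  80.f },
                               { 100.f, 200.f, 300.f, 400.f }, { 500.f, 600.f, 700.f, 800.f } };
    for (const auto& row : rows)
    {
        // The minimum along the width axis is always at index 0.
        assert(std::min_element(row, row + 4) - row == 0);
    }
    return 0;
}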
--
cgit v1.2.1