ArmNN
 22.02
ReductionTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ReductionTestImpl.hpp"

#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

#include <iostream>
16 namespace
17 {
18 
19 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
20 LayerTestResult<float, 4> ReductionTestCommon(
21  armnn::IWorkloadFactory& workloadFactory,
23  const armnn::ITensorHandleFactory& tensorHandleFactory,
24  const armnn::TensorInfo inputTensorInfo,
25  const armnn::TensorInfo outputTensorInfo,
26  const std::vector<float>& inputData,
27  const std::vector<float>& outputData,
28  const std::vector<int32_t> vAxis,
29  const armnn::ReduceOperation reduceOperation,
30  bool keepDims = false)
31 {
32  IgnoreUnused(memoryManager);
33  auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
34 
35  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
36 
37  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
38  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
39 
41  std::vector<uint32_t> updated_idx;
42  uint32_t resolvedAxis = 0;
43  for (uint32_t i = 0; i < vAxis.size(); ++i)
44  {
45  if (vAxis[i] < 0)
46  {
47  resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
48  } else
49  {
50  resolvedAxis = static_cast<uint32_t>(vAxis[i]);
51  }
52 
53  updated_idx.push_back(resolvedAxis);
54  }
55 
56  descriptor.m_Parameters.m_vAxis = updated_idx;
57  descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
58  descriptor.m_Parameters.m_KeepDims = keepDims;
60 
61  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
62  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
63 
64  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
65  descriptor,
66  info);
67 
68  inputHandle->Allocate();
69  outputHandle->Allocate();
70 
71  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
72 
73  workload->Execute();
74 
75  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
76 
77  return LayerTestResult<float, 4>(actualOutput,
78  outputData,
79  outputHandle->GetShape(),
80  outputTensorInfo.GetShape());
81 }
82 
83 } // namespace
84 
85 template<armnn::DataType ArmnnType, typename T>
87  armnn::IWorkloadFactory& workloadFactory,
89  const armnn::ITensorHandleFactory& tensorHandleFactory)
90 {
91  const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
92  const armnn::TensorShape outputShape{ 1, 1, 1, 3};
93 
94  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
95 
96  if (armnn::IsQuantizedType<T>())
97  {
98  inputTensorInfo.SetQuantizationScale(1.0f);
99  inputTensorInfo.SetQuantizationOffset(0);
100  }
101 
102  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
103 
104  std::vector<float> inputValues
105  ({
106  1001.0f, 11.0f, 1003.0f,
107  10.0f, 1002.0f, 12.0f
108  });
109  std::vector<float> outputValues
110  ({
111  1001.0f, 1002.0f, 1003.0f
112  });
113 
114  return ReductionTestCommon<ArmnnType>(workloadFactory,
115  memoryManager,
116  tensorHandleFactory,
117  inputTensorInfo,
118  outputTensorInfo,
119  inputValues,
120  outputValues,
121  { 2 },
123 }
124 
125 template<armnn::DataType ArmnnType, typename T>
127  armnn::IWorkloadFactory& workloadFactory,
129  const armnn::ITensorHandleFactory& tensorHandleFactory)
130 {
131  const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
132  const armnn::TensorShape outputShape{ 1, 1, 2, 1};
133 
134  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
135 
136  if (armnn::IsQuantizedType<T>())
137  {
138  inputTensorInfo.SetQuantizationScale(1.0f);
139  inputTensorInfo.SetQuantizationOffset(0);
140  }
141 
142  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
143 
144  std::vector<float> inputValues
145  ({
146  1001.0f, 11.0f, 1003.0f,
147  10.0f, 1002.0f, 12.0f
148  });
149  std::vector<float> outputValues
150  ({
151  1003.0f, 1002.0f
152  });
153 
154  return ReductionTestCommon<ArmnnType>(workloadFactory,
155  memoryManager,
156  tensorHandleFactory,
157  inputTensorInfo,
158  outputTensorInfo,
159  inputValues,
160  outputValues,
161  { -1 },
163  true);
164 }
165 
166 template<armnn::DataType ArmnnType, typename T>
168  armnn::IWorkloadFactory& workloadFactory,
170  const armnn::ITensorHandleFactory& tensorHandleFactory)
171 {
172  const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
173  const armnn::TensorShape outputShape{ 1, 1, 2, 1 };
174 
175  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
176 
177  if (armnn::IsQuantizedType<T>())
178  {
179  inputTensorInfo.SetQuantizationScale(1.0f);
180  inputTensorInfo.SetQuantizationOffset(0);
181  }
182 
183  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
184 
185  std::vector<float> inputValues
186  ({
187  1.0f, 3.0f, 2.0f,
188  6.0f, 4.0f, 5.0f
189  });
190 
191  std::vector<float> outputValues
192  ({
193  3.0f, 6.0f
194  });
195 
196  return ReductionTestCommon<ArmnnType>(workloadFactory,
197  memoryManager,
198  tensorHandleFactory,
199  inputTensorInfo,
200  outputTensorInfo,
201  inputValues,
202  outputValues,
203  { 3 },
205  true);
206 }
207 
208 template<armnn::DataType ArmnnType, typename T>
210  armnn::IWorkloadFactory& workloadFactory,
212  const armnn::ITensorHandleFactory& tensorHandleFactory)
213 {
214  const armnn::TensorShape inputShape { 1, 1, 2, 3 };
215  const armnn::TensorShape outputShape { 1, 1, 1, 3};
216 
217  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
218 
219  if (armnn::IsQuantizedType<T>())
220  {
221  inputTensorInfo.SetQuantizationScale(1.0f);
222  inputTensorInfo.SetQuantizationOffset(0);
223  }
224 
225  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
226 
227  std::vector<float> inputValues
228  ({
229  1001.0f, 11.0f, 1003.0f,
230  10.0f, 1002.0f, 12.0f
231  });
232  std::vector<float> outputValues
233  ({
234  10.0f, 11.0f, 12.0f
235  });
236 
237  return ReductionTestCommon<ArmnnType>(workloadFactory,
238  memoryManager,
239  tensorHandleFactory,
240  inputTensorInfo,
241  outputTensorInfo,
242  inputValues,
243  outputValues,
244  { 2 },
246 }
247 
248 template<armnn::DataType ArmnnType, typename T>
250  armnn::IWorkloadFactory& workloadFactory,
252  const armnn::ITensorHandleFactory& tensorHandleFactory)
253 {
254  const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
255  const armnn::TensorShape outputShape{ 1, 1, 2, 1};
256 
257  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
258 
259  if (armnn::IsQuantizedType<T>())
260  {
261  inputTensorInfo.SetQuantizationScale(1.0f);
262  inputTensorInfo.SetQuantizationOffset(0);
263  }
264 
265  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
266 
267  std::vector<float> inputValues
268  ({
269  1001.0f, 11.0f, 1003.0f,
270  10.0f, 1002.0f, 12.0f
271  });
272  std::vector<float> outputValues
273  ({
274  11.0f, 10.0f
275  });
276 
277  return ReductionTestCommon<ArmnnType>(workloadFactory,
278  memoryManager,
279  tensorHandleFactory,
280  inputTensorInfo,
281  outputTensorInfo,
282  inputValues,
283  outputValues,
284  { -1 },
286  true);
287 }
288 
289 // Explicit template specializations
291 ReduceMaxSimpleTest<armnn::DataType::Float32>(
292  armnn::IWorkloadFactory& workloadFactory,
294  const armnn::ITensorHandleFactory& tensorHandleFactory);
295 
297 ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
298  armnn::IWorkloadFactory& workloadFactory,
300  const armnn::ITensorHandleFactory& tensorHandleFactory);
301 
303 ReduceMaxSimpleTest2<armnn::DataType::Float32>(
304  armnn::IWorkloadFactory& workloadFactory,
306  const armnn::ITensorHandleFactory& tensorHandleFactory);
307 
309 ReduceMinSimpleTest<armnn::DataType::Float32>(
310  armnn::IWorkloadFactory& workloadFactory,
312  const armnn::ITensorHandleFactory& tensorHandleFactory);
313 
315 ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
316  armnn::IWorkloadFactory& workloadFactory,
318  const armnn::ITensorHandleFactory& tensorHandleFactory);
319 
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
bool m_KeepDims
If true, the reduced dimensions are kept in the output shape (the output rank is unchanged).
LayerTestResult< float, 4 > ReduceMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
ReduceOperation
Definition: Types.hpp:130
LayerTestResult< float, 4 > ReduceMinNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > ReduceMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
LayerTestResult< float, 4 > ReduceMaxNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
LayerTestResult< float, 4 > ReduceMaxSimpleTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
Definition: Tensor.hpp:196