ArmNN
 22.02
ReduceSumTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ReduceSumTestImpl.hpp"
7 
8 #include <DataTypeUtils.hpp>
9 #include <armnnTestUtils/TensorCopyUtils.hpp>
10 #include <armnnTestUtils/WorkloadTestUtils.hpp>
11 
12 #include <armnnTestUtils/TensorHelpers.hpp>
13 
14 namespace
15 {
16 
17 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
18 LayerTestResult<float, 4> ReduceTestCommon(
19  armnn::IWorkloadFactory& workloadFactory,
21  const armnn::ITensorHandleFactory& tensorHandleFactory,
22  const armnn::TensorInfo inputTensorInfo,
23  const armnn::TensorInfo outputTensorInfo,
24  const std::vector<float>& inputData,
25  const std::vector<float>& outputData,
26  const std::vector<int32_t> vAxis,
27  const armnn::ReduceOperation reduceOperation,
28  bool keepDims = false)
29 {
30  IgnoreUnused(memoryManager);
31  auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
32 
33  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
34 
35  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
36  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
37 
39  std::vector<uint32_t> updated_idx;
40  uint32_t resolvedAxis = 0;
41  for (uint32_t i = 0; i < vAxis.size(); ++i)
42  {
43  if (vAxis[i] < 0)
44  {
45  resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
46  } else
47  {
48  resolvedAxis = static_cast<uint32_t>(vAxis[i]);
49  }
50 
51  updated_idx.push_back(resolvedAxis);
52  }
53 
54  descriptor.m_Parameters.m_vAxis = updated_idx;
55  descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
56  descriptor.m_Parameters.m_KeepDims = keepDims;
58 
59  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
60  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
61 
62  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
63  descriptor,
64  info);
65 
66  inputHandle->Allocate();
67  outputHandle->Allocate();
68 
69  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
70 
71  workload->Execute();
72 
73  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
74 
75  return LayerTestResult<float, 4>(actualOutput,
76  outputData,
77  outputHandle->GetShape(),
78  outputTensorInfo.GetShape());
79 }
80 
81 } // namespace
82 
83 template<armnn::DataType ArmnnType, typename T>
85  armnn::IWorkloadFactory& workloadFactory,
87  const armnn::ITensorHandleFactory& tensorHandleFactory)
88 {
89  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
90  const armnn::TensorShape outputShape{ 1, 1, 1, 1};
91 
92  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
93 
94  if (armnn::IsQuantizedType<T>())
95  {
96  inputTensorInfo.SetQuantizationScale(1.0f);
97  inputTensorInfo.SetQuantizationOffset(0);
98  }
99 
100  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
101 
102  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
103  std::vector<float> outputValues({ 34.0f });
104 
105  return ReduceTestCommon<ArmnnType>(workloadFactory,
106  memoryManager,
107  tensorHandleFactory,
108  inputTensorInfo,
109  outputTensorInfo,
110  inputValues,
111  outputValues,
112  { -1 },
114 }
115 
116 template<armnn::DataType ArmnnType, typename T>
118  armnn::IWorkloadFactory& workloadFactory,
120  const armnn::ITensorHandleFactory& tensorHandleFactory)
121 {
122  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
123  const armnn::TensorShape outputShape{ 1, 1, 2, 4};
124 
125  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
126 
127  if (armnn::IsQuantizedType<T>())
128  {
129  inputTensorInfo.SetQuantizationScale(1.0f);
130  inputTensorInfo.SetQuantizationOffset(0);
131  }
132 
133  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
134 
135  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
136  5.0f, 6.0f, 7.0f, 8.0f,
137 
138  10.0f, 20.0f, 30.0f, 40.0f,
139  50.0f, 60.0f, 70.0f, 80.0f,
140 
141  100.0f, 200.0f, 300.0f, 400.0f,
142  500.0f, 600.0f, 700.0f, 800.0f });
143  std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
144  555.0f, 666.0f, 777.0f, 888.0f });
145 
146  return ReduceTestCommon<ArmnnType>(workloadFactory,
147  memoryManager,
148  tensorHandleFactory,
149  inputTensorInfo,
150  outputTensorInfo,
151  inputValues,
152  outputValues,
153  { 1 },
155 }
156 
157 template<armnn::DataType ArmnnType, typename T>
159  armnn::IWorkloadFactory& workloadFactory,
161  const armnn::ITensorHandleFactory& tensorHandleFactory)
162 {
163  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
164  const armnn::TensorShape outputShape{ 1, 1, 3, 4};
165 
166  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
167 
168  if (armnn::IsQuantizedType<T>())
169  {
170  inputTensorInfo.SetQuantizationScale(1.0f);
171  inputTensorInfo.SetQuantizationOffset(0);
172  }
173 
174  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
175 
176  std::vector<float> inputValues( {7, 8, 6, 1,
177  1, 1, 8, 7,
178  3, 7, 7, 7,
179 
180  6, 8, 4, 7,
181  3, 8, 7, 3,
182  5, 8, 8, 8,
183 
184 
185  7, 8, 2, 7,
186  3, 8, 5, 6,
187  8, 4, 2, 7,
188 
189  1, 6, 7, 2,
190  8, 3, 3, 1,
191  7, 6, 2, 6,
192 
193 
194  5, 3, 4, 8,
195  7, 8, 2, 4,
196  6, 6, 2, 8,
197 
198  2, 2, 7, 2,
199  5, 3, 6, 3,
200  6, 1, 8, 8});
201  std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
202  27.0f, 31.0f, 31.0f, 24.0f,
203  35.0f, 32.0f, 29.0f, 44.0f});
204 
205  return ReduceTestCommon<ArmnnType>(workloadFactory,
206  memoryManager,
207  tensorHandleFactory,
208  inputTensorInfo,
209  outputTensorInfo,
210  inputValues,
211  outputValues,
212  { 1 },
214 }
215 
216 template<armnn::DataType ArmnnType, typename T>
218  armnn::IWorkloadFactory& workloadFactory,
220  const armnn::ITensorHandleFactory& tensorHandleFactory)
221 {
222  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
223  const armnn::TensorShape outputShape{ 1, 6, 3, 1};
224 
225  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
226 
227  if (armnn::IsQuantizedType<T>())
228  {
229  inputTensorInfo.SetQuantizationScale(1.0f);
230  inputTensorInfo.SetQuantizationOffset(0);
231  }
232 
233  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
234 
235  std::vector<float> inputValues( {7, 8, 6, 1,
236  1, 1, 8, 7,
237  3, 7, 7, 7,
238 
239  6, 8, 4, 7,
240  3, 8, 7, 3,
241  5, 8, 8, 8,
242 
243 
244  7, 8, 2, 7,
245  3, 8, 5, 6,
246  8, 4, 2, 7,
247 
248  1, 6, 7, 2,
249  8, 3, 3, 1,
250  7, 6, 2, 6,
251 
252 
253  5, 3, 4, 8,
254  7, 8, 2, 4,
255  6, 6, 2, 8,
256 
257  2, 2, 7, 2,
258  5, 3, 6, 3,
259  6, 1, 8, 8});
260  std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
261  25.0f, 21.0f, 29.0f,
262 
263  24.0f, 22.0f, 21.0f,
264  16.0f, 15.0f, 21.0f,
265 
266  20.0f, 21.0f, 22.0f,
267  13.0f, 17.0f, 23.0f});
268 
269  return ReduceTestCommon<ArmnnType>(workloadFactory,
270  memoryManager,
271  tensorHandleFactory,
272  inputTensorInfo,
273  outputTensorInfo,
274  inputValues,
275  outputValues,
276  { 3 },
278  true);
279 }
280 
281 template<armnn::DataType ArmnnType, typename T>
283  armnn::IWorkloadFactory& workloadFactory,
285  const armnn::ITensorHandleFactory& tensorHandleFactory)
286 {
287  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
288  const armnn::TensorShape outputShape{ 1, 1, 1, 4};
289 
290  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
291 
292  if (armnn::IsQuantizedType<T>())
293  {
294  inputTensorInfo.SetQuantizationScale(1.0f);
295  inputTensorInfo.SetQuantizationOffset(0);
296  }
297 
298  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
299 
300  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
301  5.0f, 6.0f, 7.0f, 8.0f,
302 
303  10.0f, 20.0f, 30.0f, 40.0f,
304  50.0f, 60.0f, 70.0f, 80.0f,
305 
306  100.0f, 200.0f, 300.0f, 400.0f,
307  500.0f, 600.0f, 700.0f, 800.0f });
308  std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
309 
310  return ReduceTestCommon<ArmnnType>(workloadFactory,
311  memoryManager,
312  tensorHandleFactory,
313  inputTensorInfo,
314  outputTensorInfo,
315  inputValues,
316  outputValues,
317  { 1, 2 },
319 }
320 
321 // Explicit template specializations
322 
324 ReduceSumSimpleTest<armnn::DataType::Float32>(
325  armnn::IWorkloadFactory& workloadFactory,
327  const armnn::ITensorHandleFactory& tensorHandleFactory);
328 
330 ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
331  armnn::IWorkloadFactory& workloadFactory,
333  const armnn::ITensorHandleFactory& tensorHandleFactory);
334 
336 ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
337  armnn::IWorkloadFactory& workloadFactory,
339  const armnn::ITensorHandleFactory& tensorHandleFactory);
340 
342 ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
343  armnn::IWorkloadFactory& workloadFactory,
345  const armnn::ITensorHandleFactory& tensorHandleFactory);
346 
348 ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
349  armnn::IWorkloadFactory& workloadFactory,
351  const armnn::ITensorHandleFactory& tensorHandleFactory);
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< float, 4 > ReduceSumMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_KeepDims
if true then output shape has no change.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
ReduceOperation
Definition: Types.hpp:130
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
LayerTestResult< float, 4 > ReduceSumSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > ReduceSumSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196