// ArmNN 21.02 — ReduceSumTestImpl.cpp
// (Doxygen page header: "Go to the documentation of this file.")
1 //
2 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ReduceSumTestImpl.hpp"

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
13 
14 namespace
15 {
16 
17 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
18 LayerTestResult<float, 4> ReduceTestCommon(
19  armnn::IWorkloadFactory& workloadFactory,
21  const armnn::ITensorHandleFactory& tensorHandleFactory,
22  const armnn::TensorInfo inputTensorInfo,
23  const armnn::TensorInfo outputTensorInfo,
24  const std::vector<float>& inputData,
25  const std::vector<float>& outputData,
26  const std::vector<int32_t> vAxis,
27  const armnn::ReduceOperation reduceOperation,
28  bool keepDims = false)
29 {
30  IgnoreUnused(memoryManager);
31  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
32 
33  LayerTestResult<float, 4> result(outputTensorInfo);
34  result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
35 
36  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
37  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
38 
40  std::vector<uint32_t> updated_idx;
41  uint32_t resolvedAxis = 0;
42  for (uint32_t i = 0; i < vAxis.size(); ++i)
43  {
44  if (vAxis[i] < 0)
45  {
46  resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
47  } else
48  {
49  resolvedAxis = static_cast<uint32_t>(vAxis[i]);
50  }
51 
52  updated_idx.push_back(resolvedAxis);
53  }
54 
55  descriptor.m_Parameters.m_vAxis = updated_idx;
56  descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
57  descriptor.m_Parameters.m_KeepDims = keepDims;
59 
60  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
61  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
62 
63  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
64 
65  inputHandle->Allocate();
66  outputHandle->Allocate();
67 
68  CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
69 
70  workload->Execute();
71 
72  CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
73 
74  return result;
75 }
76 
77 } // namespace
78 
79 template<armnn::DataType ArmnnType, typename T>
81  armnn::IWorkloadFactory& workloadFactory,
83  const armnn::ITensorHandleFactory& tensorHandleFactory)
84 {
85  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
86  const armnn::TensorShape outputShape{ 1, 1, 1, 1};
87 
88  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
89 
90  if (armnn::IsQuantizedType<T>())
91  {
92  inputTensorInfo.SetQuantizationScale(1.0f);
93  inputTensorInfo.SetQuantizationOffset(0);
94  }
95 
96  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
97 
98  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
99  std::vector<float> outputValues({ 34.0f });
100 
101  return ReduceTestCommon<ArmnnType>(workloadFactory,
102  memoryManager,
103  tensorHandleFactory,
104  inputTensorInfo,
105  outputTensorInfo,
106  inputValues,
107  outputValues,
108  { -1 },
110 }
111 
112 template<armnn::DataType ArmnnType, typename T>
114  armnn::IWorkloadFactory& workloadFactory,
116  const armnn::ITensorHandleFactory& tensorHandleFactory)
117 {
118  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
119  const armnn::TensorShape outputShape{ 1, 1, 2, 4};
120 
121  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
122 
123  if (armnn::IsQuantizedType<T>())
124  {
125  inputTensorInfo.SetQuantizationScale(1.0f);
126  inputTensorInfo.SetQuantizationOffset(0);
127  }
128 
129  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
130 
131  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
132  5.0f, 6.0f, 7.0f, 8.0f,
133 
134  10.0f, 20.0f, 30.0f, 40.0f,
135  50.0f, 60.0f, 70.0f, 80.0f,
136 
137  100.0f, 200.0f, 300.0f, 400.0f,
138  500.0f, 600.0f, 700.0f, 800.0f });
139  std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
140  555.0f, 666.0f, 777.0f, 888.0f });
141 
142  return ReduceTestCommon<ArmnnType>(workloadFactory,
143  memoryManager,
144  tensorHandleFactory,
145  inputTensorInfo,
146  outputTensorInfo,
147  inputValues,
148  outputValues,
149  { 1 },
151 }
152 
153 template<armnn::DataType ArmnnType, typename T>
155  armnn::IWorkloadFactory& workloadFactory,
157  const armnn::ITensorHandleFactory& tensorHandleFactory)
158 {
159  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
160  const armnn::TensorShape outputShape{ 1, 1, 3, 4};
161 
162  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
163 
164  if (armnn::IsQuantizedType<T>())
165  {
166  inputTensorInfo.SetQuantizationScale(1.0f);
167  inputTensorInfo.SetQuantizationOffset(0);
168  }
169 
170  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
171 
172  std::vector<float> inputValues( {7, 8, 6, 1,
173  1, 1, 8, 7,
174  3, 7, 7, 7,
175 
176  6, 8, 4, 7,
177  3, 8, 7, 3,
178  5, 8, 8, 8,
179 
180 
181  7, 8, 2, 7,
182  3, 8, 5, 6,
183  8, 4, 2, 7,
184 
185  1, 6, 7, 2,
186  8, 3, 3, 1,
187  7, 6, 2, 6,
188 
189 
190  5, 3, 4, 8,
191  7, 8, 2, 4,
192  6, 6, 2, 8,
193 
194  2, 2, 7, 2,
195  5, 3, 6, 3,
196  6, 1, 8, 8});
197  std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
198  27.0f, 31.0f, 31.0f, 24.0f,
199  35.0f, 32.0f, 29.0f, 44.0f});
200 
201  return ReduceTestCommon<ArmnnType>(workloadFactory,
202  memoryManager,
203  tensorHandleFactory,
204  inputTensorInfo,
205  outputTensorInfo,
206  inputValues,
207  outputValues,
208  { 1 },
210 }
211 
212 template<armnn::DataType ArmnnType, typename T>
214  armnn::IWorkloadFactory& workloadFactory,
216  const armnn::ITensorHandleFactory& tensorHandleFactory)
217 {
218  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
219  const armnn::TensorShape outputShape{ 1, 6, 3, 1};
220 
221  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
222 
223  if (armnn::IsQuantizedType<T>())
224  {
225  inputTensorInfo.SetQuantizationScale(1.0f);
226  inputTensorInfo.SetQuantizationOffset(0);
227  }
228 
229  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
230 
231  std::vector<float> inputValues( {7, 8, 6, 1,
232  1, 1, 8, 7,
233  3, 7, 7, 7,
234 
235  6, 8, 4, 7,
236  3, 8, 7, 3,
237  5, 8, 8, 8,
238 
239 
240  7, 8, 2, 7,
241  3, 8, 5, 6,
242  8, 4, 2, 7,
243 
244  1, 6, 7, 2,
245  8, 3, 3, 1,
246  7, 6, 2, 6,
247 
248 
249  5, 3, 4, 8,
250  7, 8, 2, 4,
251  6, 6, 2, 8,
252 
253  2, 2, 7, 2,
254  5, 3, 6, 3,
255  6, 1, 8, 8});
256  std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
257  25.0f, 21.0f, 29.0f,
258 
259  24.0f, 22.0f, 21.0f,
260  16.0f, 15.0f, 21.0f,
261 
262  20.0f, 21.0f, 22.0f,
263  13.0f, 17.0f, 23.0f});
264 
265  return ReduceTestCommon<ArmnnType>(workloadFactory,
266  memoryManager,
267  tensorHandleFactory,
268  inputTensorInfo,
269  outputTensorInfo,
270  inputValues,
271  outputValues,
272  { 3 },
274  true);
275 }
276 
277 template<armnn::DataType ArmnnType, typename T>
279  armnn::IWorkloadFactory& workloadFactory,
281  const armnn::ITensorHandleFactory& tensorHandleFactory)
282 {
283  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
284  const armnn::TensorShape outputShape{ 1, 1, 1, 4};
285 
286  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
287 
288  if (armnn::IsQuantizedType<T>())
289  {
290  inputTensorInfo.SetQuantizationScale(1.0f);
291  inputTensorInfo.SetQuantizationOffset(0);
292  }
293 
294  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
295 
296  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
297  5.0f, 6.0f, 7.0f, 8.0f,
298 
299  10.0f, 20.0f, 30.0f, 40.0f,
300  50.0f, 60.0f, 70.0f, 80.0f,
301 
302  100.0f, 200.0f, 300.0f, 400.0f,
303  500.0f, 600.0f, 700.0f, 800.0f });
304  std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
305 
306  return ReduceTestCommon<ArmnnType>(workloadFactory,
307  memoryManager,
308  tensorHandleFactory,
309  inputTensorInfo,
310  outputTensorInfo,
311  inputValues,
312  outputValues,
313  { 1, 2 },
315 }
316 
317 // Explicit template specializations
318 
320 ReduceSumSimpleTest<armnn::DataType::Float32>(
321  armnn::IWorkloadFactory& workloadFactory,
323  const armnn::ITensorHandleFactory& tensorHandleFactory);
324 
326 ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
327  armnn::IWorkloadFactory& workloadFactory,
329  const armnn::ITensorHandleFactory& tensorHandleFactory);
330 
332 ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
333  armnn::IWorkloadFactory& workloadFactory,
335  const armnn::ITensorHandleFactory& tensorHandleFactory);
336 
338 ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
339  armnn::IWorkloadFactory& workloadFactory,
341  const armnn::ITensorHandleFactory& tensorHandleFactory);
342 
344 ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
345  armnn::IWorkloadFactory& workloadFactory,
347  const armnn::ITensorHandleFactory& tensorHandleFactory);
/* Doxygen cross-reference footer captured by extraction (not part of the source file):
LayerTestResult< float, 4 > ReduceSumMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_KeepDims — if true then output shape has no change.
ReduceOperation m_ReduceOperation — specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
ReduceOperation — Definition: Types.hpp:111
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale) — Definition: Tensor.cpp:464
LayerTestResult< float, 4 > ReduceSumSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< uint32_t > m_vAxis — the indices of the dimensions to reduce.
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset) — Definition: Tensor.cpp:480
LayerTestResult< float, 4 > ReduceSumSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
*/