ArmNN 21.08 — ReduceSumTestImpl.cpp
(Source listing extracted from the Doxygen documentation page for this file.)
1 //
2 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ReduceSumTestImpl.hpp"

#include <DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
14 namespace
15 {
16 
17 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
18 LayerTestResult<float, 4> ReduceTestCommon(
19  armnn::IWorkloadFactory& workloadFactory,
21  const armnn::ITensorHandleFactory& tensorHandleFactory,
22  const armnn::TensorInfo inputTensorInfo,
23  const armnn::TensorInfo outputTensorInfo,
24  const std::vector<float>& inputData,
25  const std::vector<float>& outputData,
26  const std::vector<int32_t> vAxis,
27  const armnn::ReduceOperation reduceOperation,
28  bool keepDims = false)
29 {
30  IgnoreUnused(memoryManager);
31  auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
32 
33  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
34 
35  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
36  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
37 
39  std::vector<uint32_t> updated_idx;
40  uint32_t resolvedAxis = 0;
41  for (uint32_t i = 0; i < vAxis.size(); ++i)
42  {
43  if (vAxis[i] < 0)
44  {
45  resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
46  } else
47  {
48  resolvedAxis = static_cast<uint32_t>(vAxis[i]);
49  }
50 
51  updated_idx.push_back(resolvedAxis);
52  }
53 
54  descriptor.m_Parameters.m_vAxis = updated_idx;
55  descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
56  descriptor.m_Parameters.m_KeepDims = keepDims;
58 
59  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
60  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
61 
62  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
63 
64  inputHandle->Allocate();
65  outputHandle->Allocate();
66 
67  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
68 
69  workload->Execute();
70 
71  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
72 
73  return LayerTestResult<float, 4>(actualOutput,
74  outputData,
75  outputHandle->GetShape(),
76  outputTensorInfo.GetShape());
77 }
78 
79 } // namespace
80 
81 template<armnn::DataType ArmnnType, typename T>
83  armnn::IWorkloadFactory& workloadFactory,
85  const armnn::ITensorHandleFactory& tensorHandleFactory)
86 {
87  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
88  const armnn::TensorShape outputShape{ 1, 1, 1, 1};
89 
90  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
91 
92  if (armnn::IsQuantizedType<T>())
93  {
94  inputTensorInfo.SetQuantizationScale(1.0f);
95  inputTensorInfo.SetQuantizationOffset(0);
96  }
97 
98  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
99 
100  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
101  std::vector<float> outputValues({ 34.0f });
102 
103  return ReduceTestCommon<ArmnnType>(workloadFactory,
104  memoryManager,
105  tensorHandleFactory,
106  inputTensorInfo,
107  outputTensorInfo,
108  inputValues,
109  outputValues,
110  { -1 },
112 }
113 
114 template<armnn::DataType ArmnnType, typename T>
116  armnn::IWorkloadFactory& workloadFactory,
118  const armnn::ITensorHandleFactory& tensorHandleFactory)
119 {
120  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
121  const armnn::TensorShape outputShape{ 1, 1, 2, 4};
122 
123  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
124 
125  if (armnn::IsQuantizedType<T>())
126  {
127  inputTensorInfo.SetQuantizationScale(1.0f);
128  inputTensorInfo.SetQuantizationOffset(0);
129  }
130 
131  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
132 
133  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
134  5.0f, 6.0f, 7.0f, 8.0f,
135 
136  10.0f, 20.0f, 30.0f, 40.0f,
137  50.0f, 60.0f, 70.0f, 80.0f,
138 
139  100.0f, 200.0f, 300.0f, 400.0f,
140  500.0f, 600.0f, 700.0f, 800.0f });
141  std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
142  555.0f, 666.0f, 777.0f, 888.0f });
143 
144  return ReduceTestCommon<ArmnnType>(workloadFactory,
145  memoryManager,
146  tensorHandleFactory,
147  inputTensorInfo,
148  outputTensorInfo,
149  inputValues,
150  outputValues,
151  { 1 },
153 }
154 
155 template<armnn::DataType ArmnnType, typename T>
157  armnn::IWorkloadFactory& workloadFactory,
159  const armnn::ITensorHandleFactory& tensorHandleFactory)
160 {
161  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
162  const armnn::TensorShape outputShape{ 1, 1, 3, 4};
163 
164  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
165 
166  if (armnn::IsQuantizedType<T>())
167  {
168  inputTensorInfo.SetQuantizationScale(1.0f);
169  inputTensorInfo.SetQuantizationOffset(0);
170  }
171 
172  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
173 
174  std::vector<float> inputValues( {7, 8, 6, 1,
175  1, 1, 8, 7,
176  3, 7, 7, 7,
177 
178  6, 8, 4, 7,
179  3, 8, 7, 3,
180  5, 8, 8, 8,
181 
182 
183  7, 8, 2, 7,
184  3, 8, 5, 6,
185  8, 4, 2, 7,
186 
187  1, 6, 7, 2,
188  8, 3, 3, 1,
189  7, 6, 2, 6,
190 
191 
192  5, 3, 4, 8,
193  7, 8, 2, 4,
194  6, 6, 2, 8,
195 
196  2, 2, 7, 2,
197  5, 3, 6, 3,
198  6, 1, 8, 8});
199  std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
200  27.0f, 31.0f, 31.0f, 24.0f,
201  35.0f, 32.0f, 29.0f, 44.0f});
202 
203  return ReduceTestCommon<ArmnnType>(workloadFactory,
204  memoryManager,
205  tensorHandleFactory,
206  inputTensorInfo,
207  outputTensorInfo,
208  inputValues,
209  outputValues,
210  { 1 },
212 }
213 
214 template<armnn::DataType ArmnnType, typename T>
216  armnn::IWorkloadFactory& workloadFactory,
218  const armnn::ITensorHandleFactory& tensorHandleFactory)
219 {
220  const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
221  const armnn::TensorShape outputShape{ 1, 6, 3, 1};
222 
223  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
224 
225  if (armnn::IsQuantizedType<T>())
226  {
227  inputTensorInfo.SetQuantizationScale(1.0f);
228  inputTensorInfo.SetQuantizationOffset(0);
229  }
230 
231  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
232 
233  std::vector<float> inputValues( {7, 8, 6, 1,
234  1, 1, 8, 7,
235  3, 7, 7, 7,
236 
237  6, 8, 4, 7,
238  3, 8, 7, 3,
239  5, 8, 8, 8,
240 
241 
242  7, 8, 2, 7,
243  3, 8, 5, 6,
244  8, 4, 2, 7,
245 
246  1, 6, 7, 2,
247  8, 3, 3, 1,
248  7, 6, 2, 6,
249 
250 
251  5, 3, 4, 8,
252  7, 8, 2, 4,
253  6, 6, 2, 8,
254 
255  2, 2, 7, 2,
256  5, 3, 6, 3,
257  6, 1, 8, 8});
258  std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
259  25.0f, 21.0f, 29.0f,
260 
261  24.0f, 22.0f, 21.0f,
262  16.0f, 15.0f, 21.0f,
263 
264  20.0f, 21.0f, 22.0f,
265  13.0f, 17.0f, 23.0f});
266 
267  return ReduceTestCommon<ArmnnType>(workloadFactory,
268  memoryManager,
269  tensorHandleFactory,
270  inputTensorInfo,
271  outputTensorInfo,
272  inputValues,
273  outputValues,
274  { 3 },
276  true);
277 }
278 
279 template<armnn::DataType ArmnnType, typename T>
281  armnn::IWorkloadFactory& workloadFactory,
283  const armnn::ITensorHandleFactory& tensorHandleFactory)
284 {
285  const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
286  const armnn::TensorShape outputShape{ 1, 1, 1, 4};
287 
288  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
289 
290  if (armnn::IsQuantizedType<T>())
291  {
292  inputTensorInfo.SetQuantizationScale(1.0f);
293  inputTensorInfo.SetQuantizationOffset(0);
294  }
295 
296  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
297 
298  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
299  5.0f, 6.0f, 7.0f, 8.0f,
300 
301  10.0f, 20.0f, 30.0f, 40.0f,
302  50.0f, 60.0f, 70.0f, 80.0f,
303 
304  100.0f, 200.0f, 300.0f, 400.0f,
305  500.0f, 600.0f, 700.0f, 800.0f });
306  std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
307 
308  return ReduceTestCommon<ArmnnType>(workloadFactory,
309  memoryManager,
310  tensorHandleFactory,
311  inputTensorInfo,
312  outputTensorInfo,
313  inputValues,
314  outputValues,
315  { 1, 2 },
317 }
318 
319 // Explicit template specializations
320 
322 ReduceSumSimpleTest<armnn::DataType::Float32>(
323  armnn::IWorkloadFactory& workloadFactory,
325  const armnn::ITensorHandleFactory& tensorHandleFactory);
326 
328 ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
329  armnn::IWorkloadFactory& workloadFactory,
331  const armnn::ITensorHandleFactory& tensorHandleFactory);
332 
334 ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
335  armnn::IWorkloadFactory& workloadFactory,
337  const armnn::ITensorHandleFactory& tensorHandleFactory);
338 
340 ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
341  armnn::IWorkloadFactory& workloadFactory,
343  const armnn::ITensorHandleFactory& tensorHandleFactory);
344 
346 ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
347  armnn::IWorkloadFactory& workloadFactory,
349  const armnn::ITensorHandleFactory& tensorHandleFactory);
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< float, 4 > ReduceSumMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_KeepDims
if true then output shape has no change.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
ReduceOperation
Definition: Types.hpp:123
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
LayerTestResult< float, 4 > ReduceSumSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > ReduceSumSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)