ArmNN 21.02
SplitterTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SplitterTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: the Compute Library imposes a restriction that the x and y dimensions (input height and width)
    // cannot be split.
    // For the reasons behind this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.
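    //
    // First split:  input of shape {3, 6, 5} -> {1, 6, 5} (channel 0) + {2, 6, 5} (channels 1 and 2).
    // Second split: the {2, 6, 5} subtensor -> two {1, 6, 5} outputs, one per channel.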
    // To take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)
    ));
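
    // The input is laid out [channel][height][width]: channel 0 holds values 1-30, channel 1
    // holds 31-60, and channel 2 holds 61-90, so the expected outputs below can be read off directly.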

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        },
        qScale, qOffset)
    ));

    // Channels 1 and 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)
    ));

    // Channel 0 of ret2 (i.e. channel 1 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        },
        qScale, qOffset)
    ));

    // Channel 1 of ret2 (i.e. channel 2 of the original input).
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)
    ));

    // NOTE: as a corollary of the x/y splitting restriction, the x and y values of the view origins
    // have to be zero. The coordinates are as per the tensor info above: channels, height/y, width/x.
    // Note that under the hood the compute engine reverses these, i.e. its coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
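
    // Each ViewOrigin is the [channel, height, width] coordinate in the parent tensor at which the
    // corresponding output window starts, so {1, 0, 0} selects a view beginning at channel 1.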

    bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo4);
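
    // When the backend supports subtensors, handles 1 and 2 are views directly into the input tensor,
    // while handles 3 and 4 are views into handle 2: the second split nests inside the first.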

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
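
    // ret1 and ret2 now hold the results of the first split; outputHandle2 still owns the
    // two-channel data that the second split below consumes as its input.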

    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    ExecuteWorkload(*workload2, memoryManager);

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale, int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
    auto input = MakeTensor<T, 3>(
            tensorInfo,
            armnnUtils::QuantizedVector<T>({
                 1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
                 6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
                11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
                16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
                21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
                26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

                31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
                36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
                41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
                46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
                51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
                56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

                61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
                66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
                71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
                76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
                81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
                86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
            },
            qScale, qOffset));

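    // A single view at origin {0, 0, 0} whose extent is the full tensor shape turns the splitter
    // into a straight copy of the input to the output.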
    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

    const bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            tensorHandleFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
    ret.outputExpected = input;

    return ret;
}

} // anonymous namespace

std::vector<LayerTestResult<float,3>> SplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<armnn::Half,3>> SplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
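
// Note: the quantized splitter tests above pass scale 1.0 and offset 0, so the quantized values
// coincide with the float test data (1-90), which fits comfortably in both uint8 and int16 ranges.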

LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}

LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}