//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SplitterTestImpl.hpp"

// Test utility headers providing armnnUtils::QuantizedVector, the CopyData*ITensorHandle
// helpers, and the Add*ToWorkload/ExecuteWorkload helpers used below.
#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: the Compute Library imposes a restriction that the x and y dimensions (input width and height)
    // cannot be split.
    // For the reasoning behind this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

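    // The two-stage split performed below, by channel count (every view keeps the full 6x5 plane):
    //
    //     input [3, 6, 5] --> output1 [1, 6, 5]  (channel 0)
    //                     \-> output2 [2, 6, 5]  (channels 1 and 2)
    //
    //     output2 [2, 6, 5] --> output3 [1, 6, 5]  (channel 1 of the input)
    //                       \-> output4 [1, 6, 5]  (channel 2 of the input)
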
    // Dimensions used to take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // Dimensions used to take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }
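    // (The scale and offset were already passed to the TensorInfo constructors above, so these
    // setters merely restate the same values; they are kept to make the quantized setup explicit.)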

    auto input = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    // Channel 0 of the original input.
    auto expectedData1 = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        },
        qScale, qOffset);

    // Channels 1 and 2 of the original input.
    auto expectedData2 = armnnUtils::QuantizedVector<T>(
        {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    // Channel 0 of the second output (i.e. channel 1 of the original input).
    auto expectedData3 = armnnUtils::QuantizedVector<T>(
        {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        },
        qScale, qOffset);

    // Channel 1 of the second output (i.e. channel 2 of the original input).
    auto expectedData4 = armnnUtils::QuantizedVector<T>(
        {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    std::vector<T> actualData1(outputTensorInfo1.GetNumElements());
    std::vector<T> actualData2(outputTensorInfo2.GetNumElements());
    std::vector<T> actualData3(outputTensorInfo3.GetNumElements());
    std::vector<T> actualData4(outputTensorInfo4.GetNumElements());

    // NOTE: As a corollary of the restriction that x and y cannot be split, the x and y values of
    // the view origins have to be zero. The coordinates follow the tensor infos above: channels,
    // height (y), width (x). Note that under the hood the compute engine reverses these, i.e. its
    // coordinate system is x, y, channels.
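    // For example, wOrigin2 = {1, 0, 0} below starts the second view at channel 1 of the input;
    // each view's extent is taken from the shape of the corresponding output tensor info.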
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo4);
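    // When subtensors are supported, outputHandle1/2 alias regions of the input tensor and
    // outputHandle3/4 alias regions of outputHandle2, so the splits are effectively zero-copy;
    // otherwise each output gets its own standalone allocation.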

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
                                                                                data,
                                                                                info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualData1.data(), outputHandle1.get());
    CopyDataFromITensorHandle(actualData2.data(), outputHandle2.get());
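    // actualData1 and actualData2 now hold the host-side results of the first split; they are
    // paired with the expected data in the LayerTestResult objects constructed at the end.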

    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
                                                                                 data2,
                                                                                 info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    ExecuteWorkload(*workload2, memoryManager);

    CopyDataFromITensorHandle(actualData3.data(), outputHandle3.get());
    CopyDataFromITensorHandle(actualData4.data(), outputHandle4.get());

    LayerTestResult<T,3> ret1(actualData1, expectedData1, outputHandle1->GetShape(), outputTensorInfo1.GetShape());
    LayerTestResult<T,3> ret2(actualData2, expectedData2, outputHandle2->GetShape(), outputTensorInfo2.GetShape());
    LayerTestResult<T,3> ret3(actualData3, expectedData3, outputHandle3->GetShape(), outputTensorInfo3.GetShape());
    LayerTestResult<T,3> ret4(actualData4, expectedData4, outputHandle4->GetShape(), outputTensorInfo4.GetShape());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}

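// A splitter configured with a single view at origin {0, 0, 0} whose extent covers the whole
// input tensor simply copies the input through to its one output, which is what the
// CopyViaSplitter tests below exercise.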
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale, int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
    auto input = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(tensorInfo.GetNumElements());

    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

    const bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            tensorHandleFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
                                                                                data,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 input,
                                 outputHandle->GetShape(),
                                 tensorInfo.GetShape());
}

} // anonymous namespace

std::vector<LayerTestResult<float,3>> SplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<armnn::Half,3>> SplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}

LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}
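
// For reference: backend unit-test suites in ArmNN typically register these helpers with a
// macro such as ARMNN_AUTO_TEST_CASE_WITH_THF (an assumption based on other backend test
// files, not something defined here), roughly as follows:
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSplitterFloat32, SplitterFloat32Test)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test)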