ArmNN 21.11
SplitterTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SplitterTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T, 3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);

    unsigned int inputWidth    = 5;
    unsigned int inputHeight   = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input height and width)
    // cannot be split. For the reasoning behind this, see the first comment on
    // https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // Dimensions used to take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1    = inputWidth;
    unsigned int outputHeight1   = inputHeight;
    unsigned int outputChannels1 = 1;

    // Dimensions used to take channels 1 and 2 of the original input.
    unsigned int outputWidth2    = inputWidth;
    unsigned int outputHeight2   = inputHeight;
    unsigned int outputChannels2 = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    auto input = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    // Channel 0 of the original input.
    auto expectedData1 = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        },
        qScale, qOffset);

    // Channels 1 and 2 of the original input.
    auto expectedData2 = armnnUtils::QuantizedVector<T>(
        {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    // Channel 0 of output 2 (which holds channels 1 and 2 of the original input).
    auto expectedData3 = armnnUtils::QuantizedVector<T>(
        {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        },
        qScale, qOffset);

    // Channel 1 of output 2.
    auto expectedData4 = armnnUtils::QuantizedVector<T>(
        {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    std::vector<T> actualData1(outputTensorInfo1.GetNumElements());
    std::vector<T> actualData2(outputTensorInfo2.GetNumElements());
    std::vector<T> actualData3(outputTensorInfo3.GetNumElements());
    std::vector<T> actualData4(outputTensorInfo4.GetNumElements());

    // NOTE: As a corollary of the restriction on splitting the x and y dimensions, the x and y values
    // of the view origins have to be zero. The coordinates follow the tensor infos above: channels,
    // height/y, width/x. Note that under the hood the compute engine reverses these, i.e. its
    // coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
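
    // When the backend supports sub-tensors, each output handle created below is a view that
    // aliases its parent's memory (the input tensor for the first split, output 2 for the
    // second), so the split itself needs no extra copies; otherwise standalone tensor handles
    // are allocated instead.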
    bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo4);

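    // Each output of a SplitterQueueDescriptor is paired, in order, with one entry of
    // m_ViewOrigins: output[i] reads the window of the parent tensor that starts at view
    // origin i, with the window's extent given by output[i]'s own shape.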
    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

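    // The workload factory turns the populated descriptor into a backend-specific splitter workload.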
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualData1.data(), outputHandle1.get());
    CopyDataFromITensorHandle(actualData2.data(), outputHandle2.get());

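    // outputHandle2 now holds channels 1 and 2 of the original input; it is reused below as
    // the input of the second split.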
    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    ExecuteWorkload(*workload2, memoryManager);

    CopyDataFromITensorHandle(actualData3.data(), outputHandle3.get());
    CopyDataFromITensorHandle(actualData4.data(), outputHandle4.get());

    LayerTestResult<T, 3> ret1(actualData1, expectedData1, outputHandle1->GetShape(), outputTensorInfo1.GetShape());
    LayerTestResult<T, 3> ret2(actualData2, expectedData2, outputHandle2->GetShape(), outputTensorInfo2.GetShape());
    LayerTestResult<T, 3> ret3(actualData3, expectedData3, outputHandle3->GetShape(), outputTensorInfo3.GetShape());
    LayerTestResult<T, 3> ret4(actualData4, expectedData4, outputHandle4->GetShape(), outputTensorInfo4.GetShape());

    std::vector<LayerTestResult<T, 3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale, int32_t qOffset)
{
    IgnoreUnused(memoryManager);
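
    // A splitter with a single view at origin {0, 0, 0}, whose extent equals the whole input
    // tensor, does not split anything: it simply copies its input to the one output. This test
    // uses that degenerate split to exercise the copy path.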
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);

    auto input = armnnUtils::QuantizedVector<T>(
        {
             1.0f,  2.0f,  3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,  9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(tensorInfo.GetNumElements());

    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

    const bool subTensorsSupported = tensorHandleFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            tensorHandleFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            tensorHandleFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 input,
                                 outputHandle->GetShape(),
                                 tensorInfo.GetShape());
}

} // anonymous namespace

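// Exported test entry points: one Splitter test and one CopyViaSplitter test per supported data type.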
std::vector<LayerTestResult<float, 3>> SplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<armnn::Half, 3>> SplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

std::vector<LayerTestResult<int16_t, 3>> SplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory,
                                                             memoryManager,
                                                             tensorHandleFactory,
                                                             0.0f,
                                                             0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}

LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory,
                                                              memoryManager,
                                                              tensorHandleFactory,
                                                              1.0f,
                                                              0);
}