ArmNN
 20.08
SplitterTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "SplitterTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
16 
17 namespace
18 {
19 
20 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
21 std::vector<LayerTestResult<T,3>> SplitterTestCommon(
22  armnn::IWorkloadFactory& workloadFactory,
24  float qScale = 0.0f,
25  int32_t qOffset = 0)
26 {
27  IgnoreUnused(memoryManager);
28  unsigned int inputWidth = 5;
29  unsigned int inputHeight = 6;
30  unsigned int inputChannels = 3;
31 
32  // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
33  // cannot be split.
34  // For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
35  //
36  // This test has therefore been recast to split the channels, then split the resulting subtensor.
37 
38  // To take channel 0 of original output
39  // and channel 0 and channel 1 of the split subtensor.
40  unsigned int outputWidth1 = inputWidth;
41  unsigned int outputHeight1 = inputHeight;
42  unsigned int outputChannels1 = 1;
43 
44  // To take channel 1 and 2 of the original output.
45  unsigned int outputWidth2 = inputWidth;
46  unsigned int outputHeight2 = inputHeight;
47  unsigned int outputChannels2 = 2;
48 
49 
50  // Define the tensor descriptors.
51  armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);
52 
53  // Outputs of the original split.
54  armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
55  armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);
56 
57  // Outputs of the subsequent subtensor split.
58  armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
59  armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
60 
61  // Set quantization parameters if the requested type is a quantized type.
62  // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
63  if(armnn::IsQuantizedType<T>())
64  {
65  inputTensorInfo.SetQuantizationScale(qScale);
66  inputTensorInfo.SetQuantizationOffset(qOffset);
67  outputTensorInfo1.SetQuantizationScale(qScale);
68  outputTensorInfo1.SetQuantizationOffset(qOffset);
69  outputTensorInfo2.SetQuantizationScale(qScale);
70  outputTensorInfo2.SetQuantizationOffset(qOffset);
71  outputTensorInfo3.SetQuantizationScale(qScale);
72  outputTensorInfo3.SetQuantizationOffset(qOffset);
73  outputTensorInfo4.SetQuantizationScale(qScale);
74  outputTensorInfo4.SetQuantizationOffset(qOffset);
75  }
76 
77  LayerTestResult<T,3> ret1(outputTensorInfo1);
78  LayerTestResult<T,3> ret2(outputTensorInfo2);
79  LayerTestResult<T,3> ret3(outputTensorInfo3);
80  LayerTestResult<T,3> ret4(outputTensorInfo4);
81 
82  auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
83  armnnUtils::QuantizedVector<T>({
84  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
85  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
86  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
87  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
88  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
89  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
90 
91  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
92  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
93  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
94  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
95  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
96  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
97 
98  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
99  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
100  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
101  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
102  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
103  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
104  },
105  qScale, qOffset)
106  ));
107 
108  // Channel 0 of the original input.
109  ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
110  armnnUtils::QuantizedVector<T>({
111  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
112  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
113  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
114  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
115  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
116  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
117  },
118  qScale, qOffset)
119  ));
120 
121  // Channel 1 & 2 of the original input.
122  ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
123  armnnUtils::QuantizedVector<T>({
124  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
125  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
126  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
127  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
128  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
129  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
130 
131  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
132  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
133  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
134  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
135  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
136  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
137  },
138  qScale, qOffset)
139  ));
140 
141  // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
142  ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
143  armnnUtils::QuantizedVector<T>({
144  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
145  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
146  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
147  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
148  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
149  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
150  },
151  qScale, qOffset)
152  ));
153 
154  // Channel 1 of return 2.
155  ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
156  armnnUtils::QuantizedVector<T>({
157  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
158  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
159  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
160  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
161  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
162  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
163  },
164  qScale, qOffset)
165  ));
166 
167  // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
168  // have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x
169  // note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
170  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
172 
173  std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
175 
176  std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
178 
179  std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
181 
182  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
183 
185  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
186 
187  std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
188  subTensorsSupported ?
189  workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
190  workloadFactory.CreateTensorHandle(outputTensorInfo1);
191 
192  std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
193  subTensorsSupported ?
194  workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
195  workloadFactory.CreateTensorHandle(outputTensorInfo2);
196 
197  std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
198  subTensorsSupported ?
199  workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
200  workloadFactory.CreateTensorHandle(outputTensorInfo3);
201 
202  std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
203  subTensorsSupported ?
204  workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
205  workloadFactory.CreateTensorHandle(outputTensorInfo4);
207 
208  // Do the first split
211  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
212  AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
213  AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
214 
215  data.m_ViewOrigins.push_back(window1);
216  data.m_ViewOrigins.push_back(window2);
217 
218  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
219 
220  inputHandle->Allocate();
221  outputHandle1->Allocate();
222  outputHandle2->Allocate();
223 
224  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
225 
226  workload->Execute();
227 
228  CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
229  CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
230 
231  // Do the second split.
233  armnn::WorkloadInfo info2;
234  AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
235  AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
236  AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
237 
238  data2.m_ViewOrigins.push_back(window3);
239  data2.m_ViewOrigins.push_back(window4);
240 
241  std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
242 
243  outputHandle3->Allocate();
244  outputHandle4->Allocate();
245 
246  ExecuteWorkload(*workload2, memoryManager);
247 
248  CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
249  CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
250 
251  std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
252 
253  return ret;
254 }
255 
256 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
257 LayerTestResult<T, 3> CopyViaSplitterTestImpl(
258  armnn::IWorkloadFactory& workloadFactory,
260  float qScale, int32_t qOffset)
261 {
262  IgnoreUnused(memoryManager);
263  const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
264  auto input = MakeTensor<T, 3>(
265  tensorInfo,
266  armnnUtils::QuantizedVector<T>({
267  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
268  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
269  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
270  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
271  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
272  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
273 
274  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
275  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
276  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
277  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
278  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
279  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
280 
281  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
282  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
283  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
284  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
285  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
286  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
287  },
288  qScale, qOffset));
289 
290  std::vector<unsigned int> origin = { 0, 0, 0 };
292 
293  const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
294 
296  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
297 
298  std::unique_ptr<armnn::ITensorHandle> outputHandle =
299  subTensorsSupported ?
300  workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
301  workloadFactory.CreateTensorHandle(tensorInfo);
303 
305  armnn::WorkloadInfo info;
306  AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
307  AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
308 
309  data.m_ViewOrigins.push_back(window);
310 
311  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
312 
313  inputHandle->Allocate();
314  outputHandle->Allocate();
315 
316  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
317 
318  workload->Execute();
319 
320  LayerTestResult<T, 3> ret(tensorInfo);
321  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
322  ret.outputExpected = input;
323 
324  return ret;
325 }
326 
327 } // anonymous namespace
328 
329 std::vector<LayerTestResult<float,3>> SplitterFloat32Test(
330  armnn::IWorkloadFactory& workloadFactory,
332 {
333  return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
334 }
335 
336 std::vector<LayerTestResult<armnn::Half,3>> SplitterFloat16Test(
337  armnn::IWorkloadFactory& workloadFactory,
339 {
340  return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager);
341 }
342 
343 std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
344  armnn::IWorkloadFactory& workloadFactory,
346 {
347  return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
348 }
349 
350 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
351  armnn::IWorkloadFactory& workloadFactory,
353 {
354  return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
355 }
356 
358  armnn::IWorkloadFactory& workloadFactory,
360 {
361  return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
362 }
363 
365  armnn::IWorkloadFactory& workloadFactory,
367 {
368  return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
369 }
370 
372  armnn::IWorkloadFactory& workloadFactory,
374 {
375  return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
376 }
377 
379  armnn::IWorkloadFactory& workloadFactory,
381 {
382  return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
383 }
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< LayerTestResult< float, 3 > > SplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< uint8_t, 3 > CopyViaSplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
LayerTestResult< armnn::Half, 3 > CopyViaSplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< armnn::Half, 3 > > SplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< uint8_t, 3 > > SplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< LayerTestResult< int16_t, 3 > > SplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 3 > CopyViaSplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
LayerTestResult< int16_t, 3 > CopyViaSplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0