ArmNN
 20.02
SplitterTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "SplitterTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
17 namespace
18 {
19 
20 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
21 std::vector<LayerTestResult<T,3>> SplitterTestCommon(
22  armnn::IWorkloadFactory& workloadFactory,
24  float qScale = 0.0f,
25  int32_t qOffset = 0)
26 {
27  IgnoreUnused(memoryManager);
28  unsigned int inputWidth = 5;
29  unsigned int inputHeight = 6;
30  unsigned int inputChannels = 3;
31 
32  // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
33  // cannot be split.
34  // For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
35  //
36  // This test has therefore been recast to split the channels, then split the resulting subtensor.
37 
38  // To take channel 0 of original output
39  // and channel 0 and channel 1 of the split subtensor.
40  unsigned int outputWidth1 = inputWidth;
41  unsigned int outputHeight1 = inputHeight;
42  unsigned int outputChannels1 = 1;
43 
44  // To take channel 1 and 2 of the original output.
45  unsigned int outputWidth2 = inputWidth;
46  unsigned int outputHeight2 = inputHeight;
47  unsigned int outputChannels2 = 2;
48 
49 
50  // Define the tensor descriptors.
51  armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);
52 
53  // Outputs of the original split.
54  armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
55  armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);
56 
57  // Outputs of the subsequent subtensor split.
58  armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
59  armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
60 
61  // Set quantization parameters if the requested type is a quantized type.
62  // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
63  if(armnn::IsQuantizedType<T>())
64  {
65  inputTensorInfo.SetQuantizationScale(qScale);
66  inputTensorInfo.SetQuantizationOffset(qOffset);
67  outputTensorInfo1.SetQuantizationScale(qScale);
68  outputTensorInfo1.SetQuantizationOffset(qOffset);
69  outputTensorInfo2.SetQuantizationScale(qScale);
70  outputTensorInfo2.SetQuantizationOffset(qOffset);
71  outputTensorInfo3.SetQuantizationScale(qScale);
72  outputTensorInfo3.SetQuantizationOffset(qOffset);
73  outputTensorInfo4.SetQuantizationScale(qScale);
74  outputTensorInfo4.SetQuantizationOffset(qOffset);
75  }
76 
77  LayerTestResult<T,3> ret1(outputTensorInfo1);
78  LayerTestResult<T,3> ret2(outputTensorInfo2);
79  LayerTestResult<T,3> ret3(outputTensorInfo3);
80  LayerTestResult<T,3> ret4(outputTensorInfo4);
81 
82  auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
83  armnnUtils::QuantizedVector<T>({
84  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
85  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
86  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
87  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
88  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
89  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
90 
91  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
92  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
93  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
94  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
95  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
96  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
97 
98  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
99  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
100  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
101  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
102  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
103  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
104  },
105  qScale, qOffset)
106  ));
107 
108  // Channel 0 of the original input.
109  ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
110  armnnUtils::QuantizedVector<T>({
111  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
112  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
113  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
114  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
115  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
116  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
117  },
118  qScale, qOffset)
119  ));
120 
121  // Channel 1 & 2 of the original input.
122  ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
123  armnnUtils::QuantizedVector<T>({
124  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
125  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
126  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
127  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
128  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
129  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
130 
131  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
132  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
133  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
134  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
135  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
136  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
137  },
138  qScale, qOffset)
139  ));
140 
141  // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
142  ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
143  armnnUtils::QuantizedVector<T>({
144  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
145  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
146  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
147  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
148  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
149  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
150  },
151  qScale, qOffset)
152  ));
153 
154  // Channel 1 of return 2.
155  ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
156  armnnUtils::QuantizedVector<T>({
157  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
158  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
159  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
160  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
161  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
162  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
163  },
164  qScale, qOffset)
165  ));
166 
167  // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
168  // have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x
169  // note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
170  std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
172 
173  std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
175 
176  std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
178 
179  std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
181 
182  bool subTensorsSupported = workloadFactory.SupportsSubTensors();
183 
184  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
185 
186  std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
187  subTensorsSupported ?
188  workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
189  workloadFactory.CreateTensorHandle(outputTensorInfo1);
190 
191  std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
192  subTensorsSupported ?
193  workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
194  workloadFactory.CreateTensorHandle(outputTensorInfo2);
195 
196  std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
197  subTensorsSupported ?
198  workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
199  workloadFactory.CreateTensorHandle(outputTensorInfo3);
200 
201  std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
202  subTensorsSupported ?
203  workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
204  workloadFactory.CreateTensorHandle(outputTensorInfo4);
205 
206  // Do the first split
209  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
210  AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
211  AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
212 
213  data.m_ViewOrigins.push_back(window1);
214  data.m_ViewOrigins.push_back(window2);
215 
216  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
217 
218  inputHandle->Allocate();
219  outputHandle1->Allocate();
220  outputHandle2->Allocate();
221 
222  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
223 
224  workload->Execute();
225 
226  CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
227  CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
228 
229  // Do the second split.
231  armnn::WorkloadInfo info2;
232  AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
233  AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
234  AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
235 
236  data2.m_ViewOrigins.push_back(window3);
237  data2.m_ViewOrigins.push_back(window4);
238 
239  std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
240 
241  outputHandle3->Allocate();
242  outputHandle4->Allocate();
243 
244  ExecuteWorkload(*workload2, memoryManager);
245 
246  CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
247  CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
248 
249  std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
250 
251  return ret;
252 }
253 
254 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
255 LayerTestResult<T, 3> CopyViaSplitterTestImpl(
256  armnn::IWorkloadFactory& workloadFactory,
258  float qScale, int32_t qOffset)
259 {
260  IgnoreUnused(memoryManager);
261  const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
262  auto input = MakeTensor<T, 3>(
263  tensorInfo,
264  armnnUtils::QuantizedVector<T>({
265  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
266  6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
267  11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
268  16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
269  21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
270  26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
271 
272  31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
273  36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
274  41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
275  46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
276  51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
277  56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
278 
279  61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
280  66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
281  71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
282  76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
283  81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
284  86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
285  },
286  qScale, qOffset));
287 
288  std::vector<unsigned int> origin = { 0, 0, 0 };
290 
291  const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
292 
293  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
294 
295  std::unique_ptr<armnn::ITensorHandle> outputHandle =
296  subTensorsSupported ?
297  workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
298  workloadFactory.CreateTensorHandle(tensorInfo);
299 
301  armnn::WorkloadInfo info;
302  AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
303  AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
304 
305  data.m_ViewOrigins.push_back(window);
306 
307  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
308 
309  inputHandle->Allocate();
310  outputHandle->Allocate();
311 
312  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
313 
314  workload->Execute();
315 
316  LayerTestResult<T, 3> ret(tensorInfo);
317  CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
318  ret.outputExpected = input;
319 
320  return ret;
321 }
322 
323 } // anonymous namespace
324 
325 std::vector<LayerTestResult<float,3>> SplitterFloat32Test(
326  armnn::IWorkloadFactory& workloadFactory,
328 {
329  return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
330 }
331 
332 std::vector<LayerTestResult<armnn::Half,3>> SplitterFloat16Test(
333  armnn::IWorkloadFactory& workloadFactory,
335 {
336  return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager);
337 }
338 
339 std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
340  armnn::IWorkloadFactory& workloadFactory,
342 {
343  return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
344 }
345 
346 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
347  armnn::IWorkloadFactory& workloadFactory,
349 {
350  return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
351 }
352 
354  armnn::IWorkloadFactory& workloadFactory,
356 {
357  return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
358 }
359 
361  armnn::IWorkloadFactory& workloadFactory,
363 {
364  return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
365 }
366 
368  armnn::IWorkloadFactory& workloadFactory,
370 {
371  return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
372 }
373 
375  armnn::IWorkloadFactory& workloadFactory,
377 {
378  return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
379 }
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< LayerTestResult< float, 3 > > SplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > CopyViaSplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
LayerTestResult< armnn::Half, 3 > CopyViaSplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< armnn::Half, 3 > > SplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< uint8_t, 3 > > SplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< LayerTestResult< int16_t, 3 > > SplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< ViewOrigin > m_ViewOrigins
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 3 > CopyViaSplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
LayerTestResult< int16_t, 3 > CopyViaSplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0