ArmNN
 21.02
DebugTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "DebugTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
16 
17 namespace
18 {
19 
20 template<typename T, std::size_t Dim>
21 LayerTestResult<T, Dim> DebugTestImpl(
22  armnn::IWorkloadFactory& workloadFactory,
24  armnn::TensorInfo& inputTensorInfo,
25  armnn::TensorInfo& outputTensorInfo,
26  std::vector<float>& inputData,
27  std::vector<float>& outputExpectedData,
28  armnn::DebugQueueDescriptor descriptor,
29  const std::string expectedStringOutput,
30  const float qScale = 1.0f,
31  const int32_t qOffset = 0)
32 {
33  IgnoreUnused(memoryManager);
34  if(armnn::IsQuantizedType<T>())
35  {
36  inputTensorInfo.SetQuantizationScale(qScale);
37  inputTensorInfo.SetQuantizationOffset(qOffset);
38 
39  outputTensorInfo.SetQuantizationScale(qScale);
40  outputTensorInfo.SetQuantizationOffset(qOffset);
41  }
42 
43  boost::multi_array<T, Dim> input =
44  MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
45 
46  LayerTestResult<T, Dim> ret(outputTensorInfo);
47  ret.outputExpected =
48  MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
49 
51  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
52  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
54 
56  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
57  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
58 
59  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDebug(descriptor, info);
60 
61  inputHandle->Allocate();
62  outputHandle->Allocate();
63 
64  CopyDataToITensorHandle(inputHandle.get(), input.data());
65 
66  std::ostringstream oss;
67  std::streambuf* coutStreambuf = std::cout.rdbuf();
68  std::cout.rdbuf(oss.rdbuf());
69 
70  ExecuteWorkload(*workload, memoryManager);
71 
72  std::cout.rdbuf(coutStreambuf);
73 
74  BOOST_TEST(oss.str() == expectedStringOutput);
75 
76  CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
77 
78  return ret;
79 }
80 
81 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
82 LayerTestResult<T, 4> Debug4dTest(
83  armnn::IWorkloadFactory& workloadFactory,
85 {
86  armnn::TensorInfo inputTensorInfo;
87  armnn::TensorInfo outputTensorInfo;
88 
89  unsigned int inputShape[] = {1, 2, 2, 3};
90  unsigned int outputShape[] = {1, 2, 2, 3};
91 
93  desc.m_Guid = 1;
94  desc.m_LayerName = "TestOutput";
95  desc.m_SlotIndex = 0;
96 
97  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
98  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
99 
100  std::vector<float> input = std::vector<float>(
101  {
102  1.0f, 2.0f, 3.0f,
103  4.0f, 5.0f, 6.0f,
104  7.0f, 8.0f, 9.0f,
105  10.0f, 11.0f, 12.0f,
106  });
107 
108  std::vector<float> outputExpected = std::vector<float>(
109  {
110  1.0f, 2.0f, 3.0f,
111  4.0f, 5.0f, 6.0f,
112  7.0f, 8.0f, 9.0f,
113  10.0f, 11.0f, 12.0f,
114  });
115 
116  const std::string expectedStringOutput =
117  "{ \"layerGuid\": 1,"
118  " \"layerName\": \"TestOutput\","
119  " \"outputSlot\": 0,"
120  " \"shape\": [1, 2, 2, 3],"
121  " \"min\": 1, \"max\": 12,"
122  " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
123 
124  return DebugTestImpl<T, 4>(workloadFactory,
125  memoryManager,
126  inputTensorInfo,
127  outputTensorInfo,
128  input,
129  outputExpected,
130  desc,
131  expectedStringOutput);
132 }
133 
134 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
135 LayerTestResult<T, 3> Debug3dTest(
136  armnn::IWorkloadFactory& workloadFactory,
138 {
139  armnn::TensorInfo inputTensorInfo;
140  armnn::TensorInfo outputTensorInfo;
141 
142  unsigned int inputShape[] = {3, 3, 1};
143  unsigned int outputShape[] = {3, 3, 1};
144 
146  desc.m_Guid = 1;
147  desc.m_LayerName = "TestOutput";
148  desc.m_SlotIndex = 0;
149 
150  inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
151  outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
152 
153  std::vector<float> input = std::vector<float>(
154  {
155  1.0f, 2.0f, 3.0f,
156  4.0f, 5.0f, 6.0f,
157  7.0f, 8.0f, 9.0f,
158  });
159 
160  std::vector<float> outputExpected = std::vector<float>(
161  {
162  1.0f, 2.0f, 3.0f,
163  4.0f, 5.0f, 6.0f,
164  7.0f, 8.0f, 9.0f,
165  });
166 
167  const std::string expectedStringOutput =
168  "{ \"layerGuid\": 1,"
169  " \"layerName\": \"TestOutput\","
170  " \"outputSlot\": 0,"
171  " \"shape\": [3, 3, 1],"
172  " \"min\": 1, \"max\": 9,"
173  " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
174 
175  return DebugTestImpl<T, 3>(workloadFactory,
176  memoryManager,
177  inputTensorInfo,
178  outputTensorInfo,
179  input,
180  outputExpected,
181  desc,
182  expectedStringOutput);
183 }
184 
185 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
186 LayerTestResult<T, 2> Debug2dTest(
187  armnn::IWorkloadFactory& workloadFactory,
189 {
190  armnn::TensorInfo inputTensorInfo;
191  armnn::TensorInfo outputTensorInfo;
192 
193  unsigned int inputShape[] = {2, 2};
194  unsigned int outputShape[] = {2, 2};
195 
197  desc.m_Guid = 1;
198  desc.m_LayerName = "TestOutput";
199  desc.m_SlotIndex = 0;
200 
201  inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
202  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
203 
204  std::vector<float> input = std::vector<float>(
205  {
206  1.0f, 2.0f,
207  3.0f, 4.0f,
208  });
209 
210  std::vector<float> outputExpected = std::vector<float>(
211  {
212  1.0f, 2.0f,
213  3.0f, 4.0f,
214  });
215 
216  const std::string expectedStringOutput =
217  "{ \"layerGuid\": 1,"
218  " \"layerName\": \"TestOutput\","
219  " \"outputSlot\": 0,"
220  " \"shape\": [2, 2],"
221  " \"min\": 1, \"max\": 4,"
222  " \"data\": [[1, 2], [3, 4]] }\n";
223 
224  return DebugTestImpl<T, 2>(workloadFactory,
225  memoryManager,
226  inputTensorInfo,
227  outputTensorInfo,
228  input,
229  outputExpected,
230  desc,
231  expectedStringOutput);
232 }
233 
234 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
235 LayerTestResult<T, 1> Debug1dTest(
236  armnn::IWorkloadFactory& workloadFactory,
238 {
239  armnn::TensorInfo inputTensorInfo;
240  armnn::TensorInfo outputTensorInfo;
241 
242  unsigned int inputShape[] = {4};
243  unsigned int outputShape[] = {4};
244 
246  desc.m_Guid = 1;
247  desc.m_LayerName = "TestOutput";
248  desc.m_SlotIndex = 0;
249 
250  inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
251  outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
252 
253  std::vector<float> input = std::vector<float>(
254  {
255  1.0f, 2.0f, 3.0f, 4.0f,
256  });
257 
258  std::vector<float> outputExpected = std::vector<float>(
259  {
260  1.0f, 2.0f, 3.0f, 4.0f,
261  });
262 
263  const std::string expectedStringOutput =
264  "{ \"layerGuid\": 1,"
265  " \"layerName\": \"TestOutput\","
266  " \"outputSlot\": 0,"
267  " \"shape\": [4],"
268  " \"min\": 1, \"max\": 4,"
269  " \"data\": [1, 2, 3, 4] }\n";
270 
271  return DebugTestImpl<T, 1>(workloadFactory,
272  memoryManager,
273  inputTensorInfo,
274  outputTensorInfo,
275  input,
276  outputExpected,
277  desc,
278  expectedStringOutput);
279 }
280 
281 } // anonymous namespace
282 
284  armnn::IWorkloadFactory& workloadFactory,
286 {
287  return Debug4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
288 }
289 
291  armnn::IWorkloadFactory& workloadFactory,
293 {
294  return Debug3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
295 }
296 
298  armnn::IWorkloadFactory& workloadFactory,
300 {
301  return Debug2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
302 }
303 
305  armnn::IWorkloadFactory& workloadFactory,
307 {
308  return Debug1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
309 }
310 
312  armnn::IWorkloadFactory& workloadFactory,
314 {
315  return Debug4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
316 }
317 
319  armnn::IWorkloadFactory& workloadFactory,
321 {
322  return Debug3dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
323 }
324 
326  armnn::IWorkloadFactory& workloadFactory,
328 {
329  return Debug2dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
330 }
331 
333  armnn::IWorkloadFactory& workloadFactory,
335 {
336  return Debug1dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
337 }
338 
340  armnn::IWorkloadFactory& workloadFactory,
342 {
343  return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
344 }
345 
347  armnn::IWorkloadFactory& workloadFactory,
349 {
350  return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
351 }
352 
354  armnn::IWorkloadFactory& workloadFactory,
356 {
357  return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
358 }
359 
361  armnn::IWorkloadFactory& workloadFactory,
363 {
364  return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
365 }
366 
368  armnn::IWorkloadFactory& workloadFactory,
370 {
371  return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
372 }
373 
375  armnn::IWorkloadFactory& workloadFactory,
377 {
378  return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
379 }
380 
382  armnn::IWorkloadFactory& workloadFactory,
384 {
385  return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
386 }
387 
389  armnn::IWorkloadFactory& workloadFactory,
391 {
392  return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
393 }
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< int16_t, 3 > Debug3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 1 > Debug1dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 1 > Debug1dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 1 > Debug1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 1 > Debug1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 2 > Debug2dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
LayerTestResult< armnn::BFloat16, 3 > Debug3dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< armnn::BFloat16, 2 > Debug2dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > Debug4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > Debug3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 4 > Debug4dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > Debug4dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 2 > Debug2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
LayerTestResult< uint8_t, 4 > Debug4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 2 > Debug2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 3 > Debug3dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)