ArmNN
 21.08
DebugTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "DebugTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

// NOTE(review): the two includes below were dropped by the doc extraction
// (original lines 12-13); paths follow the 21.08 tree layout — verify.
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <doctest/doctest.h>
18 
19 namespace
20 {
21 
22 template<typename T, std::size_t Dim>
23 LayerTestResult<T, Dim> DebugTestImpl(
24  armnn::IWorkloadFactory& workloadFactory,
26  armnn::TensorInfo& inputTensorInfo,
27  armnn::TensorInfo& outputTensorInfo,
28  std::vector<float>& inputData,
29  std::vector<float>& outputExpectedData,
30  armnn::DebugQueueDescriptor descriptor,
31  const std::string expectedStringOutput,
32  const float qScale = 1.0f,
33  const int32_t qOffset = 0)
34 {
35  IgnoreUnused(memoryManager);
36  if(armnn::IsQuantizedType<T>())
37  {
38  inputTensorInfo.SetQuantizationScale(qScale);
39  inputTensorInfo.SetQuantizationOffset(qOffset);
40 
41  outputTensorInfo.SetQuantizationScale(qScale);
42  outputTensorInfo.SetQuantizationOffset(qOffset);
43  }
44 
45  std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
46 
47  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
48  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
49 
51  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
52  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
54 
56  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
57  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
58 
59  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDebug(descriptor, info);
60 
61  inputHandle->Allocate();
62  outputHandle->Allocate();
63 
64  CopyDataToITensorHandle(inputHandle.get(), input.data());
65 
66  std::ostringstream oss;
67  std::streambuf* coutStreambuf = std::cout.rdbuf();
68  std::cout.rdbuf(oss.rdbuf());
69 
70  ExecuteWorkload(*workload, memoryManager);
71 
72  std::cout.rdbuf(coutStreambuf);
73 
74  CHECK(oss.str() == expectedStringOutput);
75 
76  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
77 
78  return LayerTestResult<T, Dim>(actualOutput,
79  expectedOutput,
80  outputHandle->GetShape(),
81  outputTensorInfo.GetShape());
82 }
83 
84 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
85 LayerTestResult<T, 4> Debug4dTest(
86  armnn::IWorkloadFactory& workloadFactory,
88 {
89  armnn::TensorInfo inputTensorInfo;
90  armnn::TensorInfo outputTensorInfo;
91 
92  unsigned int inputShape[] = {1, 2, 2, 3};
93  unsigned int outputShape[] = {1, 2, 2, 3};
94 
96  desc.m_Guid = 1;
97  desc.m_LayerName = "TestOutput";
98  desc.m_SlotIndex = 0;
99 
100  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
101  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
102 
103  std::vector<float> input = std::vector<float>(
104  {
105  1.0f, 2.0f, 3.0f,
106  4.0f, 5.0f, 6.0f,
107  7.0f, 8.0f, 9.0f,
108  10.0f, 11.0f, 12.0f,
109  });
110 
111  std::vector<float> outputExpected = std::vector<float>(
112  {
113  1.0f, 2.0f, 3.0f,
114  4.0f, 5.0f, 6.0f,
115  7.0f, 8.0f, 9.0f,
116  10.0f, 11.0f, 12.0f,
117  });
118 
119  const std::string expectedStringOutput =
120  "{ \"layerGuid\": 1,"
121  " \"layerName\": \"TestOutput\","
122  " \"outputSlot\": 0,"
123  " \"shape\": [1, 2, 2, 3],"
124  " \"min\": 1, \"max\": 12,"
125  " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
126 
127  return DebugTestImpl<T, 4>(workloadFactory,
128  memoryManager,
129  inputTensorInfo,
130  outputTensorInfo,
131  input,
132  outputExpected,
133  desc,
134  expectedStringOutput);
135 }
136 
137 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
138 LayerTestResult<T, 3> Debug3dTest(
139  armnn::IWorkloadFactory& workloadFactory,
141 {
142  armnn::TensorInfo inputTensorInfo;
143  armnn::TensorInfo outputTensorInfo;
144 
145  unsigned int inputShape[] = {3, 3, 1};
146  unsigned int outputShape[] = {3, 3, 1};
147 
149  desc.m_Guid = 1;
150  desc.m_LayerName = "TestOutput";
151  desc.m_SlotIndex = 0;
152 
153  inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
154  outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
155 
156  std::vector<float> input = std::vector<float>(
157  {
158  1.0f, 2.0f, 3.0f,
159  4.0f, 5.0f, 6.0f,
160  7.0f, 8.0f, 9.0f,
161  });
162 
163  std::vector<float> outputExpected = std::vector<float>(
164  {
165  1.0f, 2.0f, 3.0f,
166  4.0f, 5.0f, 6.0f,
167  7.0f, 8.0f, 9.0f,
168  });
169 
170  const std::string expectedStringOutput =
171  "{ \"layerGuid\": 1,"
172  " \"layerName\": \"TestOutput\","
173  " \"outputSlot\": 0,"
174  " \"shape\": [3, 3, 1],"
175  " \"min\": 1, \"max\": 9,"
176  " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
177 
178  return DebugTestImpl<T, 3>(workloadFactory,
179  memoryManager,
180  inputTensorInfo,
181  outputTensorInfo,
182  input,
183  outputExpected,
184  desc,
185  expectedStringOutput);
186 }
187 
188 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
189 LayerTestResult<T, 2> Debug2dTest(
190  armnn::IWorkloadFactory& workloadFactory,
192 {
193  armnn::TensorInfo inputTensorInfo;
194  armnn::TensorInfo outputTensorInfo;
195 
196  unsigned int inputShape[] = {2, 2};
197  unsigned int outputShape[] = {2, 2};
198 
200  desc.m_Guid = 1;
201  desc.m_LayerName = "TestOutput";
202  desc.m_SlotIndex = 0;
203 
204  inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
205  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
206 
207  std::vector<float> input = std::vector<float>(
208  {
209  1.0f, 2.0f,
210  3.0f, 4.0f,
211  });
212 
213  std::vector<float> outputExpected = std::vector<float>(
214  {
215  1.0f, 2.0f,
216  3.0f, 4.0f,
217  });
218 
219  const std::string expectedStringOutput =
220  "{ \"layerGuid\": 1,"
221  " \"layerName\": \"TestOutput\","
222  " \"outputSlot\": 0,"
223  " \"shape\": [2, 2],"
224  " \"min\": 1, \"max\": 4,"
225  " \"data\": [[1, 2], [3, 4]] }\n";
226 
227  return DebugTestImpl<T, 2>(workloadFactory,
228  memoryManager,
229  inputTensorInfo,
230  outputTensorInfo,
231  input,
232  outputExpected,
233  desc,
234  expectedStringOutput);
235 }
236 
237 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
238 LayerTestResult<T, 1> Debug1dTest(
239  armnn::IWorkloadFactory& workloadFactory,
241 {
242  armnn::TensorInfo inputTensorInfo;
243  armnn::TensorInfo outputTensorInfo;
244 
245  unsigned int inputShape[] = {4};
246  unsigned int outputShape[] = {4};
247 
249  desc.m_Guid = 1;
250  desc.m_LayerName = "TestOutput";
251  desc.m_SlotIndex = 0;
252 
253  inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
254  outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
255 
256  std::vector<float> input = std::vector<float>(
257  {
258  1.0f, 2.0f, 3.0f, 4.0f,
259  });
260 
261  std::vector<float> outputExpected = std::vector<float>(
262  {
263  1.0f, 2.0f, 3.0f, 4.0f,
264  });
265 
266  const std::string expectedStringOutput =
267  "{ \"layerGuid\": 1,"
268  " \"layerName\": \"TestOutput\","
269  " \"outputSlot\": 0,"
270  " \"shape\": [4],"
271  " \"min\": 1, \"max\": 4,"
272  " \"data\": [1, 2, 3, 4] }\n";
273 
274  return DebugTestImpl<T, 1>(workloadFactory,
275  memoryManager,
276  inputTensorInfo,
277  outputTensorInfo,
278  input,
279  outputExpected,
280  desc,
281  expectedStringOutput);
282 }
283 
284 } // anonymous namespace
285 
287  armnn::IWorkloadFactory& workloadFactory,
289 {
290  return Debug4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
291 }
292 
294  armnn::IWorkloadFactory& workloadFactory,
296 {
297  return Debug3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
298 }
299 
301  armnn::IWorkloadFactory& workloadFactory,
303 {
304  return Debug2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
305 }
306 
308  armnn::IWorkloadFactory& workloadFactory,
310 {
311  return Debug1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
312 }
313 
315  armnn::IWorkloadFactory& workloadFactory,
317 {
318  return Debug4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
319 }
320 
322  armnn::IWorkloadFactory& workloadFactory,
324 {
325  return Debug3dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
326 }
327 
329  armnn::IWorkloadFactory& workloadFactory,
331 {
332  return Debug2dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
333 }
334 
336  armnn::IWorkloadFactory& workloadFactory,
338 {
339  return Debug1dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
340 }
341 
343  armnn::IWorkloadFactory& workloadFactory,
345 {
346  return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
347 }
348 
350  armnn::IWorkloadFactory& workloadFactory,
352 {
353  return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
354 }
355 
357  armnn::IWorkloadFactory& workloadFactory,
359 {
360  return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
361 }
362 
364  armnn::IWorkloadFactory& workloadFactory,
366 {
367  return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
368 }
369 
371  armnn::IWorkloadFactory& workloadFactory,
373 {
374  return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
375 }
376 
378  armnn::IWorkloadFactory& workloadFactory,
380 {
381  return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
382 }
383 
385  armnn::IWorkloadFactory& workloadFactory,
387 {
388  return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
389 }
390 
392  armnn::IWorkloadFactory& workloadFactory,
394 {
395  return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
396 }
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< int16_t, 3 > Debug3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 1 > Debug1dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 1 > Debug1dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 1 > Debug1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 1 > Debug1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 2 > Debug2dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
LayerTestResult< armnn::BFloat16, 3 > Debug3dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< armnn::BFloat16, 2 > Debug2dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > Debug4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > Debug3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 4 > Debug4dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > Debug4dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 2 > Debug2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
LayerTestResult< uint8_t, 4 > Debug4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 2 > Debug2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 3 > Debug3dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)