ArmNN
 22.08
DebugTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "DebugTestImpl.hpp"

#include <ResolveType.hpp>

#include <doctest/doctest.h>

#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
18 
19 namespace
20 {
21 
22 template<typename T, std::size_t Dim>
23 LayerTestResult<T, Dim> DebugTestImpl(
24  armnn::IWorkloadFactory& workloadFactory,
26  armnn::TensorInfo& inputTensorInfo,
27  armnn::TensorInfo& outputTensorInfo,
28  std::vector<float>& inputData,
29  std::vector<float>& outputExpectedData,
30  armnn::DebugQueueDescriptor descriptor,
31  const std::string expectedStringOutput,
32  const float qScale = 1.0f,
33  const int32_t qOffset = 0)
34 {
35  IgnoreUnused(memoryManager);
36  if(armnn::IsQuantizedType<T>())
37  {
38  inputTensorInfo.SetQuantizationScale(qScale);
39  inputTensorInfo.SetQuantizationOffset(qOffset);
40 
41  outputTensorInfo.SetQuantizationScale(qScale);
42  outputTensorInfo.SetQuantizationOffset(qOffset);
43  }
44 
45  std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
46 
47  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
48  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset);
49 
51  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
52  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
54 
56  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
57  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
58 
59  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Debug,
60  descriptor,
61  info);
62 
63  inputHandle->Allocate();
64  outputHandle->Allocate();
65 
66  CopyDataToITensorHandle(inputHandle.get(), input.data());
67 
68  std::ostringstream oss;
69  std::streambuf* coutStreambuf = std::cout.rdbuf();
70  std::cout.rdbuf(oss.rdbuf());
71 
72  ExecuteWorkload(*workload, memoryManager);
73 
74  std::cout.rdbuf(coutStreambuf);
75 
76  CHECK(oss.str() == expectedStringOutput);
77 
78  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
79 
80  return LayerTestResult<T, Dim>(actualOutput,
81  expectedOutput,
82  outputHandle->GetShape(),
83  outputTensorInfo.GetShape());
84 }
85 
86 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
87 LayerTestResult<T, 4> Debug4dTest(
88  armnn::IWorkloadFactory& workloadFactory,
90 {
91  armnn::TensorInfo inputTensorInfo;
92  armnn::TensorInfo outputTensorInfo;
93 
94  unsigned int inputShape[] = {1, 2, 2, 3};
95  unsigned int outputShape[] = {1, 2, 2, 3};
96 
98  desc.m_Guid = 1;
99  desc.m_LayerName = "TestOutput";
100  desc.m_SlotIndex = 0;
101 
102  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
103  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
104 
105  std::vector<float> input = std::vector<float>(
106  {
107  1.0f, 2.0f, 3.0f,
108  4.0f, 5.0f, 6.0f,
109  7.0f, 8.0f, 9.0f,
110  10.0f, 11.0f, 12.0f,
111  });
112 
113  std::vector<float> outputExpected = std::vector<float>(
114  {
115  1.0f, 2.0f, 3.0f,
116  4.0f, 5.0f, 6.0f,
117  7.0f, 8.0f, 9.0f,
118  10.0f, 11.0f, 12.0f,
119  });
120 
121  const std::string expectedStringOutput =
122  "{ \"layerGuid\": 1,"
123  " \"layerName\": \"TestOutput\","
124  " \"outputSlot\": 0,"
125  " \"shape\": [1, 2, 2, 3],"
126  " \"min\": 1, \"max\": 12,"
127  " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
128 
129  return DebugTestImpl<T, 4>(workloadFactory,
130  memoryManager,
131  inputTensorInfo,
132  outputTensorInfo,
133  input,
134  outputExpected,
135  desc,
136  expectedStringOutput);
137 }
138 
139 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
140 LayerTestResult<T, 3> Debug3dTest(
141  armnn::IWorkloadFactory& workloadFactory,
143 {
144  armnn::TensorInfo inputTensorInfo;
145  armnn::TensorInfo outputTensorInfo;
146 
147  unsigned int inputShape[] = {3, 3, 1};
148  unsigned int outputShape[] = {3, 3, 1};
149 
151  desc.m_Guid = 1;
152  desc.m_LayerName = "TestOutput";
153  desc.m_SlotIndex = 0;
154 
155  inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
156  outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
157 
158  std::vector<float> input = std::vector<float>(
159  {
160  1.0f, 2.0f, 3.0f,
161  4.0f, 5.0f, 6.0f,
162  7.0f, 8.0f, 9.0f,
163  });
164 
165  std::vector<float> outputExpected = std::vector<float>(
166  {
167  1.0f, 2.0f, 3.0f,
168  4.0f, 5.0f, 6.0f,
169  7.0f, 8.0f, 9.0f,
170  });
171 
172  const std::string expectedStringOutput =
173  "{ \"layerGuid\": 1,"
174  " \"layerName\": \"TestOutput\","
175  " \"outputSlot\": 0,"
176  " \"shape\": [3, 3, 1],"
177  " \"min\": 1, \"max\": 9,"
178  " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
179 
180  return DebugTestImpl<T, 3>(workloadFactory,
181  memoryManager,
182  inputTensorInfo,
183  outputTensorInfo,
184  input,
185  outputExpected,
186  desc,
187  expectedStringOutput);
188 }
189 
190 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
191 LayerTestResult<T, 2> Debug2dTest(
192  armnn::IWorkloadFactory& workloadFactory,
194 {
195  armnn::TensorInfo inputTensorInfo;
196  armnn::TensorInfo outputTensorInfo;
197 
198  unsigned int inputShape[] = {2, 2};
199  unsigned int outputShape[] = {2, 2};
200 
202  desc.m_Guid = 1;
203  desc.m_LayerName = "TestOutput";
204  desc.m_SlotIndex = 0;
205 
206  inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
207  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
208 
209  std::vector<float> input = std::vector<float>(
210  {
211  1.0f, 2.0f,
212  3.0f, 4.0f,
213  });
214 
215  std::vector<float> outputExpected = std::vector<float>(
216  {
217  1.0f, 2.0f,
218  3.0f, 4.0f,
219  });
220 
221  const std::string expectedStringOutput =
222  "{ \"layerGuid\": 1,"
223  " \"layerName\": \"TestOutput\","
224  " \"outputSlot\": 0,"
225  " \"shape\": [2, 2],"
226  " \"min\": 1, \"max\": 4,"
227  " \"data\": [[1, 2], [3, 4]] }\n";
228 
229  return DebugTestImpl<T, 2>(workloadFactory,
230  memoryManager,
231  inputTensorInfo,
232  outputTensorInfo,
233  input,
234  outputExpected,
235  desc,
236  expectedStringOutput);
237 }
238 
239 template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
240 LayerTestResult<T, 1> Debug1dTest(
241  armnn::IWorkloadFactory& workloadFactory,
243 {
244  armnn::TensorInfo inputTensorInfo;
245  armnn::TensorInfo outputTensorInfo;
246 
247  unsigned int inputShape[] = {4};
248  unsigned int outputShape[] = {4};
249 
251  desc.m_Guid = 1;
252  desc.m_LayerName = "TestOutput";
253  desc.m_SlotIndex = 0;
254 
255  inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
256  outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
257 
258  std::vector<float> input = std::vector<float>(
259  {
260  1.0f, 2.0f, 3.0f, 4.0f,
261  });
262 
263  std::vector<float> outputExpected = std::vector<float>(
264  {
265  1.0f, 2.0f, 3.0f, 4.0f,
266  });
267 
268  const std::string expectedStringOutput =
269  "{ \"layerGuid\": 1,"
270  " \"layerName\": \"TestOutput\","
271  " \"outputSlot\": 0,"
272  " \"shape\": [4],"
273  " \"min\": 1, \"max\": 4,"
274  " \"data\": [1, 2, 3, 4] }\n";
275 
276  return DebugTestImpl<T, 1>(workloadFactory,
277  memoryManager,
278  inputTensorInfo,
279  outputTensorInfo,
280  input,
281  outputExpected,
282  desc,
283  expectedStringOutput);
284 }
285 
286 } // anonymous namespace
287 
289  armnn::IWorkloadFactory& workloadFactory,
291 {
292  return Debug4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
293 }
294 
296  armnn::IWorkloadFactory& workloadFactory,
298 {
299  return Debug3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
300 }
301 
303  armnn::IWorkloadFactory& workloadFactory,
305 {
306  return Debug2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
307 }
308 
310  armnn::IWorkloadFactory& workloadFactory,
312 {
313  return Debug1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
314 }
315 
317  armnn::IWorkloadFactory& workloadFactory,
319 {
320  return Debug4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
321 }
322 
324  armnn::IWorkloadFactory& workloadFactory,
326 {
327  return Debug3dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
328 }
329 
331  armnn::IWorkloadFactory& workloadFactory,
333 {
334  return Debug2dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
335 }
336 
338  armnn::IWorkloadFactory& workloadFactory,
340 {
341  return Debug1dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
342 }
343 
345  armnn::IWorkloadFactory& workloadFactory,
347 {
348  return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
349 }
350 
352  armnn::IWorkloadFactory& workloadFactory,
354 {
355  return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
356 }
357 
359  armnn::IWorkloadFactory& workloadFactory,
361 {
362  return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
363 }
364 
366  armnn::IWorkloadFactory& workloadFactory,
368 {
369  return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
370 }
371 
373  armnn::IWorkloadFactory& workloadFactory,
375 {
376  return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
377 }
378 
380  armnn::IWorkloadFactory& workloadFactory,
382 {
383  return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
384 }
385 
387  armnn::IWorkloadFactory& workloadFactory,
389 {
390  return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
391 }
392 
394  armnn::IWorkloadFactory& workloadFactory,
396 {
397  return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
398 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< int16_t, 3 > Debug3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 1 > Debug1dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 1 > Debug1dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 1 > Debug1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 1 > Debug1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 2 > Debug2dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
LayerTestResult< armnn::BFloat16, 3 > Debug3dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< armnn::BFloat16, 2 > Debug2dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > Debug4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > Debug3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 4 > Debug4dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > Debug4dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 2 > Debug2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > Debug4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 2 > Debug2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 3 > Debug3dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196