// Doxygen page-header residue (not part of the original source file):
// ArmNN 22.05.01 — RefTensorHandleTests.cpp — "Go to the documentation of this file."
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
7 
8 #include <doctest/doctest.h>
9 
10 TEST_SUITE("RefTensorHandleTests")
11 {
12 using namespace armnn;
13 
14 TEST_CASE("AcquireAndRelease")
15 {
16  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
17 
18  TensorInfo info({ 1, 1, 1, 1 }, DataType::Float32);
19  RefTensorHandle handle(info, memoryManager);
20 
21  handle.Manage();
22  handle.Allocate();
23 
24  memoryManager->Acquire();
25  {
26  float* buffer = reinterpret_cast<float*>(handle.Map());
27 
28  CHECK(buffer != nullptr); // Yields a valid pointer
29 
30  buffer[0] = 2.5f;
31 
32  CHECK(buffer[0] == 2.5f); // Memory is writable and readable
33 
34  }
35  memoryManager->Release();
36 
37  memoryManager->Acquire();
38  {
39  float* buffer = reinterpret_cast<float*>(handle.Map());
40 
41  CHECK(buffer != nullptr); // Yields a valid pointer
42 
43  buffer[0] = 3.5f;
44 
45  CHECK(buffer[0] == 3.5f); // Memory is writable and readable
46  }
47  memoryManager->Release();
48 }
49 
50 TEST_CASE("RefTensorHandleFactoryMemoryManaged")
51 {
52  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
53  RefTensorHandleFactory handleFactory(memoryManager);
54  TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
55 
56  // create TensorHandle with memory managed
57  auto handle = handleFactory.CreateTensorHandle(info, true);
58  handle->Manage();
59  handle->Allocate();
60 
61  memoryManager->Acquire();
62  {
63  float* buffer = reinterpret_cast<float*>(handle->Map());
64  CHECK(buffer != nullptr); // Yields a valid pointer
65  buffer[0] = 1.5f;
66  buffer[1] = 2.5f;
67  CHECK(buffer[0] == 1.5f); // Memory is writable and readable
68  CHECK(buffer[1] == 2.5f); // Memory is writable and readable
69  }
70  memoryManager->Release();
71 
72  memoryManager->Acquire();
73  {
74  float* buffer = reinterpret_cast<float*>(handle->Map());
75  CHECK(buffer != nullptr); // Yields a valid pointer
76  buffer[0] = 3.5f;
77  buffer[1] = 4.5f;
78  CHECK(buffer[0] == 3.5f); // Memory is writable and readable
79  CHECK(buffer[1] == 4.5f); // Memory is writable and readable
80  }
81  memoryManager->Release();
82 
83  float testPtr[2] = { 2.5f, 5.5f };
84  // Cannot import as import is disabled
85  CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
86 }
87 
88 TEST_CASE("RefTensorHandleFactoryImport")
89 {
90  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
91  RefTensorHandleFactory handleFactory(memoryManager);
92  TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
93 
94  // create TensorHandle without memory managed
95  auto handle = handleFactory.CreateTensorHandle(info, false);
96  handle->Manage();
97  handle->Allocate();
98  memoryManager->Acquire();
99 
100  // No buffer allocated when import is enabled
101  CHECK_THROWS_AS(handle->Map(), armnn::NullPointerException);
102 
103  float testPtr[2] = { 2.5f, 5.5f };
104  // Correctly import
105  CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
106  float* buffer = reinterpret_cast<float*>(handle->Map());
107  CHECK(buffer != nullptr); // Yields a valid pointer after import
108  CHECK(buffer == testPtr); // buffer is pointing to testPtr
109  // Memory is writable and readable with correct value
110  CHECK(buffer[0] == 2.5f);
111  CHECK(buffer[1] == 5.5f);
112  buffer[0] = 3.5f;
113  buffer[1] = 10.0f;
114  CHECK(buffer[0] == 3.5f);
115  CHECK(buffer[1] == 10.0f);
116  memoryManager->Release();
117 }
118 
119 TEST_CASE("RefTensorHandleImport")
120 {
121  TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
122  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
123 
124  handle.Manage();
125  handle.Allocate();
126 
127  // No buffer allocated when import is enabled
128  CHECK_THROWS_AS(handle.Map(), armnn::NullPointerException);
129 
130  float testPtr[2] = { 2.5f, 5.5f };
131  // Correctly import
132  CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
133  float* buffer = reinterpret_cast<float*>(handle.Map());
134  CHECK(buffer != nullptr); // Yields a valid pointer after import
135  CHECK(buffer == testPtr); // buffer is pointing to testPtr
136  // Memory is writable and readable with correct value
137  CHECK(buffer[0] == 2.5f);
138  CHECK(buffer[1] == 5.5f);
139  buffer[0] = 3.5f;
140  buffer[1] = 10.0f;
141  CHECK(buffer[0] == 3.5f);
142  CHECK(buffer[1] == 10.0f);
143 }
144 
145 TEST_CASE("RefTensorHandleGetCapabilities")
146 {
147  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
148  RefTensorHandleFactory handleFactory(memoryManager);
149 
150  // Builds up the structure of the network.
152  IConnectableLayer* input = net->AddInputLayer(0);
153  IConnectableLayer* output = net->AddOutputLayer(0);
154  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
155 
156  std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
157  output,
159  CHECK(capabilities.empty());
160 }
161 
162 TEST_CASE("RefTensorHandleSupportsInPlaceComputation")
163 {
164  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
165  RefTensorHandleFactory handleFactory(memoryManager);
166 
167  // RefTensorHandleFactory does not support InPlaceComputation
168  ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation()));
169 }
170 
171 TEST_CASE("TestManagedConstTensorHandle")
172 {
173  // Initialize arguments
174  void* mem = nullptr;
176 
177  // Use PassthroughTensor as others are abstract
178  auto passThroughHandle = std::make_shared<PassthroughTensorHandle>(info, mem);
179 
180  // Test managed handle is initialized with m_Mapped unset and once Map() called its set
181  ManagedConstTensorHandle managedHandle(passThroughHandle);
182  CHECK(!managedHandle.IsMapped());
183  managedHandle.Map();
184  CHECK(managedHandle.IsMapped());
185 
186  // Test it can then be unmapped
187  managedHandle.Unmap();
188  CHECK(!managedHandle.IsMapped());
189 
190  // Test member function
191  CHECK(managedHandle.GetTensorInfo() == info);
192 
193  // Test that nullptr tensor handle doesn't get mapped
194  ManagedConstTensorHandle managedHandleNull(nullptr);
195  CHECK(!managedHandleNull.IsMapped());
196  CHECK_THROWS_AS(managedHandleNull.Map(), armnn::Exception);
197  CHECK(!managedHandleNull.IsMapped());
198 
199  // Check Unmap() when m_Mapped already false
200  managedHandleNull.Unmap();
201  CHECK(!managedHandleNull.IsMapped());
202 }
203 
204 #if !defined(__ANDROID__)
205 // Only run these tests on non Android platforms
206 TEST_CASE("CheckSourceType")
207 {
209  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
210 
211  int* testPtr = new int(4);
212 
213  // Not supported
214  CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBuf));
215 
216  // Not supported
217  CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBufProtected));
218 
219  // Supported
220  CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
221 
222  delete testPtr;
223 }
224 
225 TEST_CASE("ReusePointer")
226 {
228  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
229 
230  int* testPtr = new int(4);
231 
232  handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc);
233 
234  // Reusing previously Imported pointer
235  CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
236 
237  delete testPtr;
238 }
239 
240 TEST_CASE("MisalignedPointer")
241 {
243  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
244 
245  // Allocate a 2 int array
246  int* testPtr = new int[2];
247 
248  // Increment pointer by 1 byte
249  void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
250 
251  CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
252 
253  delete[] testPtr;
254 }
255 
256 TEST_CASE("CheckCanBeImported")
257 {
259  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
260 
261  int* testPtr = new int(4);
262 
263  // Not supported
264  CHECK(!handle.CanBeImported(static_cast<void *>(testPtr), MemorySource::DmaBuf));
265 
266  // Supported
267  CHECK(handle.CanBeImported(static_cast<void *>(testPtr), MemorySource::Malloc));
268 
269  delete testPtr;
270 
271 }
272 
273 TEST_CASE("MisalignedCanBeImported")
274 {
276  RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
277 
278  // Allocate a 2 int array
279  int* testPtr = new int[2];
280 
281  // Increment pointer by 1 byte
282  void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
283 
284  CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
285 
286  delete[] testPtr;
287 }
288 
289 #endif
290 
291 }
/* Doxygen cross-reference residue (generated documentation links, not part of
 * the test source — preserved here for reference):
 *
 * IConnectableLayer — interface for a layer that is connectable to other layers
 *   via InputSlots and OutputSlots. (INetwork.hpp:66)
 * TEST_SUITE("RefTensorHandleTests")
 * Copyright (c) 2021 ARM Limited and Contributors.
 * #define ARMNN_ASSERT(COND) (Assert.hpp:14)
 * armnn::Exception — base class for all ArmNN exceptions so that users can
 *   filter to just those. (Exceptions.hpp:46)
 * virtual const IInputSlot& GetInputSlot(unsigned int index) const = 0
 *   — get a const input slot handle by slot index.
 * virtual const IOutputSlot& GetOutputSlot(unsigned int index) const = 0
 *   — get the const output slot handle by slot index.
 * std::unique_ptr<INetwork, void(*)(INetwork* network)> INetworkPtr
 *   (INetwork.hpp:241)
 * virtual int Connect(IInputSlot& destination) = 0
 * static INetworkPtr Create(NetworkOptions networkOptions = {})
 *   (Network.cpp:476)
 */