ArmNN 20.08
NeonFallbackTests.cpp File Reference


Functions

 BOOST_AUTO_TEST_CASE (FallbackImportToCpuAcc)
 
 BOOST_AUTO_TEST_CASE (FallbackPaddingCopyToCpuAcc)
 
 BOOST_AUTO_TEST_CASE (FallbackImportFromCpuAcc)
 
 BOOST_AUTO_TEST_CASE (FallbackPaddingCopyFromCpuAcc)
 
 BOOST_AUTO_TEST_CASE (FallbackDisableImportFromCpuAcc)
 

Variables

std::vector< armnn::BackendId > defaultBackends = { armnn::Compute::CpuAcc }
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/5]

BOOST_AUTO_TEST_CASE ( FallbackImportToCpuAcc  )

Definition at line 17 of file NeonFallbackTests.cpp.

References armnn::BackendRegistryInstance(), CheckOrder(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), CreateBackendObject(), armnn::Float32, BackendRegistry::GetBackendIds(), GetFirstLayerWithName(), IConnectableLayer::GetInputSlot(), ProfilerManager::GetInstance(), IConnectableLayer::GetOutputSlot(), ProfilerManager::GetProfiler(), Layer::GetType(), armnn::info, OptimizerOptions::m_ImportEnabled, armnn::MemImport, armnn::MockImportBackendId(), armnn::Optimize(), Profiler::Print(), and IOutputSlot::SetTensorInfo().

18 {
19  using namespace armnn;
20 
21  // Create a mock backend object
22  MockImportBackendInitialiser initialiser; // Register the Mock Backend
23  auto backendObjPtr = CreateBackendObject(MockImportBackendId());
24  BOOST_TEST((backendObjPtr != nullptr));
25 
26  BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
27  if (backendIds.find("MockRef") == backendIds.end())
28  {
29  std::string message = "Cannot load MockRef";
30  BOOST_FAIL(message);
31  }
32 
33  // Create runtime in which test will run and allow fallback to CpuRef.
34  IRuntime::CreationOptions options;
35  IRuntimePtr runtime(IRuntime::Create(options));
36 
37  // Builds up the structure of the network.
38  INetworkPtr net(INetwork::Create());
39 
40  IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
41  IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
42  IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
43  IConnectableLayer* add = net->AddAdditionLayer("add");
44  IConnectableLayer* sub = net->AddSubtractionLayer("sub");
45  IConnectableLayer* output = net->AddOutputLayer(0, "output");
46 
47  input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
48  input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
49  input2->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
50  add->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
51  sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));
52 
53  TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
54 
55  input0->GetOutputSlot(0).SetTensorInfo(info);
56  input1->GetOutputSlot(0).SetTensorInfo(info);
57  input2->GetOutputSlot(0).SetTensorInfo(info);
58  add->GetOutputSlot(0).SetTensorInfo(info);
59  sub->GetOutputSlot(0).SetTensorInfo(info);
60 
61  // optimize the network
62  std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
63  OptimizerOptions optOptions;
64  optOptions.m_ImportEnabled = true;
65  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
66 
67  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
68  Graph& graph = optNetObjPtr->GetGraph();
69 
70  armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
71  armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
72  armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
73  armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "add");
74  armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ add (0) -> sub (1) ]");
75  armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "sub");
76  armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
77 
78  // Checks order is valid.
79  BOOST_TEST(CheckOrder(graph, layer0, layer1));
80  BOOST_TEST(CheckOrder(graph, layer1, layer2));
81  BOOST_TEST(CheckOrder(graph, layer2, layer3));
82  BOOST_TEST(CheckOrder(graph, layer3, layer4));
83  BOOST_TEST(CheckOrder(graph, layer4, layer5));
84  BOOST_TEST(CheckOrder(graph, layer5, layer6));
85 
86  // Load it into the runtime. It should pass.
87  NetworkId netId;
88  std::string ignoredErrorMessage;
89  INetworkProperties networkProperties(true, true);
90 
91  runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
92 
93  // Creates structures for input & output
94  std::vector<float> inputData0
95  {
96  1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 6.0f
97  };
98  std::vector<float> inputData1
99  {
100  0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
101  };
102  std::vector<float> inputData2
103  {
104  12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
105  };
106 
107  std::vector<float> outputData(12);
108 
109  std::vector<float> expectedOutput
110  {
111  11.0f, 9.0f, 7.0f, 5.0f, 3.0f, 1.0f, -1.0f, -3.0f, -5.0f, -7.0f, -9.0f, -11.0f
112  };
113 
114  InputTensors inputTensors
115  {
116  { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
117  { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
118  { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
119  };
120  OutputTensors outputTensors
121  {
122  { 0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
123  };
124 
125  runtime->GetProfiler(netId)->EnableProfiling(true);
126 
127  // Do the inference
128  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
129 
130  // Retrieve the Profiler.Print() output to get the workload execution
131  ProfilerManager& profilerManager = ProfilerManager::GetInstance();
132  std::stringstream ss;
133  profilerManager.GetProfiler()->Print(ss);
134  std::string dump = ss.str();
135 
136  // Contains ImportMemGeneric
137  std::size_t found = dump.find("ImportMemGeneric");
138  BOOST_TEST(found != std::string::npos);
139 
140  // Contains SyncMemGeneric
141  found = dump.find("SyncMemGeneric");
142  BOOST_TEST(found != std::string::npos);
143 
144  // Does not contain CopyMemGeneric
145  found = dump.find("CopyMemGeneric");
146  BOOST_TEST(found == std::string::npos);
147 
148  // Use memory import between backends
149  BOOST_TEST((layer4->GetType() == LayerType::MemImport));
150 
151  // Check output is as expected
152  BOOST_TEST(outputData == expectedOutput);
153 }
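The listing above is the canonical import-enabled fallback pattern: import is requested once at optimization time and once at load time. Below is a minimal sketch (not part of the file) of just those two switches, assuming net (INetworkPtr), runtime (IRuntimePtr) and the backend list are built exactly as in the test body:

    using namespace armnn;

    std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };

    // 1) Let the optimizer plan zero-copy boundaries between backends.
    OptimizerOptions optOptions;
    optOptions.m_ImportEnabled = true;
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

    // 2) Ask the runtime to import input buffers and export output buffers.
    NetworkId netId;
    std::string errorMessage;
    INetworkProperties networkProperties(true, true);
    runtime->LoadNetwork(netId, std::move(optNet), errorMessage, networkProperties);

When both switches are on and the backends can share buffers, the boundary layer becomes MemImport and ImportMemGeneric (rather than CopyMemGeneric) appears in the profile, which is exactly what the assertions above check.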

◆ BOOST_AUTO_TEST_CASE() [2/5]

BOOST_AUTO_TEST_CASE ( FallbackPaddingCopyToCpuAcc  )

Definition at line 155 of file NeonFallbackTests.cpp.

References armnn::BackendRegistryInstance(), CheckOrder(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), CreateBackendObject(), armnn::Float32, BackendRegistry::GetBackendIds(), GetFirstLayerWithName(), IConnectableLayer::GetInputSlot(), ProfilerManager::GetInstance(), IConnectableLayer::GetOutputSlot(), ProfilerManager::GetProfiler(), Layer::GetType(), armnn::info, OptimizerOptions::m_ImportEnabled, armnn::MemCopy, armnn::MockImportBackendId(), armnn::Optimize(), Profiler::Print(), and IOutputSlot::SetTensorInfo().

156 {
157  using namespace armnn;
158 
159  // Create a mock backend object
160  MockImportBackendInitialiser initialiser; // Register the Mock Backend
161  auto backendObjPtr = CreateBackendObject(MockImportBackendId());
162  BOOST_TEST((backendObjPtr != nullptr));
163 
164  BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
165  if (backendIds.find("MockRef") == backendIds.end())
166  {
167  std::string message = "Cannot load MockRef";
168  BOOST_FAIL(message);
169  }
170 
171  // Create runtime in which test will run and allow fallback to CpuRef.
172  IRuntime::CreationOptions options;
173  IRuntimePtr runtime(IRuntime::Create(options));
174 
175  // Builds up the structure of the network.
176  INetworkPtr net(INetwork::Create());
177 
178  Pooling2dDescriptor desc;
179 
180  IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
181  IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
182  IConnectableLayer* add = net->AddAdditionLayer("add");
183  IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
184  IConnectableLayer* output = net->AddOutputLayer(0, "output");
185 
186  input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
187  input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
188  add->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
189  pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
190 
191  TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
192  TensorInfo poolingInfo = TensorInfo({ 1, 2, 1, 1 }, DataType::Float32);
193 
194  input0->GetOutputSlot(0).SetTensorInfo(info);
195  input1->GetOutputSlot(0).SetTensorInfo(info);
196  add->GetOutputSlot(0).SetTensorInfo(info);
197  pooling->GetOutputSlot(0).SetTensorInfo(poolingInfo);
198 
199  // optimize the network
200  std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
201  OptimizerOptions optOptions;
202  optOptions.m_ImportEnabled = true;
203  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
204 
205  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
206  Graph& graph = optNetObjPtr->GetGraph();
207 
208  armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
209  armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
210  armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "add");
211  armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "[ add (0) -> pooling (0) ]");
212  armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "pooling");
213  armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
214 
215  // Checks order is valid.
216  BOOST_TEST(CheckOrder(graph, layer0, layer1));
217  BOOST_TEST(CheckOrder(graph, layer1, layer2));
218  BOOST_TEST(CheckOrder(graph, layer2, layer3));
219  BOOST_TEST(CheckOrder(graph, layer3, layer4));
220  BOOST_TEST(CheckOrder(graph, layer4, layer5));
221 
222  // Load it into the runtime. It should pass.
223  NetworkId netId;
224  std::string ignoredErrorMessage;
225  INetworkProperties networkProperties(true, true);
226 
227  runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
228 
229  // Creates structures for input & output
230  std::vector<float> inputData0
231  {
232  1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 6.0f
233  };
234  std::vector<float> inputData1
235  {
236  0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
237  };
238 
239  std::vector<float> outputData(2);
240 
241  std::vector<float> expectedOutput
242  {
243  6.0f, 12.0f
244  };
245 
246  InputTensors inputTensors
247  {
248  { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
249  { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) }
250  };
251  OutputTensors outputTensors
252  {
253  { 0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
254  };
255 
256  runtime->GetProfiler(netId)->EnableProfiling(true);
257 
258  // Do the inference
259  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
260 
261  // Retrieve the Profiler.Print() output to get the workload execution
262  ProfilerManager& profilerManager = ProfilerManager::GetInstance();
263  std::stringstream ss;
264  profilerManager.GetProfiler()->Print(ss);
265  std::string dump = ss.str();
266 
267  // Contains CopyMemGeneric between the backends
268  std::size_t found = dump.find("CopyMemGeneric");
269  BOOST_TEST(found != std::string::npos);
270 
271  // Contains SyncMemGeneric for the output
272  found = dump.find("SyncMemGeneric");
273  BOOST_TEST(found != std::string::npos);
274 
275  // Does not contain ImportMemGeneric
276  found = dump.find("ImportMemGeneric");
277  BOOST_TEST(found == std::string::npos);
278 
279  // Use memory copy between backends
280  BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
281 
282  // Check output is as expected
283  BOOST_TEST(outputData == expectedOutput);
284 }
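Here the CpuAcc pooling workload runs on padded tensors, so the boundary buffer cannot be imported directly and the optimizer falls back to a MemCopy layer; CopyMemGeneric appears in the profile instead of ImportMemGeneric. A short sketch of the profiler check these tests rely on (assuming profiling was enabled on the runtime before EnqueueWorkload, as above):

    // Dump the profiling events as JSON and grep for the boundary workload.
    std::stringstream ss;
    armnn::ProfilerManager::GetInstance().GetProfiler()->Print(ss);
    const std::string dump = ss.str();

    const bool imported = dump.find("ImportMemGeneric") != std::string::npos;
    const bool copied   = dump.find("CopyMemGeneric")   != std::string::npos;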

◆ BOOST_AUTO_TEST_CASE() [3/5]

BOOST_AUTO_TEST_CASE ( FallbackImportFromCpuAcc  )

Definition at line 286 of file NeonFallbackTests.cpp.

References armnn::BackendRegistryInstance(), CheckOrder(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), CreateBackendObject(), armnn::Float32, BackendRegistry::GetBackendIds(), GetFirstLayerWithName(), IConnectableLayer::GetInputSlot(), ProfilerManager::GetInstance(), IConnectableLayer::GetOutputSlot(), ProfilerManager::GetProfiler(), Layer::GetType(), armnn::info, OptimizerOptions::m_ImportEnabled, armnn::MemImport, armnn::MockImportBackendId(), armnn::Optimize(), Profiler::Print(), and IOutputSlot::SetTensorInfo().

287 {
288  using namespace armnn;
289 
290  // Create a mock backend object
291  MockImportBackendInitialiser initialiser; // Register the Mock Backend
292  auto backendObjPtr = CreateBackendObject(MockImportBackendId());
293  BOOST_TEST((backendObjPtr != nullptr));
294 
295  BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
296  if (backendIds.find("MockRef") == backendIds.end())
297  {
298  std::string message = "Cannot load MockRef";
299  BOOST_FAIL(message);
300  }
301 
302  // Create runtime in which test will run and allow fallback to CpuRef.
303  IRuntime::CreationOptions options;
304  IRuntimePtr runtime(IRuntime::Create(options));
305 
306  // Builds up the structure of the network.
307  INetworkPtr net(INetwork::Create());
308 
309  IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
310  IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
311  IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
312  IConnectableLayer* sub = net->AddSubtractionLayer("sub");
313  IConnectableLayer* add = net->AddAdditionLayer("add");
314  IConnectableLayer* output = net->AddOutputLayer(0, "output");
315 
316  input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
317  input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
318  input2->GetOutputSlot(0).Connect(add->GetInputSlot(0));
319  sub->GetOutputSlot(0).Connect(add->GetInputSlot(1));
320  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
321 
322  TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
323 
324  input0->GetOutputSlot(0).SetTensorInfo(info);
325  input1->GetOutputSlot(0).SetTensorInfo(info);
326  input2->GetOutputSlot(0).SetTensorInfo(info);
327  sub->GetOutputSlot(0).SetTensorInfo(info);
328  add->GetOutputSlot(0).SetTensorInfo(info);
329 
330  // optimize the network
331  std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
332  OptimizerOptions optOptions;
333  optOptions.m_ImportEnabled = true;
334  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
335 
336  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
337  Graph& graph = optNetObjPtr->GetGraph();
338 
339  armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
340  armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
341  armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
342  armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "sub");
343  armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ sub (0) -> add (1) ]");
344  armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "add");
345  armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
346 
347  // Checks order is valid.
348  BOOST_TEST(CheckOrder(graph, layer0, layer1));
349  BOOST_TEST(CheckOrder(graph, layer1, layer2));
350  BOOST_TEST(CheckOrder(graph, layer2, layer3));
351  BOOST_TEST(CheckOrder(graph, layer3, layer4));
352  BOOST_TEST(CheckOrder(graph, layer4, layer5));
353  BOOST_TEST(CheckOrder(graph, layer5, layer6));
354 
355  // Load it into the runtime. It should pass.
356  NetworkId netId;
357  std::string ignoredErrorMessage;
358  INetworkProperties networkProperties(true, true);
359 
360  runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
361 
362  // Creates structures for input & output
363  std::vector<float> inputData0
364  {
365  1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 0.0f
366  };
367  std::vector<float> inputData1
368  {
369  0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
370  };
371  std::vector<float> inputData2
372  {
373  12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
374  };
375 
376  std::vector<float> outputData(12);
377 
378  std::vector<float> expectedOutput
379  {
380  13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
381  };
382 
383  InputTensors inputTensors
384  {
385  { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
386  { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
387  { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
388  };
389  OutputTensors outputTensors
390  {
391  { 0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
392  };
393 
394  runtime->GetProfiler(netId)->EnableProfiling(true);
395 
396  // Do the inference
397  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
398 
399  // Retrieve the Profiler.Print() output to get the workload execution
400  ProfilerManager& profilerManager = ProfilerManager::GetInstance();
401  std::stringstream ss;
402  profilerManager.GetProfiler()->Print(ss);
403  std::string dump = ss.str();
404 
405  // Contains ImportMemGeneric
406  std::size_t found = dump.find("ImportMemGeneric");
407  BOOST_TEST(found != std::string::npos);
408 
409  // Contains SyncMemGeneric
410  found = dump.find("SyncMemGeneric");
411  BOOST_TEST(found != std::string::npos);
412 
413  // Does not contain CopyMemGeneric
414  found = dump.find("CopyMemGeneric");
415  BOOST_TEST(found == std::string::npos);
416 
417  // Use memory import between backends
418  BOOST_TEST((layer4->GetType() == LayerType::MemImport));
419 
420  // Check output is as expected
421  BOOST_TEST(outputData == expectedOutput);
422 }

◆ BOOST_AUTO_TEST_CASE() [4/5]

BOOST_AUTO_TEST_CASE ( FallbackPaddingCopyFromCpuAcc  )

Definition at line 424 of file NeonFallbackTests.cpp.

References armnn::BackendRegistryInstance(), CheckOrder(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), CreateBackendObject(), armnn::Float32, BackendRegistry::GetBackendIds(), GetFirstLayerWithName(), IConnectableLayer::GetInputSlot(), ProfilerManager::GetInstance(), IConnectableLayer::GetOutputSlot(), ProfilerManager::GetProfiler(), Layer::GetType(), OptimizerOptions::m_ImportEnabled, armnn::MemCopy, armnn::MockImportBackendId(), armnn::Optimize(), Profiler::Print(), and IOutputSlot::SetTensorInfo().

425 {
426  using namespace armnn;
427 
428  // Create a mock backend object
429  MockImportBackendInitialiser initialiser; // Register the Mock Backend
430  auto backendObjPtr = CreateBackendObject(MockImportBackendId());
431  BOOST_TEST((backendObjPtr != nullptr));
432 
433  BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
434  if (backendIds.find("MockRef") == backendIds.end())
435  {
436  std::string message = "Cannot load MockRef";
437  BOOST_FAIL(message);
438  }
439 
440  // Create runtime in which test will run and allow fallback to CpuRef.
441  IRuntime::CreationOptions options;
442  IRuntimePtr runtime(IRuntime::Create(options));
443 
444  // Builds up the structure of the network.
445  INetworkPtr net(INetwork::Create());
446 
447  Pooling2dDescriptor desc;
448 
449  IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
450  IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
451  IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
452  IConnectableLayer* add = net->AddAdditionLayer("add");
453  IConnectableLayer* output = net->AddOutputLayer(0, "output");
454 
455  input0->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
456  input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
457  pooling->GetOutputSlot(0).Connect(add->GetInputSlot(0));
458  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
459 
460  TensorInfo inputInfo = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
461  TensorInfo poolingInfo = TensorInfo({ 1, 2, 1, 1 }, DataType::Float32);
462 
463  input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
464  input1->GetOutputSlot(0).SetTensorInfo(poolingInfo);
465  pooling->GetOutputSlot(0).SetTensorInfo(poolingInfo);
466  add->GetOutputSlot(0).SetTensorInfo(poolingInfo);
467 
468  // optimize the network
469  std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
470  OptimizerOptions optOptions;
471  optOptions.m_ImportEnabled = true;
472  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
473 
474  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
475  Graph& graph = optNetObjPtr->GetGraph();
476 
477  armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
478  armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
479  armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "pooling");
480  armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "[ pooling (0) -> add (0) ]");
481  armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "add");
482  armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
483 
484  // Checks order is valid.
485  BOOST_TEST(CheckOrder(graph, layer0, layer1));
486  BOOST_TEST(CheckOrder(graph, layer1, layer2));
487  BOOST_TEST(CheckOrder(graph, layer2, layer3));
488  BOOST_TEST(CheckOrder(graph, layer3, layer4));
489  BOOST_TEST(CheckOrder(graph, layer4, layer5));
490 
491  // Load it into the runtime. It should pass.
492  NetworkId netId;
493  std::string ignoredErrorMessage;
494  INetworkProperties networkProperties(true, true);
495 
496  runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
497 
498  // Creates structures for input & output
499  std::vector<float> inputData0
500  {
501  1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
502  };
503  std::vector<float> inputData1
504  {
505  -1.0f, 3.0f
506  };
507 
508  std::vector<float> outputData(2);
509 
510  std::vector<float> expectedOutput
511  {
512  5.0f, 15.0f
513  };
514 
515  InputTensors inputTensors
516  {
517  { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
518  { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) }
519  };
520  OutputTensors outputTensors
521  {
522  { 0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
523  };
524 
525  runtime->GetProfiler(netId)->EnableProfiling(true);
526 
527  // Do the inference
528  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
529 
530  // Retrieve the Profiler.Print() output to get the workload execution
531  ProfilerManager& profilerManager = ProfilerManager::GetInstance();
532  std::stringstream ss;
533  profilerManager.GetProfiler()->Print(ss);
534  std::string dump = ss.str();
535 
536  // Contains CopyMemGeneric between the backends
537  std::size_t found = dump.find("CopyMemGeneric");
538  BOOST_TEST(found != std::string::npos);
539 
540  // Contains SyncMemGeneric for the output
541  found = dump.find("SyncMemGeneric");
542  BOOST_TEST(found != std::string::npos);
543 
544  // Does not contain ImportMemGeneric
545  found = dump.find("ImportMemGeneric");
546  BOOST_TEST(found == std::string::npos);
547 
548  // Use memory copy between backends
549  BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
550 
551  // Check output is as expected
552  BOOST_TEST(outputData == expectedOutput);
553 }

◆ BOOST_AUTO_TEST_CASE() [5/5]

BOOST_AUTO_TEST_CASE ( FallbackDisableImportFromCpuAcc  )

Definition at line 555 of file NeonFallbackTests.cpp.

References armnn::BackendRegistryInstance(), BOOST_AUTO_TEST_SUITE_END(), CheckOrder(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), CreateBackendObject(), armnn::Float32, BackendRegistry::GetBackendIds(), GetFirstLayerWithName(), IConnectableLayer::GetInputSlot(), ProfilerManager::GetInstance(), IConnectableLayer::GetOutputSlot(), ProfilerManager::GetProfiler(), Layer::GetType(), armnn::info, armnn::MemCopy, armnn::MockImportBackendId(), armnn::Optimize(), Profiler::Print(), and IOutputSlot::SetTensorInfo().

556 {
557  using namespace armnn;
558 
559  // Create a mock backend object
560  MockImportBackendInitialiser initialiser; // Register the Mock Backend
561  auto backendObjPtr = CreateBackendObject(MockImportBackendId());
562  BOOST_TEST((backendObjPtr != nullptr));
563 
564  BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
565  if (backendIds.find("MockRef") == backendIds.end())
566  {
567  std::string message = "Cannot load MockRef";
568  BOOST_FAIL(message);
569  }
570 
571  // Create runtime in which test will run and allow fallback to CpuRef.
572  IRuntime::CreationOptions options;
573  IRuntimePtr runtime(IRuntime::Create(options));
574 
575  // Builds up the structure of the network.
576  INetworkPtr net(INetwork::Create());
577 
578  IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
579  IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
580  IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
581  IConnectableLayer* sub = net->AddSubtractionLayer("sub");
582  IConnectableLayer* add = net->AddAdditionLayer("add");
583  IConnectableLayer* output = net->AddOutputLayer(0, "output");
584 
585  input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
586  input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
587  input2->GetOutputSlot(0).Connect(add->GetInputSlot(0));
588  sub->GetOutputSlot(0).Connect(add->GetInputSlot(1));
589  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
590 
591  TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
592 
593  input0->GetOutputSlot(0).SetTensorInfo(info);
594  input1->GetOutputSlot(0).SetTensorInfo(info);
595  input2->GetOutputSlot(0).SetTensorInfo(info);
596  sub->GetOutputSlot(0).SetTensorInfo(info);
597  add->GetOutputSlot(0).SetTensorInfo(info);
598 
599  // optimize the network
600  std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
601  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
602 
603  OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
604  Graph& graph = optNetObjPtr->GetGraph();
605 
606  armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
607  armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
608  armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
609  armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "sub");
610  armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ sub (0) -> add (1) ]");
611  armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "add");
612  armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
613 
614  // Checks order is valid.
615  BOOST_TEST(CheckOrder(graph, layer0, layer1));
616  BOOST_TEST(CheckOrder(graph, layer1, layer2));
617  BOOST_TEST(CheckOrder(graph, layer2, layer3));
618  BOOST_TEST(CheckOrder(graph, layer3, layer4));
619  BOOST_TEST(CheckOrder(graph, layer4, layer5));
620  BOOST_TEST(CheckOrder(graph, layer5, layer6));
621 
622  // Load it into the runtime. It should pass.
623  NetworkId netId;
624  std::string ignoredErrorMessage;
625  INetworkProperties networkProperties(false, false);
626 
627  runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
628 
629  // Creates structures for input & output
630  std::vector<float> inputData0
631  {
632  1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 0.0f
633  };
634  std::vector<float> inputData1
635  {
636  0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
637  };
638  std::vector<float> inputData2
639  {
640  12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
641  };
642 
643  std::vector<float> outputData(12);
644 
645  std::vector<float> expectedOutput
646  {
647  13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
648  };
649 
650  InputTensors inputTensors
651  {
652  { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
653  { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
654  { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
655  };
656  OutputTensors outputTensors
657  {
658  { 0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
659  };
660 
661  runtime->GetProfiler(netId)->EnableProfiling(true);
662 
663  // Do the inference
664  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
665 
666  // Retrieve the Profiler.Print() output to get the workload execution
667  ProfilerManager& profilerManager = ProfilerManager::GetInstance();
668  std::stringstream ss;
669  profilerManager.GetProfiler()->Print(ss);
670  std::string dump = ss.str();
671 
672  // Contains CopyMemGeneric between the backends
673  std::size_t found = dump.find("CopyMemGeneric");
674  BOOST_TEST(found != std::string::npos);
675 
676  // Does not contain ImportMemGeneric
677  found = dump.find("ImportMemGeneric");
678  BOOST_TEST(found == std::string::npos);
679 
680  // Use memory copy between backends
681  BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
682 
683  // Check output is as expected
684  BOOST_TEST(outputData == expectedOutput);
685 }
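The contrast with the earlier import tests lies entirely in the defaults: the network is optimized without an OptimizerOptions argument (so m_ImportEnabled stays false) and loaded with INetworkProperties(false, false), and the boundary therefore remains a MemCopy layer even though the same pair of backends supported import above. Sketched, under the same assumptions as before:

    // Defaults only: no import requested at either stage.
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
    INetworkProperties networkProperties(false, false); // no input import, no output export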

Variable Documentation

◆ defaultBackends

std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuAcc }

Definition at line 15 of file NeonFallbackTests.cpp.
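For illustration, a hedged usage sketch: this is the backend list a NEON-only test would pass to the Optimize() overload documented above (the fallback tests in this file override it with { "MockRef", Compute::CpuAcc }); net and runtime are assumed to exist as in the test bodies.

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*net, defaultBackends, runtime->GetDeviceSpec());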