ArmNN
 21.02
OptimizedNetworkTests.cpp File Reference
#include <Graph.hpp>
#include <Network.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (SerializeToDot)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerNoFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDevice)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/6]

BOOST_AUTO_TEST_CASE ( SerializeToDot  )

Definition at line 16 of file OptimizedNetworkTests.cpp.

References armnn::CpuRef, IRuntime::Create(), INetwork::Create(), armnn::Float32, and armnn::Optimize().

17 {
18  // build up the structure of the network
20 
21  //Defines layers.
22  auto input = net->AddInputLayer(0);
23  auto add = net->AddAdditionLayer();
24  auto output = net->AddOutputLayer(0);
25 
26  // Connects layers.
27  input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
28  input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
29  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
30 
31  armnn::TensorShape shape({4});
33  input->GetOutputSlot(0).SetTensorInfo(info);
34  add->GetOutputSlot(0).SetTensorInfo(info);
35 
38 
39  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
40  armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
41 
42  std::ostringstream ss;
43  optimizedNet->SerializeToDot(ss);
44 
45  auto inputId = input->GetGuid();
46  auto addId = add->GetGuid();
47  auto outputId = output->GetGuid();
48 
49  std::stringstream expected;
50  expected <<
51  "digraph Optimized {\n"
52  " node [shape=\"record\"];\n"
53  " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
54  " " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
55  "BackendID : CpuRef\\l}\"];\n"
56  " " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
57  "BackendID : CpuRef\\l}\"];\n"
58  " " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
59  "BackendID : CpuRef\\l}\"];\n"
60  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
61  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
62  " " << addId << " -> " << outputId << " [label=< [4] >];\n"
63  "}\n";
64 
65  BOOST_TEST(ss.str() == expected.str());
66 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [2/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerNoFallback  )

Definition at line 68 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, IRuntime::Create(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), and IOutputSlot::SetTensorInfo().

69 {
70  // build up the structure of the network
72 
73  armnn::IConnectableLayer* input = net->AddInputLayer(0);
74 
75  // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
77  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
78 
79  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
80 
81  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
82  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
83 
86 
89 
90  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
91  std::vector<std::string> errMessages;
92 
93  try
94  {
95  Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
96  BOOST_FAIL("Should have thrown an exception.");
97  }
98  catch (const armnn::InvalidArgumentException& e)
99  {
100  // Different exceptions are thrown on different backends
101  }
102  BOOST_CHECK(errMessages.size() > 0);
103 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [3/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerWithFallback  )

Definition at line 105 of file OptimizedNetworkTests.cpp.

References Graph::AllocateDynamicBuffers(), IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, IRuntime::Create(), INetwork::Create(), armnn::Float32, armnn::GetGraphForTesting(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Input, armnn::Normalization, armnn::Optimize(), armnn::Output, and IOutputSlot::SetTensorInfo().

106 {
107  // build up the structure of the network
109 
110  armnn::IConnectableLayer* input = net->AddInputLayer(0);
111 
112  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
114  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
115 
116  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
117 
118  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
119  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
120 
123 
126 
127  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
128  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
129  BOOST_REQUIRE(optNet);
130 
131  armnn::Graph& graph = GetGraphForTesting(optNet.get());
132  graph.AllocateDynamicBuffers();
133 
134  for (auto&& layer : graph)
135  {
136  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
137  // the other layers are supported by CpuRef.
138  // If NEON is not enabled, all layers are supported by CpuRef.
139 #if defined(ARMCOMPUTENEON_ENABLED)
140  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
141  {
142  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
143  }
144  else if (layer->GetType() == armnn::LayerType::Normalization)
145  {
146  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
147  }
148 #else
149  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
150 #endif
151  }
152 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handlers of each layer.
Definition: Graph.cpp:179
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [4/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDevice  )

Definition at line 154 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), IRuntime::Create(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), IOutputSlot::SetTensorInfo(), and armnn::Undefined.

155 {
156  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
157 
158  // build up the structure of the network
160 
163 
164  // in
165  // |
166  // nm
167  // / |
168  // ac |
169  // \ |
170  // ml
171  // |
172  // sm
173  // |
174  // ot
175  armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
176  layer->GetOutputSlot(0).SetTensorInfo(desc);
177 
178  armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
179 
180  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
181  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
182 
183  layer = net->AddActivationLayer(acDesc, "ac");
184 
185  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
186  layer->GetOutputSlot(0).SetTensorInfo(desc);
187 
188  armnn::IConnectableLayer* prevLayer = layer;
189  layer = net->AddMultiplicationLayer("ml");
190 
191  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
192  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
193  layer->GetOutputSlot(0).SetTensorInfo(desc);
194 
195  prevLayer = layer;
196  armnn::SoftmaxDescriptor softmaxDescriptor;
197  layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
198 
199  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
200  layer->GetOutputSlot(0).SetTensorInfo(desc);
201 
202  prevLayer = layer;
203  layer = net->AddOutputLayer(0, "ot");
204 
205  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
206 
209 
210  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
211  std::vector<std::string> errMessages;
212 
213  try
214  {
215  Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
216  BOOST_FAIL("Should have thrown an exception.");
217  }
218  catch (const armnn::InvalidArgumentException& e)
219  {
220  // Different exceptions are thrown on different backends
221  }
222  BOOST_CHECK(errMessages.size() > 0);
223 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [5/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback  )

Definition at line 225 of file OptimizedNetworkTests.cpp.

References Graph::AllocateDynamicBuffers(), IOutputSlot::Connect(), armnn::CpuRef, IRuntime::Create(), INetwork::Create(), armnn::Float32, armnn::GetGraphForTesting(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), IOutputSlot::SetTensorInfo(), and armnn::Undefined.

226 {
227  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
228 
229  // build up the structure of the network
231 
234 
235  // in
236  // |
237  // nm
238  // / |
239  // ac |
240  // \ |
241  // ml
242  // |
243  // sm
244  // |
245  // ot
246  armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
247  layer->GetOutputSlot(0).SetTensorInfo(desc);
248 
249  armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
250 
251  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
252  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
253 
254  layer = net->AddActivationLayer(acDesc, "ac");
255 
256  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
257  layer->GetOutputSlot(0).SetTensorInfo(desc);
258 
259  armnn::IConnectableLayer* prevLayer = layer;
260  layer = net->AddMultiplicationLayer("ml");
261 
262  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
263  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
264  layer->GetOutputSlot(0).SetTensorInfo(desc);
265 
266  prevLayer = layer;
267  armnn::SoftmaxDescriptor softmaxDescriptor;
268  layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
269 
270  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
271  layer->GetOutputSlot(0).SetTensorInfo(desc);
272 
273  prevLayer = layer;
274  layer = net->AddOutputLayer(0, "ot");
275 
276  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
277 
280 
281  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
282 
283  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
284  BOOST_CHECK(optNet);
285 
286  armnn::Graph& graph = GetGraphForTesting(optNet.get());
287  graph.AllocateDynamicBuffers();
288 
289  // validate workloads
291  for (auto&& layer : graph)
292  {
293  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
294  BOOST_CHECK_NO_THROW(
295  layer->CreateWorkload(fact));
296  }
297 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handlers of each layer.
Definition: Graph.cpp:179
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [6/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback  )

Definition at line 299 of file OptimizedNetworkTests.cpp.

References Graph::AllocateDynamicBuffers(), BOOST_AUTO_TEST_SUITE_END(), IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, IRuntime::Create(), INetwork::Create(), armnn::Float32, armnn::GetGraphForTesting(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::GpuAcc, armnn::Input, armnn::Normalization, armnn::Optimize(), armnn::Output, and IOutputSlot::SetTensorInfo().

300 {
301  // build up the structure of the network
303 
304  armnn::IConnectableLayer* input = net->AddInputLayer(0);
305 
306  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
308  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
309 
310  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
311 
312  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
313  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
314 
317 
320 
321  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
324 
325  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
326  BOOST_REQUIRE(optNet);
327 
328  armnn::Graph& graph = GetGraphForTesting(optNet.get());
329  graph.AllocateDynamicBuffers();
330 
331  for (auto&& layer : graph)
332  {
333  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
334  // the other layers are supported by CpuRef.
335  // If only CL is enabled, Input and Output layers are supported by GpuAcc,
336  // the other layers are supported by CpuRef.
337  // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
338 #if defined(ARMCOMPUTENEON_ENABLED)
339  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
340  {
341  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
342  }
343  else if (layer->GetType() == armnn::LayerType::Normalization)
344  {
345  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
346  }
347 #elif defined(ARMCOMPUTECL_ENABLED)
348  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
349  {
350  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
351  }
352  else if (layer->GetType() == armnn::LayerType::Normalization)
353  {
354  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
355  }
356 #else
357  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
358 #endif
359  }
360 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
GPU Execution: OpenCL: ArmCompute.
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handlers of each layer.
Definition: Graph.cpp:179
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510