ArmNN
 20.11
OptimizedNetworkTests.cpp File Reference
#include <Graph.hpp>
#include <Network.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (SerializeToDot)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerNoFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDevice)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/6]

BOOST_AUTO_TEST_CASE ( SerializeToDot  )

Definition at line 16 of file OptimizedNetworkTests.cpp.

References Network::AddAdditionLayer(), Network::AddInputLayer(), Network::AddOutputLayer(), IOutputSlot::Connect(), armnn::CpuRef, IRuntime::Create(), armnn::Float32, IConnectableLayer::GetOutputSlot(), and armnn::Optimize().

17 {
18  armnn::Network net;
19 
20  //Defines layers.
21  auto input = net.AddInputLayer(0);
22  auto add = net.AddAdditionLayer();
23  auto output = net.AddOutputLayer(0);
24 
25  // Connects layers.
26  input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
27  input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
28  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
29 
30  armnn::TensorShape shape({4});
31  armnn::TensorInfo info(shape, armnn::DataType::Float32);
32  input->GetOutputSlot(0).SetTensorInfo(info);
33  add->GetOutputSlot(0).SetTensorInfo(info);
34 
35  armnn::IRuntime::CreationOptions options;
36  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
37 
38  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
39  armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
40 
41  std::ostringstream ss;
42  optimizedNet->SerializeToDot(ss);
43 
44  auto inputId = input->GetGuid();
45  auto addId = add->GetGuid();
46  auto outputId = output->GetGuid();
47 
48  std::stringstream expected;
49  expected <<
50  "digraph Optimized {\n"
51  " node [shape=\"record\"];\n"
52  " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
53  " " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
54  "BackendID : CpuRef\\l}\"];\n"
55  " " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
56  "BackendID : CpuRef\\l}\"];\n"
57  " " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
58  "BackendID : CpuRef\\l}\"];\n"
59  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
60  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
61  " " << addId << " -> " << outputId << " [label=< [4] >];\n"
62  "}\n";
63 
64  BOOST_TEST(ss.str() == expected.str());
65 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an output layer to the network.
Definition: Network.cpp:1467
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an input layer to the network.
Definition: Network.cpp:1192
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:600
Private implementation of INetwork.
Definition: Network.hpp:28
IConnectableLayer * AddAdditionLayer(const char *name=nullptr) override
Adds an addition layer to the network.
Definition: Network.cpp:1457
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0

◆ BOOST_AUTO_TEST_CASE() [2/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerNoFallback  )

Definition at line 67 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), and IOutputSlot::SetTensorInfo().

68 {
69  // build up the structure of the network
70  armnn::INetworkPtr net(armnn::INetwork::Create());
71 
72  armnn::IConnectableLayer* input = net->AddInputLayer(0);
73 
74  // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
75  armnn::NormalizationDescriptor descriptor;
76  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
77 
78  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
79 
80  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
81  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
82 
83  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
84  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
85 
86  armnn::IRuntime::CreationOptions options;
87  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
88 
89  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
90  std::vector<std::string> errMessages;
91 
92  try
93  {
94  Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
95  BOOST_FAIL("Should have thrown an exception.");
96  }
97  catch (const armnn::InvalidArgumentException& e)
98  {
99  // Different exceptions are thrown on different backends
100  }
101  BOOST_CHECK(errMessages.size() > 0);
102 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:46

◆ BOOST_AUTO_TEST_CASE() [3/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerWithFallback  )

Definition at line 104 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Input, armnn::Normalization, armnn::Optimize(), armnn::Output, and IOutputSlot::SetTensorInfo().

105 {
106  // build up the structure of the network
107  armnn::INetworkPtr net(armnn::INetwork::Create());
108 
109  armnn::IConnectableLayer* input = net->AddInputLayer(0);
110 
111  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
112  armnn::NormalizationDescriptor descriptor;
113  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
114 
115  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
116 
117  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
118  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
119 
120  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
121  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
122 
123  armnn::IRuntime::CreationOptions options;
124  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
125 
126  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
127  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
128  BOOST_REQUIRE(optNet);
129 
130  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
131  {
132  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
133  // the other layers are supported by CpuRef.
134  // If NEON is not enabled, all layers are supported by CpuRef.
135 #if defined(ARMCOMPUTENEON_ENABLED)
136  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
137  {
138  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
139  }
140  else if (layer->GetType() == armnn::LayerType::Normalization)
141  {
142  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
143  }
144 #else
145  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
146 #endif
147  }
148 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:600
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:46

◆ BOOST_AUTO_TEST_CASE() [4/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDevice  )

Definition at line 150 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), IOutputSlot::SetTensorInfo(), and armnn::Undefined.

151 {
152  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
153 
154  armnn::Network net;
155 
156  armnn::NormalizationDescriptor nmDesc;
157  armnn::ActivationDescriptor acDesc;
158 
159  // in
160  // |
161  // nm
162  // / |
163  // ac |
164  // \ |
165  // ml
166  // |
167  // sm
168  // |
169  // ot
170  armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
171  layer->GetOutputSlot(0).SetTensorInfo(desc);
172 
173  armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
174 
175  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
176  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
177 
178  layer = net.AddActivationLayer(acDesc, "ac");
179 
180  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
181  layer->GetOutputSlot(0).SetTensorInfo(desc);
182 
183  armnn::IConnectableLayer* prevLayer = layer;
184  layer = net.AddMultiplicationLayer("ml");
185 
186  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
187  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
188  layer->GetOutputSlot(0).SetTensorInfo(desc);
189 
190  prevLayer = layer;
191  armnn::SoftmaxDescriptor softmaxDescriptor;
192  layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
193 
194  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
195  layer->GetOutputSlot(0).SetTensorInfo(desc);
196 
197  prevLayer = layer;
198  layer = net.AddOutputLayer(0, "ot");
199 
200  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
201 
202  armnn::IRuntime::CreationOptions options;
203  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
204 
205  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
206  std::vector<std::string> errMessages;
207 
208  try
209  {
210  Optimize(net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
211  BOOST_FAIL("Should have thrown an exception.");
212  }
213  catch (const armnn::InvalidArgumentException& e)
214  {
215  // Different exceptions are thrown on different backends
216  }
217  BOOST_CHECK(errMessages.size() > 0);
218 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Private implementation of INetwork.
Definition: Network.hpp:28
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [5/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback  )

Definition at line 220 of file OptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuRef, IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), IOutputSlot::SetTensorInfo(), and armnn::Undefined.

221 {
222  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
223 
224  armnn::Network net;
225 
226  armnn::NormalizationDescriptor nmDesc;
227  armnn::ActivationDescriptor acDesc;
228 
229  // in
230  // |
231  // nm
232  // / |
233  // ac |
234  // \ |
235  // ml
236  // |
237  // sm
238  // |
239  // ot
240  armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
241  layer->GetOutputSlot(0).SetTensorInfo(desc);
242 
243  armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
244 
245  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
246  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
247 
248  layer = net.AddActivationLayer(acDesc, "ac");
249 
250  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
251  layer->GetOutputSlot(0).SetTensorInfo(desc);
252 
253  armnn::IConnectableLayer* prevLayer = layer;
254  layer = net.AddMultiplicationLayer("ml");
255 
256  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
257  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
258  layer->GetOutputSlot(0).SetTensorInfo(desc);
259 
260  prevLayer = layer;
261  armnn::SoftmaxDescriptor softmaxDescriptor;
262  layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
263 
264  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
265  layer->GetOutputSlot(0).SetTensorInfo(desc);
266 
267  prevLayer = layer;
268  layer = net.AddOutputLayer(0, "ot");
269 
270  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
271 
272  armnn::IRuntime::CreationOptions options;
273  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
274 
275  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
276 
277  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
278  BOOST_CHECK(optNet);
279 
280  // validate workloads
281  armnn::RefWorkloadFactory fact;
282  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
283  {
284  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
285  BOOST_CHECK_NO_THROW(
286  layer->CreateWorkload(fact));
287  }
288 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:600
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Private implementation of INetwork.
Definition: Network.hpp:28
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [6/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback  )

Definition at line 290 of file OptimizedNetworkTests.cpp.

References BOOST_AUTO_TEST_SUITE_END(), IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::GpuAcc, armnn::Input, armnn::Normalization, armnn::Optimize(), armnn::Output, and IOutputSlot::SetTensorInfo().

291 {
292  // build up the structure of the network
293  armnn::INetworkPtr net(armnn::INetwork::Create());
294 
295  armnn::IConnectableLayer* input = net->AddInputLayer(0);
296 
297  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
298  armnn::NormalizationDescriptor descriptor;
299  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
300 
301  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
302 
303  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
304  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
305 
306  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
307  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
308 
309  armnn::IRuntime::CreationOptions options;
310  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
311 
312  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
313  armnn::Compute::CpuAcc, armnn::Compute::GpuAcc, armnn::Compute::GpuAcc,
314  armnn::Compute::CpuRef, armnn::Compute::CpuRef };
315 
316  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
317  BOOST_REQUIRE(optNet);
318 
319  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
320  {
321  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
322  // the other layers are supported by CpuRef.
323  // If only CL is enabled, Input and Output layers are supported by GpuAcc,
324  // the other layers are supported by CpuRef.
325  // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
326 #if defined(ARMCOMPUTENEON_ENABLED)
327  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
328  {
329  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
330  }
331  else if (layer->GetType() == armnn::LayerType::Normalization)
332  {
333  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
334  }
335 #elif defined(ARMCOMPUTECL_ENABLED)
336  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
337  {
338  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
339  }
340  else if (layer->GetType() == armnn::LayerType::Normalization)
341  {
342  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
343  }
344 #else
345  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
346 #endif
347  }
348 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:25
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1011
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:600
GPU Execution: OpenCL: ArmCompute.
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:46