ArmNN
 20.02
OptimizedNetworkTests.cpp File Reference
#include <Graph.hpp>
#include <Network.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (SerializeToDot)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerNoFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDevice)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/6]

BOOST_AUTO_TEST_CASE ( SerializeToDot  )

Definition at line 16 of file OptimizedNetworkTests.cpp.

References Network::AddAdditionLayer(), Network::AddInputLayer(), Network::AddOutputLayer(), IOutputSlot::Connect(), armnn::CpuRef, IRuntime::Create(), armnn::Float32, IConnectableLayer::GetOutputSlot(), armnn::Optimize(), and options.

17 {
18  armnn::Network net;
19 
20  //Defines layers.
21  auto input = net.AddInputLayer(0);
22  auto add = net.AddAdditionLayer();
23  auto output = net.AddOutputLayer(0);
24 
25  // Connects layers.
26  input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
27  input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
28  add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
29 
30  armnn::TensorShape shape({4});
31  armnn::TensorInfo info(shape, armnn::DataType::Float32);
32  input->GetOutputSlot(0).SetTensorInfo(info);
33  add->GetOutputSlot(0).SetTensorInfo(info);
34 
35  armnn::IRuntime::CreationOptions options;
36  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
37 
38  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
39  armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
40 
41  std::ostringstream ss;
42  optimizedNet->SerializeToDot(ss);
43 
44  auto inputId = input->GetGuid();
45  auto addId = add->GetGuid();
46  auto outputId = output->GetGuid();
47 
48  std::stringstream expected;
49  expected <<
50  "digraph Optimized {\n"
51  " node [shape=\"record\"];\n"
52  " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
53  " " << inputId << " [label=\"{Input|LayerType : Input\\lBackendID : CpuRef\\l}\"];\n"
54  " " << addId << " [label=\"{Addition|LayerType : Addition\\lBackendID : CpuRef\\l}\"];\n"
55  " " << outputId << " [label=\"{Output|LayerType : Output\\lBackendID : CpuRef\\l}\"];\n"
56  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
57  " " << inputId << " -> " << addId << " [label=< [4] >];\n"
58  " " << addId << " -> " << outputId << " [label=< [4] >];\n"
59  "}\n";
60 
61  BOOST_TEST(ss.str() == expected.str());
62 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
IConnectableLayer * AddOutputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an output layer to the network.
Definition: Network.cpp:1310
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
IConnectableLayer * AddInputLayer(LayerBindingId id, const char *name=nullptr) override
Adds an input layer to the network.
Definition: Network.cpp:1041
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
Private implementation of INetwork.
Definition: Network.hpp:28
IConnectableLayer * AddAdditionLayer(const char *name=nullptr) override
Adds an addition layer to the network.
Definition: Network.cpp:1300
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options

◆ BOOST_AUTO_TEST_CASE() [2/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerNoFallback  )

Definition at line 64 of file OptimizedNetworkTests.cpp.

References BOOST_CHECK(), IOutputSlot::Connect(), armnn::CpuAcc, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), options, and IOutputSlot::SetTensorInfo().

65 {
66  // build up the structure of the network
67  armnn::INetworkPtr net(armnn::INetwork::Create());
68 
69  armnn::IConnectableLayer* input = net->AddInputLayer(0);
70 
71  // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
72  armnn::NormalizationDescriptor descriptor;
73  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
74 
75  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
76 
77  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
78  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
79 
80  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
81  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
82 
83  armnn::IRuntime::CreationOptions options;
84  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
85 
86  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
87  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
88  BOOST_CHECK(!optNet);
89 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create()
Definition: Network.cpp:49

◆ BOOST_AUTO_TEST_CASE() [3/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerWithFallback  )

Definition at line 91 of file OptimizedNetworkTests.cpp.

References BOOST_CHECK(), IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Input, armnn::Normalization, armnn::Optimize(), options, armnn::Output, and IOutputSlot::SetTensorInfo().

92 {
93  // build up the structure of the network
94  armnn::INetworkPtr net(armnn::INetwork::Create());
95 
96  armnn::IConnectableLayer* input = net->AddInputLayer(0);
97 
98  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
99  armnn::NormalizationDescriptor descriptor;
100  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
101 
102  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
103 
104  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
105  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
106 
107  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
108  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
109 
110  armnn::IRuntime::CreationOptions options;
111  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
112 
113  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
114  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
115  BOOST_REQUIRE(optNet);
116 
117  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
118  {
119  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
120  // the other layers are supported by CpuRef.
121  // If NEON is not enabled, all layers are supported by CpuRef.
122 #if defined(ARMCOMPUTENEON_ENABLED)
123  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
124  {
125  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
126  }
127  else if (layer->GetType() == armnn::LayerType::Normalization)
128  {
129  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
130  }
131 #else
132  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
133 #endif
134  }
135 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create()
Definition: Network.cpp:49

◆ BOOST_AUTO_TEST_CASE() [4/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDevice  )

Definition at line 137 of file OptimizedNetworkTests.cpp.

References BOOST_CHECK(), IOutputSlot::Connect(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), options, IOutputSlot::SetTensorInfo(), and armnn::Undefined.

138 {
139  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
140 
141  armnn::Network net;
142 
143  armnn::NormalizationDescriptor nmDesc;
144  armnn::ActivationDescriptor acDesc;
145 
146  // in
147  // |
148  // nm
149  // / |
150  // ac |
151  // \ |
152  // ml
153  // |
154  // sm
155  // |
156  // ot
157  armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
158  layer->GetOutputSlot(0).SetTensorInfo(desc);
159 
160  armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
161 
162  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
163  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
164 
165  layer = net.AddActivationLayer(acDesc, "ac");
166 
167  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
168  layer->GetOutputSlot(0).SetTensorInfo(desc);
169 
170  armnn::IConnectableLayer* prevLayer = layer;
171  layer = net.AddMultiplicationLayer("ml");
172 
173  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
174  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
175  layer->GetOutputSlot(0).SetTensorInfo(desc);
176 
177  prevLayer = layer;
178  armnn::SoftmaxDescriptor softmaxDescriptor;
179  layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
180 
181  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
182  layer->GetOutputSlot(0).SetTensorInfo(desc);
183 
184  prevLayer = layer;
185  layer = net.AddOutputLayer(0, "ot");
186 
187  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
188 
189  armnn::IRuntime::CreationOptions options;
190  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
191 
192  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
193 
194  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
195  BOOST_CHECK(!optNet);
196 
197 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Private implementation of INetwork.
Definition: Network.hpp:28
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A NormalizationDescriptor for the NormalizationLayer.
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [5/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback  )

Definition at line 199 of file OptimizedNetworkTests.cpp.

References BOOST_CHECK(), IOutputSlot::Connect(), armnn::CpuRef, IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), options, IOutputSlot::SetTensorInfo(), and armnn::Undefined.

200 {
201  const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
202 
203  armnn::Network net;
204 
205  armnn::NormalizationDescriptor nmDesc;
206  armnn::ActivationDescriptor acDesc;
207 
208  // in
209  // |
210  // nm
211  // / |
212  // ac |
213  // \ |
214  // ml
215  // |
216  // sm
217  // |
218  // ot
219  armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
220  layer->GetOutputSlot(0).SetTensorInfo(desc);
221 
222  armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
223 
224  layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
225  normLayer->GetOutputSlot(0).SetTensorInfo(desc);
226 
227  layer = net.AddActivationLayer(acDesc, "ac");
228 
229  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
230  layer->GetOutputSlot(0).SetTensorInfo(desc);
231 
232  armnn::IConnectableLayer* prevLayer = layer;
233  layer = net.AddMultiplicationLayer("ml");
234 
235  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
236  normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
237  layer->GetOutputSlot(0).SetTensorInfo(desc);
238 
239  prevLayer = layer;
240  armnn::SoftmaxDescriptor softmaxDescriptor;
241  layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
242 
243  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
244  layer->GetOutputSlot(0).SetTensorInfo(desc);
245 
246  prevLayer = layer;
247  layer = net.AddOutputLayer(0, "ot");
248 
249  prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
250 
251  armnn::IRuntime::CreationOptions options;
252  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
253 
254  std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
255 
256  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
257  BOOST_CHECK(optNet);
258 
259  // validate workloads
260  armnn::RefWorkloadFactory fact;
261  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
262  {
263  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
264  BOOST_CHECK_NO_THROW(
265  layer->CreateWorkload(fact));
266  }
267 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
Private implementation of INetwork.
Definition: Network.hpp:28
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A NormalizationDescriptor for the NormalizationLayer.
A SoftmaxDescriptor for the SoftmaxLayer.

◆ BOOST_AUTO_TEST_CASE() [6/6]

BOOST_AUTO_TEST_CASE ( OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback  )

Definition at line 269 of file OptimizedNetworkTests.cpp.

References BOOST_AUTO_TEST_SUITE_END(), BOOST_CHECK(), IOutputSlot::Connect(), armnn::CpuAcc, armnn::CpuRef, INetwork::Create(), IRuntime::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::GpuAcc, armnn::Input, armnn::Normalization, armnn::Optimize(), options, armnn::Output, and IOutputSlot::SetTensorInfo().

270 {
271  // build up the structure of the network
272  armnn::INetworkPtr net(armnn::INetwork::Create());
273 
274  armnn::IConnectableLayer* input = net->AddInputLayer(0);
275 
276  // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
277  armnn::NormalizationDescriptor descriptor;
278  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
279 
280  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
281 
282  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
283  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
284 
285  input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
286  normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
287 
288  armnn::IRuntime::CreationOptions options;
289  armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
290 
291  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
292                                             armnn::Compute::GpuAcc,
293                                             armnn::Compute::CpuRef };
294 
295  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
296  BOOST_REQUIRE(optNet);
297 
298  for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
299  {
300  // If NEON is enabled, Input and Output layers are supported by CpuAcc,
301  // the other layers are supported by CpuRef.
302  // If only CL is enabled, Input and Output layers are supported by GpuAcc,
303  // the other layers are supported by CpuRef.
304  // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
305 #if defined(ARMCOMPUTENEON_ENABLED)
306  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
307  {
308  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
309  }
310  else if (layer->GetType() == armnn::LayerType::Normalization)
311  {
312  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
313  }
314 #elif defined(ARMCOMPUTECL_ENABLED)
315  if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
316  {
317  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
318  }
319  else if (layer->GetType() == armnn::LayerType::Normalization)
320  {
321  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
322  }
323 #else
324  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
325 #endif
326  }
327 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
CPU Execution: Reference C++ kernels.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:24
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
GPU Execution: OpenCL: ArmCompute.
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
virtual int Connect(IInputSlot &destination)=0
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create()
Definition: Network.cpp:49