ArmNN 20.02
NeonOptimizedNetworkTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonWorkloadFactoryHelper.hpp"

#include <Graph.hpp>
#include <Network.hpp>

#include <neon/NeonWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)

BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);
    // validate workloads
    armnn::NeonWorkloadFactory fact =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
        BOOST_CHECK_NO_THROW(
            layer->CreateWorkload(fact));
    }
}
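In the test case above, workloads are created directly through the NeonWorkloadFactory so that per-layer backend support can be checked. Outside the test harness, the optimized network is normally handed to the runtime, which creates the workloads itself when the network is loaded. A minimal sketch of that path, assuming the runtime and optNet objects from the test case above (the netId variable name is illustrative and not part of this file):

// Sketch (not in the original file): loading the optimized network into the
// runtime, which internally creates the workloads validated by the test above.
armnn::NetworkId netId;                                        // illustrative name
armnn::Status status = runtime->LoadNetwork(netId, std::move(optNet));
BOOST_CHECK(status == armnn::Status::Success);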

BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(!optNet);
}

BOOST_AUTO_TEST_SUITE_END()
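The second test case exercises the no-fallback path: the unsupported normalization layer has no other backend to fall back to, so Optimize returns a null network. As a hedged sketch (not part of this file, and assuming the reference backend was built in), appending CpuRef as a fallback lets the same kind of network optimize successfully, and the optional messages argument of armnn::Optimize can collect per-layer diagnostics; the tensor shapes and variable names below are illustrative.

// Sketch (not in the original file): optimizing with a CpuRef fallback and
// collecting diagnostic messages from armnn::Optimize.
#include <armnn/ArmNN.hpp>

#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnn;

    // Same network shape as the second test case: input -> normalization -> output.
    INetworkPtr net = INetwork::Create();
    IConnectableLayer* input     = net->AddInputLayer(0);
    NormalizationDescriptor descriptor;                   // default configuration, unsupported by CpuAcc
    IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
    IConnectableLayer* output    = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);

    // CpuRef after CpuAcc lets layers that CpuAcc rejects fall back to the reference backend.
    std::vector<BackendId> backends = { Compute::CpuAcc, Compute::CpuRef };
    std::vector<std::string> messages;
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(),
                                           OptimizerOptions(),
                                           Optional<std::vector<std::string>&>(messages));

    for (const std::string& msg : messages)
    {
        std::cout << msg << std::endl;   // e.g. warnings about layers reassigned to the fallback backend
    }
    return optNet ? 0 : 1;               // non-null because the normalization layer can run on CpuRef
}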