14 #include <boost/test/unit_test.hpp> 21 using namespace armnn;
29 bool WaitForPacketsSent(uint32_t timeout = 1000)
44 for (
auto const& backend : suitableBackends)
77 std::vector<armnn::BackendId> backendsVec {backend};
85 std::vector<float> inputData(16);
86 std::vector<float> outputData(16);
87 for (
unsigned int i = 0; i < 16; ++i) {
110 std::cout <<
error.what() << std::endl;
114 std::vector<std::string> expectedOutput;
115 expectedOutput.push_back(
"Entity [0] name = input type = layer");
116 expectedOutput.push_back(
" connection [17] from entity [0] to entity [1]");
117 expectedOutput.push_back(
" child: Entity [26] backendId = " + backend.Get() +
" type = workload");
118 expectedOutput.push_back(
"Entity [1] name = Rsqrt type = layer");
119 expectedOutput.push_back(
" connection [25] from entity [1] to entity [2]");
120 expectedOutput.push_back(
" child: Entity [18] backendId = " + backend.Get() +
" type = workload");
121 expectedOutput.push_back(
"Entity [2] name = output type = layer");
122 expectedOutput.push_back(
" child: Entity [30] backendId = " + backend.Get() +
" type = workload");
123 expectedOutput.push_back(
"Entity [6] processId = [processId] type = network");
124 expectedOutput.push_back(
" child: Entity [0] name = input type = layer");
125 expectedOutput.push_back(
" child: Entity [1] name = Rsqrt type = layer");
126 expectedOutput.push_back(
" child: Entity [2] name = output type = layer");
127 expectedOutput.push_back(
" execution: Entity [34] type = inference");
128 expectedOutput.push_back(
" event: [8] class [start_of_life]");
129 expectedOutput.push_back(
"Entity [18] backendId = " + backend.Get() +
" type = workload");
130 expectedOutput.push_back(
" execution: Entity [47] type = workload_execution");
131 expectedOutput.push_back(
"Entity [26] backendId = " + backend.Get() +
" type = workload");
132 expectedOutput.push_back(
" execution: Entity [39] type = workload_execution");
133 expectedOutput.push_back(
"Entity [30] backendId = " + backend.Get() +
" type = workload");
134 expectedOutput.push_back(
" execution: Entity [55] type = workload_execution");
135 expectedOutput.push_back(
"Entity [34] type = inference");
136 expectedOutput.push_back(
" child: Entity [39] type = workload_execution");
137 expectedOutput.push_back(
" child: Entity [47] type = workload_execution");
138 expectedOutput.push_back(
" child: Entity [55] type = workload_execution");
139 expectedOutput.push_back(
" event: [37] class [start_of_life]");
140 expectedOutput.push_back(
" event: [63] class [end_of_life]");
141 expectedOutput.push_back(
"Entity [39] type = workload_execution");
142 expectedOutput.push_back(
" event: [43] class [start_of_life]");
143 expectedOutput.push_back(
" event: [45] class [end_of_life]");
144 expectedOutput.push_back(
"Entity [47] type = workload_execution");
145 expectedOutput.push_back(
" event: [51] class [start_of_life]");
146 expectedOutput.push_back(
" event: [53] class [end_of_life]");
147 expectedOutput.push_back(
"Entity [55] type = workload_execution");
148 expectedOutput.push_back(
" event: [59] class [start_of_life]");
149 expectedOutput.push_back(
" event: [61] class [end_of_life]");
160 for (
auto const& backend : suitableBackends)
165 BOOST_CHECK(!fs::exists(tempPath));
200 std::vector<BackendId> backendsVec{backend};
208 std::vector<float> inputData(16);
209 std::vector<float> outputData(16);
210 for (
unsigned int i = 0; i < 16; ++i) {
234 BOOST_CHECK(fs::file_size(tempPath) > 0);
239 BOOST_CHECK(fs::remove(tempPath));
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
virtual TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override
std::shared_ptr< ILocalPacketHandler > ILocalPacketHandlerSharedPtr
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
std::string m_OutgoingCaptureFile
virtual const IDeviceSpec & GetDeviceSpec() const override
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
const std::vector< arm::pipe::ProfilingException > & GetErrors() const
Copyright (c) 2020 ARM Limited.
std::vector< BackendId > GetSuitableBackendRegistered()
Returns a vector of CpuRef, CpuAcc or GpuAcc backends if they were registered.
std::string m_IncomingCaptureFile
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
void ResetGuidGenerator()
bool WaitForPacketSent(ProfilingService &instance, uint32_t timeout=1000)
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
std::vector< armnn::profiling::ILocalPacketHandlerSharedPtr > m_LocalPacketHandlers
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
profiling::ProfilingService & GetProfilingService(armnn::Runtime *runtime)
bool CompareOutput(std::vector< std::string > output, std::vector< std::string > expectedOutput)
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
void ResetExternalProfilingOptions(const ExternalProfilingOptions &options, bool resetProfilingService=false)
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
BOOST_AUTO_TEST_SUITE_END()
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
virtual Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network) override
Loads a complete network into the Runtime.
std::vector< std::string > GetModelDescription(const TimelineModel &model)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
fs::path NamedTempFile(const char *fileName)
Construct a temporary file name.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
ExternalProfilingOptions m_ProfilingOptions
virtual Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors) override
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
static INetworkPtr Create(NetworkOptions networkOptions={})
virtual TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override