std::vector<double> inferenceTimes;
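// The following lines assume the setup done earlier in the listing (not shown here):
// 'network' (armnn::INetworkPtr) holds the parsed model, 'runtime' is the armnn::IRuntimePtr,
// 'armnnparser' is the armnnTfLiteParser::ITfLiteParserPtr, 'preferred_backends_order' is the
// std::vector<armnn::BackendId> of backends to try, and 'networkId' is an armnn::NetworkId.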
armnn::IOptimizedNetworkPtr optimizedNet =
    armnn::Optimize(*network, preferred_backends_order, runtime->GetDeviceSpec());
runtime->LoadNetwork(networkId, std::move(optimizedNet));
if (armnnparser->GetSubgraphCount() != 1)
{
    std::cout << "Model with more than 1 subgraph is not supported by this benchmark application.\n";
}

size_t subgraphId = 0;
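// Display the model information: enumerate the input and output tensors of the subgraph
// and collect their binding information and TensorInfo.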
std::cout << "\nModel information:" << std::endl;
std::vector<armnnTfLiteParser::BindingPointInfo> inputBindings;
std::vector<armnn::TensorInfo> inputTensorInfos;
std::vector<std::string> inputTensorNames = armnnparser->GetSubgraphInputTensorNames(subgraphId);
for (unsigned int i = 0; i < inputTensorNames.size(); i++)
{
    std::cout << "inputTensorNames[" << i << "] = " << inputTensorNames[i] << std::endl;
    armnnTfLiteParser::BindingPointInfo inputBinding =
        armnnparser->GetNetworkInputBindingInfo(subgraphId, inputTensorNames[i]);
    armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, inputBinding.first);
    inputBindings.push_back(inputBinding);
    inputTensorInfos.push_back(inputTensorInfo);
}
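// Do the same for every output tensor of the subgraph.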
std::vector<armnnTfLiteParser::BindingPointInfo> outputBindings;
std::vector<armnn::TensorInfo> outputTensorInfos;
std::vector<std::string> outputTensorNames = armnnparser->GetSubgraphOutputTensorNames(subgraphId);
for (unsigned int i = 0; i < outputTensorNames.size(); i++)
{
    std::cout << "outputTensorNames[" << i << "] = " << outputTensorNames[i] << std::endl;
    armnnTfLiteParser::BindingPointInfo outputBinding =
        armnnparser->GetNetworkOutputBindingInfo(subgraphId, outputTensorNames[i]);
    armnn::TensorInfo outputTensorInfo = runtime->GetOutputTensorInfo(networkId, outputBinding.first);
    outputBindings.push_back(outputBinding);
    outputTensorInfos.push_back(outputTensorInfo);
}
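// Allocate one float buffer per model input and wrap it in an armnn::ConstTensor.
// 'inputTensors' (armnn::InputTensors) and 'nb_inputs' are assumed to be defined in the
// omitted part of the listing. Capacity is reserved up front so each buffer stays at a
// stable address, since the ConstTensor only stores a raw pointer to it.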
std::vector<std::vector<float>> in;
in.reserve(nb_inputs);
for (unsigned int i = 0; i < nb_inputs; i++)
{
    std::vector<float> in_data(inputTensorInfos.at(i).GetNumElements());
    in.push_back(in_data);
    inputTensors.push_back({ inputBindings[i].first,
                             armnn::ConstTensor(inputBindings[i].second, in[i].data()) });
}
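// Likewise, allocate one float buffer per model output and wrap it in an armnn::Tensor.
// 'outputTensors' (armnn::OutputTensors) and 'nb_ouputs' are assumed to be defined in the
// omitted part of the listing.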
std::vector<std::vector<float>> out;
out.reserve(nb_ouputs);
for (unsigned int i = 0; i < nb_ouputs; i++)
{
    std::vector<float> out_data(outputTensorInfos.at(i).GetNumElements());
    out.push_back(out_data);
    outputTensors.push_back({ outputBindings[i].first,
                              armnn::Tensor(outputBindings[i].second, out[i].data()) });
}
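// Run nb_loops inferences and record each execution time in microseconds.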
std::cout << "\ninferences are running: " << std::flush;
for (int i = 0; i < nb_loops; i++)
{
    struct timeval start_time, stop_time;
    gettimeofday(&start_time, nullptr);

    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);

    gettimeofday(&stop_time, nullptr);
    inferenceTimes.push_back((get_us(stop_time) - get_us(start_time)));
    std::cout << "# " << std::flush;
}
auto maxInfTime = *std::max_element(inferenceTimes.begin(), inferenceTimes.end());
auto minInfTime = *std::min_element(inferenceTimes.begin(), inferenceTimes.end());
auto avgInfTime = std::accumulate(inferenceTimes.begin(), inferenceTimes.end(), 0.0) /
                  inferenceTimes.size();

std::cout << "\n\ninference time: ";
std::cout << "min=" << minInfTime << "us ";
std::cout << "max=" << maxInfTime << "us ";
std::cout << "avg=" << avgInfTime << "us" << std::endl;