ArmNN 23.02
ArmnnDriverImpl Class Reference

#include <ArmnnDriverImpl.hpp>

Static Public Member Functions

static GeneralResult< SharedPreparedModel > PrepareArmnnModel (const armnn::IRuntimePtr &runtime, const armnn::IGpuAccTunedParametersPtr &clTunedParameters, const DriverOptions &options, const Model &model, const std::vector< SharedHandle > &modelCacheHandle, const std::vector< SharedHandle > &dataCacheHandle, const CacheToken &token, bool float32ToFloat16=false, Priority priority=Priority::MEDIUM)
 
static GeneralResult< SharedPreparedModel > PrepareArmnnModelFromCache (const armnn::IRuntimePtr &runtime, const armnn::IGpuAccTunedParametersPtr &clTunedParameters, const DriverOptions &options, const std::vector< SharedHandle > &modelCacheHandle, const std::vector< SharedHandle > &dataCacheHandle, const CacheToken &token, bool float32ToFloat16=false)
 
static const Capabilities & GetCapabilities (const armnn::IRuntimePtr &runtime)
 

Detailed Description

Definition at line 23 of file ArmnnDriverImpl.hpp.

Member Function Documentation

◆ GetCapabilities()

const Capabilities & GetCapabilities ( const armnn::IRuntimePtr &  runtime)
static

Definition at line 549 of file ArmnnDriverImpl.cpp.

{
    VLOG(DRIVER) << "ArmnnDriverImpl::GetCapabilities()";
    static const Capabilities theCapabilities = GenerateCapabilities();
    return theCapabilities;
}

Referenced by ArmnnDriver::getCapabilities().
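
The returned reference points at a function-local static, so the capabilities are generated once and reused on every subsequent call. A minimal call-site sketch (assuming a default-constructed runtime; error handling omitted):

    armnn::IRuntime::CreationOptions runtimeOptions;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(runtimeOptions);

    // Generated once on first use (C++11 magic statics), then cached.
    const Capabilities& capabilities = armnn_driver::ArmnnDriverImpl::GetCapabilities(runtime);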

◆ PrepareArmnnModel()

GeneralResult< SharedPreparedModel > PrepareArmnnModel ( const armnn::IRuntimePtr &  runtime,
const armnn::IGpuAccTunedParametersPtr &  clTunedParameters,
const DriverOptions &  options,
const Model &  model,
const std::vector< SharedHandle > &  modelCacheHandle,
const std::vector< SharedHandle > &  dataCacheHandle,
const CacheToken &  token,
bool  float32ToFloat16 = false,
Priority  priority = Priority::MEDIUM 
)
static

Definition at line 99 of file ArmnnDriverImpl.cpp.

{
    VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModel()";

    if (!runtime)
    {
        return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE) << "Device unavailable";
    }

    if (const auto result = validate(model); !result.ok())
    {
        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid model passed as input";
    }

    // Deliberately ignore any unsupported operations requested by the options -
    // at this point we're being asked to prepare a model that we've already declared support for
    // and the operation indices may be different to those in getSupportedOperations anyway.
    std::set<unsigned int> unsupportedOperations;
    ModelToINetworkTransformer modelConverter(options.GetBackends(),
                                              model,
                                              unsupportedOperations);

    if (modelConverter.GetConversionResult() != ConversionResult::Success)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "ModelToINetworkConverter failed";
    }

    // Serialize the network graph to a .armnn file if an output directory
    // has been specified in the drivers' arguments.
    std::vector<uint8_t> dataCacheData;
    bool serializeToFile = dataCacheHandle.size() < 1 ? false : true;
    auto serializedNetworkFileName =
        SerializeNetwork(*modelConverter.GetINetwork(),
                         options.GetRequestInputsAndOutputsDumpDir(),
                         dataCacheData,
                         serializeToFile);

    // Optimize the network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    armnn::OptimizerOptions OptOptions;
    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();

    int cachedFd = -1;
    bool saveCachedNetwork = options.SaveCachedNetwork();

    unsigned int numberOfCachedModelFiles = 0;
    if (modelCacheHandle.size() > 0)
    {
        unsigned int index = 0;
        for (auto& backend : options.GetBackends())
        {
            // modelCacheHandle size should be equal to numberOfCachedModelFiles
            // modelCacheHandle vector should be in same order as backends
            auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
            if (numberOfCacheFiles > 0)
            {
                numberOfCachedModelFiles += numberOfCacheFiles;
                // For GpuAcc numberOfCachedFiles is 1
                if (backend == armnn::Compute::GpuAcc)
                {
                    cachedFd = *modelCacheHandle[index];
                    saveCachedNetwork = true;
                }
                index += numberOfCachedModelFiles;
            }
        }
    }

    armnn::BackendOptions gpuAcc("GpuAcc",
    {
        { "FastMathEnabled", options.IsFastMathEnabled() },
        { "SaveCachedNetwork", saveCachedNetwork },
        { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
        { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
        { "CachedFileDescriptor", cachedFd }
    });

    armnn::BackendOptions cpuAcc("CpuAcc",
    {
        { "FastMathEnabled", options.IsFastMathEnabled() },
        { "NumberOfThreads", options.GetNumberOfThreads() }
    });
    OptOptions.m_ModelOptions.push_back(gpuAcc);
    OptOptions.m_ModelOptions.push_back(cpuAcc);

    std::vector<std::string> errMessages;
    try
    {
        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
                                 options.GetBackends(),
                                 runtime->GetDeviceSpec(),
                                 OptOptions,
                                 errMessages);
    }
    catch (std::exception& e)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
    }

    // Check that the optimized network is valid.
    if (!optNet)
    {
        std::stringstream message;
        message << "Invalid optimized network";
        for (const std::string& msg : errMessages)
        {
            message << "\n" << msg;
        }
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
    }

    // Export the optimized network graph to a dot file if an output dump directory
    // has been specified in the drivers' arguments.
    std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
                                                               options.GetRequestInputsAndOutputsDumpDir());

    // Load it into the runtime.
    armnn::NetworkId netId = 0;
    std::string msg;
    armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
                                                MemorySource::Undefined,
                                                MemorySource::Undefined,
                                                options.IsGpuProfilingEnabled());
    auto numInputs  = getMainModel(model).inputIndexes.size();
    auto numOutputs = getMainModel(model).outputIndexes.size();
    try
    {
        if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
        }
    }
    catch (std::exception& e)
    {
        std::stringstream message;
        message << "Exception (" << e.what() << ") caught from LoadNetwork.";
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
    }

    // Now that we have a networkId for the graph rename the exported files to use it
    // so that we can associate the graph file and the input/output tensor exported files
    RenameExportedFiles(serializedNetworkFileName,
                        dotGraphFileName,
                        options.GetRequestInputsAndOutputsDumpDir(),
                        netId);

    // Cache the model
    size_t hashValue = 0;
    if (dataCacheHandle.size() == 1)
    {
        hashValue = Hash(dataCacheData);
    }

    // Cache the model data
    if (modelCacheHandle.size() > 0)
    {
        if (modelCacheHandle.size() == numberOfCachedModelFiles)
        {
            for (uint32_t i = 0; i < modelCacheHandle.size(); ++i)
            {
                int modelCacheFileAccessMode = fcntl(*modelCacheHandle[i], F_GETFL) & O_ACCMODE;
                if (modelCacheFileAccessMode != O_RDONLY)
                {
                    struct stat statBuffer;
                    if (fstat(*modelCacheHandle[i], &statBuffer) == 0)
                    {
                        long modelDataSize = statBuffer.st_size;
                        if (modelDataSize > 0)
                        {
                            std::vector<uint8_t> modelData(modelDataSize);
                            pread(*modelCacheHandle[i], modelData.data(), modelData.size(), 0);
                            hashValue ^= Hash(modelData);
                        }
                    }
                }
            }
        }
    }
    if (dataCacheHandle.size() == 1 && hashValue != 0)
    {
        std::vector<uint8_t> theHashValue(sizeof(hashValue));
        ::memcpy(theHashValue.data(), &hashValue, sizeof(hashValue));

        write(*dataCacheHandle[0], theHashValue.data(), theHashValue.size());
        pwrite(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size(), theHashValue.size());
    }

    bool executeWithDummyInputs = (std::find(options.GetBackends().begin(),
                                             options.GetBackends().end(),
                                             armnn::Compute::GpuAcc) != options.GetBackends().end());

    auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
                                                                    runtime.get(),
                                                                    model,
                                                                    options.GetRequestInputsAndOutputsDumpDir(),
                                                                    options.IsGpuProfilingEnabled(),
                                                                    priority);

    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
    // Only run this if the GpuAcc backend has been added to options
    if (std::find(options.GetBackends().begin(),
                  options.GetBackends().end(),
                  armnn::Compute::GpuAcc) != options.GetBackends().end())
    {
        if (!preparedModel->ExecuteWithDummyInputs(numInputs, numOutputs))
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be executed";
        }

        if (clTunedParameters &&
            options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
        {
            // Now that we've done one inference the CL kernel parameters will have been tuned,
            // so save the updated file.
            try
            {
                clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
            }
            catch (std::exception& error)
            {
                VLOG(DRIVER) << "ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file"
                             << options.GetClTunedParametersFile().c_str() << error.what();
            }
        }
    }
    return std::move(preparedModel);
}
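
When a data cache handle is supplied, the function writes a small header followed by the serialized network: the combined hash at offset 0, then the payload immediately after it. A hedged sketch of that layout, using a hypothetical helper (WriteDataCache is not part of the driver):

    #include <unistd.h>   // write, pwrite
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Mirrors the writes above: sizeof(size_t) hash bytes at offset 0,
    // then the serialized network starting right after the header.
    void WriteDataCache(int fd, size_t hashValue, const std::vector<uint8_t>& dataCacheData)
    {
        std::vector<uint8_t> theHashValue(sizeof(hashValue));
        std::memcpy(theHashValue.data(), &hashValue, sizeof(hashValue));
        write(fd, theHashValue.data(), theHashValue.size());                          // header
        pwrite(fd, dataCacheData.data(), dataCacheData.size(), theHashValue.size()); // payload
    }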

References armnn::error, armnn_driver::ExportNetworkGraphToDotFile(), DriverOptions::GetBackends(), DriverOptions::GetCachedNetworkFilePath(), DriverOptions::GetClMLGOTunedParametersFile(), DriverOptions::GetClTunedParametersFile(), DriverOptions::GetClTunedParametersMode(), ModelToINetworkTransformer::GetConversionResult(), ModelToINetworkTransformer::GetINetwork(), getMainModel(), armnn::GetNumberOfCacheFiles(), DriverOptions::GetNumberOfThreads(), DriverOptions::GetRequestInputsAndOutputsDumpDir(), armnn::GpuAcc, DriverOptions::isAsyncModelExecutionEnabled(), DriverOptions::IsFastMathEnabled(), DriverOptions::IsGpuProfilingEnabled(), OptimizerOptions::m_ModelOptions, OptimizerOptions::m_ProfilingEnabled, OptimizerOptions::m_ReduceFp32ToFp16, armnn::Optimize(), armnn_driver::RenameExportedFiles(), DriverOptions::SaveCachedNetwork(), armnn_driver::SerializeNetwork(), armnn::Success, and IGpuAccTunedParameters::UpdateTunedParameters.

Referenced by ArmnnDriver::prepareModel().
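
A hedged call-site sketch of how a front-end such as ArmnnDriver::prepareModel() might invoke this entry point. Assumptions: model is a canonical Model already received from the NNAPI runtime and in scope; the DriverOptions construction is simplified for illustration; the empty cache-handle vectors disable caching and the null tuned-parameters pointer disables CL tuning:

    auto runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions{});
    armnn::IGpuAccTunedParametersPtr clTunedParameters; // null: CL tuning disabled
    DriverOptions options(armnn::Compute::GpuAcc);      // illustrative construction
    CacheToken token{};                                 // zeroed token for illustration

    auto result = ArmnnDriverImpl::PrepareArmnnModel(runtime, clTunedParameters, options,
                                                     model, /*modelCacheHandle*/ {},
                                                     /*dataCacheHandle*/ {}, token);
    if (!result.has_value())
    {
        // result.error() carries the ErrorStatus and a diagnostic message.
    }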

◆ PrepareArmnnModelFromCache()

GeneralResult< SharedPreparedModel > PrepareArmnnModelFromCache ( const armnn::IRuntimePtr &  runtime,
const armnn::IGpuAccTunedParametersPtr &  clTunedParameters,
const DriverOptions &  options,
const std::vector< SharedHandle > &  modelCacheHandle,
const std::vector< SharedHandle > &  dataCacheHandle,
const CacheToken &  token,
bool  float32ToFloat16 = false 
)
static

Definition at line 338 of file ArmnnDriverImpl.cpp.

{
    VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModelFromCache()";

    if (!runtime)
    {
        return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Device unavailable";
    }

    if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Token size does not match!";
    }

    // Validate dataCacheHandle
    if (dataCacheHandle.size() != 1)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Not valid data cache handle!";
    }

    if (!ValidateSharedHandle(dataCacheHandle[0]))
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Not valid data cache handle!";
    }

    size_t cachedDataSize = 0;
    struct stat dataStatBuffer;
    if (fstat(*dataCacheHandle[0], &dataStatBuffer) == 0)
    {
        cachedDataSize = dataStatBuffer.st_size;
    }
    if (cachedDataSize == 0)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Not valid cached data!";
    }

    // Check that the number of cached model files matches the expected value
    unsigned int numberOfCachedModelFiles = 0;
    for (auto& backend : options.GetBackends())
    {
        numberOfCachedModelFiles += GetNumberOfCacheFiles(backend);
    }
    if (modelCacheHandle.size() != numberOfCachedModelFiles)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Model cache handle size does not match.";
    }

    // Read the hashValue
    std::vector<uint8_t> hashValue(sizeof(size_t));
    pread(*dataCacheHandle[0], hashValue.data(), hashValue.size(), 0);

    // Read the model
    std::vector<uint8_t> dataCacheData(cachedDataSize - hashValue.size());
    pread(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size(), hashValue.size());
    auto calculatedHashValue = Hash(dataCacheData);

    int gpuAccCachedFd = -1;
    if (modelCacheHandle.size() > 0)
    {
        unsigned int index = 0;
        for (auto& backend : options.GetBackends())
        {
            // modelCacheHandle size should be equal to numberOfCachedModelFiles
            // modelCacheHandle vector should be in same order as backends
            auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
            if (numberOfCacheFiles > 0)
            {
                if (!ValidateSharedHandle(modelCacheHandle[index]))
                {
                    return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
                           << "ArmnnDriverImpl::prepareModelFromCache(): Invalid model cache handle!";
                }
                int cachedFd = *modelCacheHandle[index];
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long modelDataSize = statBuffer.st_size;
                    if (modelDataSize > 0)
                    {
                        std::vector<uint8_t> modelData(modelDataSize);
                        pread(cachedFd, modelData.data(), modelData.size(), 0);
                        calculatedHashValue ^= Hash(modelData);

                        if (backend == armnn::Compute::GpuAcc)
                        {
                            gpuAccCachedFd = cachedFd;
                        }
                    }
                }
                index += numberOfCacheFiles;
            }
        }
    }

    std::vector<uint8_t> calculatedHashData(sizeof(calculatedHashValue));
    ::memcpy(calculatedHashData.data(), &calculatedHashValue, sizeof(calculatedHashValue));
    if (hashValue != calculatedHashData)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): ValidateHash() failed!";
    }

    // Deserialize the network.
    armnn::INetworkPtr network = armnn::INetworkPtr(nullptr, [](armnn::INetwork*){});
    try
    {
        network = armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(dataCacheData);
    }
    catch (std::exception&)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
               << "ArmnnDriverImpl::prepareModelFromCache(): Exception caught from Deserializer!";
    }

    // Optimize the network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    armnn::OptimizerOptions OptOptions;
    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();

    armnn::BackendOptions gpuAcc("GpuAcc",
    {
        { "FastMathEnabled", options.IsFastMathEnabled() },
        { "SaveCachedNetwork", false },
        { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
        { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
        { "CachedFileDescriptor", gpuAccCachedFd }
    });

    armnn::BackendOptions cpuAcc("CpuAcc",
    {
        { "FastMathEnabled", options.IsFastMathEnabled() },
        { "NumberOfThreads", options.GetNumberOfThreads() }
    });
    OptOptions.m_ModelOptions.push_back(gpuAcc);
    OptOptions.m_ModelOptions.push_back(cpuAcc);

    std::vector<std::string> errMessages;
    try
    {
        optNet = armnn::Optimize(*network.get(),
                                 options.GetBackends(),
                                 runtime->GetDeviceSpec(),
                                 OptOptions,
                                 errMessages);
    }
    catch (std::exception& e)
    {
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
    }

    // Check that the optimized network is valid.
    if (!optNet)
    {
        std::stringstream message;
        message << "Invalid optimized network";
        for (const std::string& msg : errMessages)
        {
            message << "\n" << msg;
        }
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
    }

    // Export the optimized network graph to a dot file if an output dump directory
    // has been specified in the drivers' arguments.
    std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
                                                               options.GetRequestInputsAndOutputsDumpDir());

    // Load it into the runtime.
    armnn::NetworkId netId = 0;
    std::string msg;
    armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
                                                MemorySource::Undefined,
                                                MemorySource::Undefined,
                                                options.IsGpuProfilingEnabled());
    try
    {
        if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
        }
    }
    catch (std::exception& e)
    {
        std::stringstream message;
        message << "Exception (" << e.what() << ") caught from LoadNetwork.";
        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
    }

    auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
                                                                    runtime.get(),
                                                                    options.GetRequestInputsAndOutputsDumpDir(),
                                                                    options.IsGpuProfilingEnabled(),
                                                                    Priority::MEDIUM,
                                                                    true);
    return std::move(preparedModel);
}

References IDeserializer::Create(), armnn_driver::ExportNetworkGraphToDotFile(), DriverOptions::GetBackends(), DriverOptions::GetCachedNetworkFilePath(), DriverOptions::GetClMLGOTunedParametersFile(), armnn::GetNumberOfCacheFiles(), DriverOptions::GetNumberOfThreads(), DriverOptions::GetRequestInputsAndOutputsDumpDir(), armnn::GpuAcc, DriverOptions::isAsyncModelExecutionEnabled(), DriverOptions::IsFastMathEnabled(), DriverOptions::IsGpuProfilingEnabled(), OptimizerOptions::m_ModelOptions, OptimizerOptions::m_ProfilingEnabled, OptimizerOptions::m_ReduceFp32ToFp16, armnn::Optimize(), and armnn::Success.

Referenced by ArmnnDriver::prepareModelFromCache().
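
The integrity check above recomputes the combined hash, i.e. the hash of the serialized network XOR-ed with the hash of each model cache file, and compares it byte-for-byte against the header stored at offset 0. A self-contained sketch of that scheme; HashBytes is a stand-in for the driver's internal Hash() helper, which may differ:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    // Stand-in for the driver's Hash(); any deterministic byte hash
    // illustrates the XOR-combining scheme.
    size_t HashBytes(const std::vector<uint8_t>& data)
    {
        return std::hash<std::string>{}(std::string(data.begin(), data.end()));
    }

    bool ValidateCombinedHash(size_t storedHash,
                              const std::vector<uint8_t>& dataCacheData,
                              const std::vector<std::vector<uint8_t>>& modelCacheBlobs)
    {
        size_t calculated = HashBytes(dataCacheData);
        for (const auto& blob : modelCacheBlobs)
        {
            calculated ^= HashBytes(blob); // same XOR combining as the driver
        }
        return calculated == storedHash;
    }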


The documentation for this class was generated from the following files:
ArmnnDriverImpl.hpp
ArmnnDriverImpl.cpp