ArmNN
 23.02
ArmnnDriverImpl.cpp
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ArmnnDriverImpl.hpp"
7 #include "ArmnnPreparedModel.hpp"
8 #include "ModelToINetworkTransformer.hpp"
9 #include "SystemPropertiesUtils.hpp"
10 
11 #include <armnnDeserializer/IDeserializer.hpp>
12 
13 #include <log/log.h>
14 #include <sys/stat.h>
15 
16 namespace
17 {
18 
19 Capabilities GenerateCapabilities()
20 {
21  VLOG(DRIVER) << "ArmnnDriverImpl::GenerateCapabilities()";
22 
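    // Every supported operand type is reported with the same default performance
    // figures (execTime and powerUsage), as are the relaxed-precision, IF and WHILE entries.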
23  float defaultPerfValue = .1f;
24  const Capabilities::PerformanceInfo defaultPerfInfo = { /* execTime */ defaultPerfValue,
25  /* powerUsage */ defaultPerfValue
26  };
27  std::vector<OperandType> operandsTypes({
28  OperandType::FLOAT32,
29  OperandType::INT32,
30  OperandType::UINT32,
31  OperandType::TENSOR_FLOAT32,
32  OperandType::TENSOR_INT32,
33  OperandType::TENSOR_QUANT8_ASYMM,
34  OperandType::BOOL,
35  OperandType::TENSOR_QUANT16_SYMM,
36  OperandType::TENSOR_FLOAT16,
37  OperandType::TENSOR_BOOL8,
38  OperandType::FLOAT16,
39  OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
40  OperandType::TENSOR_QUANT16_ASYMM,
41  OperandType::TENSOR_QUANT8_SYMM,
42  OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
43  });
44 
45  std::vector<Capabilities::OperandPerformance> operandPerformances;
46  operandPerformances.reserve(operandsTypes.size());
47 
48  for (auto opType : operandsTypes)
49  {
50  operandPerformances.push_back(
51  Capabilities::OperandPerformance{ /* type */ opType, /* info */ defaultPerfInfo });
52  }
53 
54  auto operandPerformanceTable =
55  Capabilities::OperandPerformanceTable::create(std::move(operandPerformances)).value();
56 
57  return { /* relaxedFloat32toFloat16PerformanceScalar */ defaultPerfInfo,
58  /* relaxedFloat32toFloat16PerformanceTensor */ defaultPerfInfo,
59  /* operandPerformance */ std::move(operandPerformanceTable),
60  /* ifPerformance */ defaultPerfInfo,
61  /* whilePerformance */ defaultPerfInfo };
62 }
63 
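// Simple multiply-by-31 rolling hash, seeded with the data size. It is used to
// fingerprint the serialized network and the model cache contents so that cached
// data can be validated when a model is later prepared from cache.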
64 size_t Hash(std::vector<uint8_t>& cacheData)
65 {
66  std::size_t hash = cacheData.size();
67  for (auto& i : cacheData)
68  {
69  hash = ((hash << 5) - hash) + i;
70  }
71  return hash;
72 }
73 
74 } // anonymous namespace
75 
76 using namespace android::nn;
77 
78 namespace armnn_driver
79 {
80 
81 bool ArmnnDriverImpl::ValidateSharedHandle(const SharedHandle& sharedHandle)
82 {
83  bool valid = true;
84 
85  if (*sharedHandle < 0)
86  {
87  return !valid;
88  }
89 
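    // The cache file descriptor must be open for both reading and writing;
    // any other access mode makes the handle unusable for caching.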
90  int dataCacheFileAccessMode = fcntl(*sharedHandle, F_GETFL) & O_ACCMODE;
91  if (dataCacheFileAccessMode != O_RDWR)
92  {
93  return !valid;
94  }
95 
96  return valid;
97 }
98 
99 GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModel(
100  const armnn::IRuntimePtr& runtime,
101  const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
102  const DriverOptions& options,
103  const Model& model,
104  const std::vector<SharedHandle>& modelCacheHandle,
105  const std::vector<SharedHandle>& dataCacheHandle,
106  const CacheToken& token,
107  bool float32ToFloat16,
108  Priority priority)
109 {
110  VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModel()";
111 
112  if (!runtime)
113  {
114  return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE) << "Device unavailable";
115  }
116 
117  if (const auto result = validate(model); !result.ok())
118  {
119  return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid model passed as input";
120  }
121 
122  // Deliberately ignore any unsupported operations requested by the options -
123  // at this point we're being asked to prepare a model that we've already declared support for
124  // and the operation indices may be different to those in getSupportedOperations anyway.
125  std::set<unsigned int> unsupportedOperations;
126  ModelToINetworkTransformer modelConverter(options.GetBackends(),
127  model,
128  unsupportedOperations);
129 
130  if (modelConverter.GetConversionResult() != ConversionResult::Success)
131  {
132  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "ModelToINetworkConverter failed";
133  }
134 
135  // Serialize the network graph to a .armnn file if an output directory
136  // has been specified in the drivers' arguments.
137  std::vector<uint8_t> dataCacheData;
138  bool serializeToFile = dataCacheHandle.size() < 1 ? false : true;
139  auto serializedNetworkFileName =
140  SerializeNetwork(*modelConverter.GetINetwork(),
141  options.GetRequestInputsAndOutputsDumpDir(),
142  dataCacheData,
143  serializeToFile);
144 
145  // Optimize the network
146  armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
147  armnn::OptimizerOptions OptOptions;
148  OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
149  OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
150 
151  int cachedFd = -1;
152  bool saveCachedNetwork = options.SaveCachedNetwork();
153 
154  unsigned int numberOfCachedModelFiles = 0;
155  if (modelCacheHandle.size() > 0)
156  {
157  unsigned int index = 0;
158  for (auto& backend : options.GetBackends())
159  {
160  // modelCacheHandle size should be equal to numberOfCachedModelFiles
161  // modelCacheHandle vector should be in same order as backends
162  auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
163  if (numberOfCacheFiles > 0)
164  {
165  numberOfCachedModelFiles += numberOfCacheFiles;
166  // For GpuAcc numberOfCachedFiles is 1
167  if (backend == armnn::Compute::GpuAcc)
168  {
169  cachedFd = *modelCacheHandle[index];
170  saveCachedNetwork = true;
171  }
172  index += numberOfCachedModelFiles;
173  }
174  }
175  }
176 
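    // Backend-specific options: GpuAcc controls fast math, compiled-network caching and
    // MLGO tuning; CpuAcc controls fast math and the number of threads.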
177  armnn::BackendOptions gpuAcc("GpuAcc",
178  {
179  { "FastMathEnabled", options.IsFastMathEnabled() },
180  { "SaveCachedNetwork", saveCachedNetwork },
181  { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
182  { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
183  { "CachedFileDescriptor", cachedFd }
184  });
185 
186  armnn::BackendOptions cpuAcc("CpuAcc",
187  {
188  { "FastMathEnabled", options.IsFastMathEnabled() },
189  { "NumberOfThreads", options.GetNumberOfThreads() }
190  });
191  OptOptions.m_ModelOptions.push_back(gpuAcc);
192  OptOptions.m_ModelOptions.push_back(cpuAcc);
193 
194  std::vector<std::string> errMessages;
195  try
196  {
197  optNet = armnn::Optimize(*modelConverter.GetINetwork(),
198  options.GetBackends(),
199  runtime->GetDeviceSpec(),
200  OptOptions,
201  errMessages);
202  }
203  catch (std::exception& e)
204  {
205  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
206  }
207 
208  // Check that the optimized network is valid.
209  if (!optNet)
210  {
211  std::stringstream message;
212  message << "Invalid optimized network";
213  for (const std::string& msg : errMessages)
214  {
215  message << "\n" << msg;
216  }
217  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
218  }
219 
220  // Export the optimized network graph to a dot file if an output dump directory
221  // has been specified in the drivers' arguments.
222  std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
223  options.GetRequestInputsAndOutputsDumpDir());
224 
225  // Load it into the runtime.
226  armnn::NetworkId netId = 0;
227  std::string msg;
228  armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
229  MemorySource::Undefined,
230  MemorySource::Undefined,
231  options.IsGpuProfilingEnabled());
232  auto numInputs = getMainModel(model).inputIndexes.size();
233  auto numOutputs = getMainModel(model).outputIndexes.size();
234  try
235  {
236  if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
237  {
238  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
239  }
240  }
241  catch (std::exception& e)
242  {
243  std::stringstream message;
244  message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
245  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
246  }
247 
248  // Now that we have a networkId for the graph rename the exported files to use it
249  // so that we can associate the graph file and the input/output tensor exported files
250  RenameExportedFiles(serializedNetworkFileName,
251  dotGraphFileName,
252  options.GetRequestInputsAndOutputsDumpDir(),
253  netId);
254 
255  // Cache the model
256  size_t hashValue = 0;
257  if (dataCacheHandle.size() == 1 )
258  {
259  hashValue = Hash(dataCacheData);
260  }
261 
262  // Cache the model data
263  if (modelCacheHandle.size() > 0)
264  {
265  if (modelCacheHandle.size() == numberOfCachedModelFiles)
266  {
267  for (uint32_t i = 0; i < modelCacheHandle.size(); ++i)
268  {
269  int modelCacheFileAccessMode = fcntl(*modelCacheHandle[i], F_GETFL) & O_ACCMODE;
270  if (modelCacheFileAccessMode != O_RDONLY)
271  {
272  struct stat statBuffer;
273  if (fstat(*modelCacheHandle[i], &statBuffer) == 0)
274  {
275  long modelDataSize = statBuffer.st_size;
276  if (modelDataSize > 0)
277  {
278  std::vector<uint8_t> modelData(modelDataSize);
279  pread(*modelCacheHandle[i], modelData.data(), modelData.size(), 0);
280  hashValue ^= Hash(modelData);
281  }
282  }
283  }
284  }
285  }
286  }
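    // Write the combined hash followed by the serialized network into the data cache file.
    // PrepareArmnnModelFromCache() reads the leading sizeof(size_t) bytes back as the hash
    // and validates it against a hash recomputed from the cached contents.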
287  if (dataCacheHandle.size() == 1 && hashValue != 0)
288  {
289  std::vector<uint8_t> theHashValue(sizeof(hashValue));
290  ::memcpy(theHashValue.data(), &hashValue, sizeof(hashValue));
291 
292  write(*dataCacheHandle[0], theHashValue.data(), theHashValue.size());
293  pwrite(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size(), theHashValue.size());
294  }
295 
296  bool executeWithDummyInputs = (std::find(options.GetBackends().begin(),
297  options.GetBackends().end(),
298  armnn::Compute::GpuAcc) != options.GetBackends().end());
299 
300  auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
301  runtime.get(),
302  model,
303  options.GetRequestInputsAndOutputsDumpDir(),
304  options.IsGpuProfilingEnabled(),
305  priority);
306 
307  // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
308  // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
309  // Only run this if the GpuAcc backend has been added to options
310  if (std::find(options.GetBackends().begin(),
311  options.GetBackends().end(),
312  armnn::Compute::GpuAcc) != options.GetBackends().end())
313  {
314  if (!preparedModel->ExecuteWithDummyInputs(numInputs, numOutputs))
315  {
316  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be executed";
317  }
318 
319  if (clTunedParameters &&
320  options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
321  {
322  // Now that we've done one inference the CL kernel parameters will have been tuned,
323  // so save the updated file.
324  try
325  {
326  clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
327  }
328  catch (std::exception& error)
329  {
330  VLOG(DRIVER) << "ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file"
331  << options.GetClTunedParametersFile().c_str() << error.what();
332  }
333  }
334  }
335  return std::move(preparedModel);
336 }
337 
338 GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModelFromCache(
339  const armnn::IRuntimePtr& runtime,
340  const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
341  const DriverOptions& options,
342  const std::vector<SharedHandle>& modelCacheHandle,
343  const std::vector<SharedHandle>& dataCacheHandle,
344  const CacheToken& token,
345  bool float32ToFloat16)
346 {
347  VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModelFromCache()";
348 
349  if (!runtime)
350  {
351  return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE)
352  << "ArmnnDriverImpl::prepareModelFromCache(): Device unavailable";
353  }
354 
355  if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN)
356  {
357  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
358  << "ArmnnDriverImpl::prepareModelFromCache(): Token size does not match!";
359  }
360 
361  // Validate dataCacheHandle
362  if (dataCacheHandle.size() != 1)
363  {
364  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
365  << "ArmnnDriverImpl::prepareModelFromCache(): Not valid data cache handle!";
366  }
367 
368  if (!ValidateSharedHandle(dataCacheHandle[0]))
369  {
370  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
371  << "ArmnnDriverImpl::prepareModelFromCache(): Not valid data cache handle!";
372  }
373 
374  size_t cachedDataSize = 0;
375  struct stat dataStatBuffer;
376  if (fstat(*dataCacheHandle[0], &dataStatBuffer) == 0)
377  {
378  cachedDataSize = dataStatBuffer.st_size;
379  }
380  if (cachedDataSize == 0)
381  {
382  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
383  << "ArmnnDriverImpl::prepareModelFromCache(): Not valid cached data!";
384  }
385 
386  // Check that the number of cached model files matches the expected value
387  unsigned int numberOfCachedModelFiles = 0;
388  for (auto& backend : options.GetBackends())
389  {
390  numberOfCachedModelFiles += GetNumberOfCacheFiles(backend);
391  }
392  if (modelCacheHandle.size() != numberOfCachedModelFiles)
393  {
394  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
395  << "ArmnnDriverImpl::prepareModelFromCache(): Model cache handle size does not match.";
396  }
397 
398  // Read the hashValue
399  std::vector<uint8_t> hashValue(sizeof(size_t));
400  pread(*dataCacheHandle[0], hashValue.data(), hashValue.size(), 0);
401 
402  // Read the model
403  std::vector<uint8_t> dataCacheData(cachedDataSize - hashValue.size());
404  pread(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size(), hashValue.size());
405  auto calculatedHashValue = Hash(dataCacheData);
406 
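    // Fold each backend's cached model data into the hash and remember the GpuAcc file
    // descriptor so it can be handed to the backend as CachedFileDescriptor.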
407  int gpuAccCachedFd = -1;
408  if (modelCacheHandle.size() > 0)
409  {
410  unsigned int index = 0;
411  for (auto& backend : options.GetBackends())
412  {
413  // modelCacheHandle size should be equal to numberOfCachedModelFiles
414  // modelCacheHandle vector should be in same order as backends
415  auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
416  if (numberOfCacheFiles > 0)
417  {
418  if (!ValidateSharedHandle(modelCacheHandle[index]))
419  {
420  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
421  << "ArmnnDriverImpl::prepareModelFromCache(): Invalid model cache handle!";
422  }
423  int cachedFd = *modelCacheHandle[index];
424  struct stat statBuffer;
425  if (fstat(cachedFd, &statBuffer) == 0)
426  {
427  long modelDataSize = statBuffer.st_size;
428  if (modelDataSize > 0)
429  {
430  std::vector<uint8_t> modelData(modelDataSize);
431  pread(cachedFd, modelData.data(), modelData.size(), 0);
432  calculatedHashValue ^= Hash(modelData);
433 
434  if (backend == armnn::Compute::GpuAcc)
435  {
436  gpuAccCachedFd = cachedFd;
437  }
438  }
439  }
440  index += numberOfCacheFiles;
441  }
442  }
443  }
444 
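    // The cached data is only trusted if the stored hash matches the hash recomputed
    // from the serialized network and the model cache files.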
445  std::vector<uint8_t> calculatedHashData(sizeof(calculatedHashValue));
446  ::memcpy(calculatedHashData.data(), &calculatedHashValue, sizeof(calculatedHashValue));
447  if (hashValue != calculatedHashData)
448  {
449  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
450  << "ArmnnDriverImpl::prepareModelFromCache(): ValidateHash() failed!";
451  }
452 
453  // Deserialize the network.
454  armnn::INetworkPtr network = armnn::INetworkPtr(nullptr, [](armnn::INetwork*){});
455  try
456  {
457  network = armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(dataCacheData);
458  }
459  catch (std::exception&)
460  {
461  return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
462  << "ArmnnDriverImpl::prepareModelFromCache(): Exception caught from Deserializer!";
463  }
464 
465  // Optimize the network
466  armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
467  armnn::OptimizerOptions OptOptions;
468  OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
469  OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
470 
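    // When preparing from cache the GPU compiled network is not saved again; the
    // previously cached file descriptor is passed back to the GpuAcc backend instead.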
471  armnn::BackendOptions gpuAcc("GpuAcc",
472  {
473  { "FastMathEnabled", options.IsFastMathEnabled() },
474  { "SaveCachedNetwork", false },
475  { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
476  { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
477  { "CachedFileDescriptor", gpuAccCachedFd }
478  });
479 
480  armnn::BackendOptions cpuAcc("CpuAcc",
481  {
482  { "FastMathEnabled", options.IsFastMathEnabled() },
483  { "NumberOfThreads", options.GetNumberOfThreads() }
484  });
485  OptOptions.m_ModelOptions.push_back(gpuAcc);
486  OptOptions.m_ModelOptions.push_back(cpuAcc);
487 
488  std::vector<std::string> errMessages;
489  try
490  {
491  optNet = armnn::Optimize(*network.get(),
492  options.GetBackends(),
493  runtime->GetDeviceSpec(),
494  OptOptions,
495  errMessages);
496  }
497  catch (std::exception& e)
498  {
499  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
500  }
501 
502  // Check that the optimized network is valid.
503  if (!optNet)
504  {
505  std::stringstream message;
506  message << "Invalid optimized network";
507  for (const std::string& msg : errMessages)
508  {
509  message << "\n" << msg;
510  }
511  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
512  }
513 
514  // Export the optimized network graph to a dot file if an output dump directory
515  // has been specified in the drivers' arguments.
516  std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
517  options.GetRequestInputsAndOutputsDumpDir());
518 
519  // Load it into the runtime.
520  armnn::NetworkId netId = 0;
521  std::string msg;
522  armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
523  MemorySource::Undefined,
524  MemorySource::Undefined,
525  options.IsGpuProfilingEnabled());
526  try
527  {
528  if (runtime->LoadNetwork(netId, move(optNet), msg, networkProperties) != armnn::Status::Success)
529  {
530  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
531  }
532  }
533  catch (std::exception& e)
534  {
535  std::stringstream message;
536  message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
537  return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
538  }
539 
540  auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
541  runtime.get(),
542  options.GetRequestInputsAndOutputsDumpDir(),
543  options.IsGpuProfilingEnabled(),
544  Priority::MEDIUM,
545  true);
546  return std::move(preparedModel);
547 }
548 
549 const Capabilities& ArmnnDriverImpl::GetCapabilities(const armnn::IRuntimePtr& runtime)
550 {
551  VLOG(DRIVER) << "ArmnnDriverImpl::GetCapabilities()";
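    // Capabilities are generated once and reused for the lifetime of the driver.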
552  static const Capabilities theCapabilities = GenerateCapabilities();
553  return theCapabilities;
554 }
555 
556 } // namespace armnn_driver