ArmNN
23.02
ArmnnPreparedModel Class Reference (final)

#include <ArmnnPreparedModel.hpp>

Inheritance diagram for ArmnnPreparedModel (diagram omitted): the class implements the canonical android::nn::IPreparedModel interface.

Public Member Functions

 ArmnnPreparedModel (armnn::NetworkId networkId, armnn::IRuntime *runtime, const Model &model, const std::string &requestInputsAndOutputsDumpDir, const bool gpuProfilingEnabled, Priority priority=Priority::MEDIUM)
 
 ArmnnPreparedModel (armnn::NetworkId networkId, armnn::IRuntime *runtime, const std::string &requestInputsAndOutputsDumpDir, const bool gpuProfilingEnabled, Priority priority=Priority::MEDIUM, const bool prepareModelFromCache=false)
 
virtual ~ArmnnPreparedModel ()
 
ExecutionResult< std::pair< std::vector< OutputShape >, Timing > > execute (const Request &request, MeasureTiming measureTiming, const OptionalTimePoint &deadline, const OptionalDuration &loopTimeoutDuration, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< std::pair< SyncFence, ExecuteFencedInfoCallback > > executeFenced (const Request &request, const std::vector< SyncFence > &waitFor, MeasureTiming measureTiming, const OptionalTimePoint &deadline, const OptionalDuration &loopTimeoutDuration, const OptionalDuration &timeoutDurationAfterFence, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< android::nn::SharedExecution > createReusableExecution (const Request &request, MeasureTiming measureTiming, const OptionalDuration &loopTimeoutDuration, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< SharedBurst > configureExecutionBurst () const override
 
std::any getUnderlyingResource () const override
 
ErrorStatus ExecuteGraph (std::shared_ptr< std::vector< android::nn::RunTimePoolInfo >> &pMemPools, armnn::InputTensors &inputTensors, armnn::OutputTensors &outputTensors, CanonicalExecutionContext callback, const bool pointerMemory=false) const
 Execute the graph prepared from the request.
 
Priority GetModelPriority () const
 
bool ExecuteWithDummyInputs (unsigned int numInputs, unsigned int numOutputs) const
 Executes this model with dummy inputs (e.g. all zeroes).
 

Detailed Description

ArmnnPreparedModel wraps a network that has already been loaded into an armnn::IRuntime and exposes it through the canonical android::nn::IPreparedModel interface, handling request validation, memory mapping/import, execution, and optional profiling.

Definition at line 38 of file ArmnnPreparedModel.hpp.

Constructor & Destructor Documentation

◆ ArmnnPreparedModel() [1/2]

ArmnnPreparedModel (armnn::NetworkId networkId,
                    armnn::IRuntime *runtime,
                    const Model &model,
                    const std::string &requestInputsAndOutputsDumpDir,
                    const bool gpuProfilingEnabled,
                    Priority priority = Priority::MEDIUM)

Definition at line 126 of file ArmnnPreparedModel.cpp.

    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_ModelPriority(priority)
    , m_PrepareFromCache(false)
{
    Init();
}

◆ ArmnnPreparedModel() [2/2]

ArmnnPreparedModel (armnn::NetworkId networkId,
                    armnn::IRuntime *runtime,
                    const std::string &requestInputsAndOutputsDumpDir,
                    const bool gpuProfilingEnabled,
                    Priority priority = Priority::MEDIUM,
                    const bool prepareModelFromCache = false)

Definition at line 143 of file ArmnnPreparedModel.cpp.

    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_ModelPriority(priority)
    , m_PrepareFromCache(prepareModelFromCache)
{
    Init();
}
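
Note that ArmnnPreparedModel derives from std::enable_shared_from_this (createReusableExecution() calls shared_from_this()), so instances must be owned by a std::shared_ptr. The following is a minimal construction sketch, not taken from the driver sources: optNet, model, and the dump directory path are assumptions for illustration.

armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

// optNet is an armnn::IOptimizedNetworkPtr produced by armnn::Optimize() (not shown).
armnn::NetworkId networkId = 0;
runtime->LoadNetwork(networkId, std::move(optNet));

// make_shared is required so that shared_from_this() works later.
auto preparedModel = std::make_shared<armnn_driver::ArmnnPreparedModel>(
        networkId,
        runtime.get(),
        model,               // the driver's copy of the android::nn::Model
        "/data/local/tmp",   // requestInputsAndOutputsDumpDir (assumed path)
        /*gpuProfilingEnabled=*/false);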

◆ ~ArmnnPreparedModel()

~ArmnnPreparedModel ( )
virtual

Definition at line 628 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::~ArmnnPreparedModel()";
    // Get a hold of the profiler used by this model.
    if (m_GpuProfilingEnabled)
    {
        auto profiler = m_Runtime->GetProfiler(m_NetworkId);
        if (profiler)
        {
            // Dump the profiling info to a file if required.
            DumpJsonProfilingIfRequired(m_GpuProfilingEnabled,
                                        m_RequestInputsAndOutputsDumpDir,
                                        m_NetworkId,
                                        profiler.get());
        }
    }
    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);
}

References armnn_driver::DumpJsonProfilingIfRequired(), IRuntime::GetProfiler(), and IRuntime::UnloadNetwork().

Member Function Documentation

◆ configureExecutionBurst()

GeneralResult< SharedBurst > configureExecutionBurst ( ) const
override

Definition at line 600 of file ArmnnPreparedModel.cpp.

{
    // TODO: Implement BURST
    return nullptr;
}

◆ createReusableExecution()

GeneralResult< SharedExecution > createReusableExecution ( const Request &  request,
MeasureTiming  measureTiming,
const OptionalDuration &  loopTimeoutDuration,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 586 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::createReusableExecution()";
    return std::make_shared<DefaultExecution>(shared_from_this(),
                                              request,
                                              measureTiming,
                                              loopTimeoutDuration);
}
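
The returned execution simply replays execute() with the captured arguments. A hedged caller-side sketch, assuming the canonical android::nn::IExecution interface exposes a compute(deadline) entry point:

auto created = preparedModel->createReusableExecution(request,
                                                      MeasureTiming::NO,
                                                      /*loopTimeoutDuration=*/{},
                                                      /*hints=*/{},
                                                      /*extensionNameToPrefix=*/{});
if (created.ok())
{
    android::nn::SharedExecution execution = created.value();
    // Re-run the same request repeatedly without re-validating it each time.
    auto run1 = execution->compute(/*deadline=*/{});
    auto run2 = execution->compute(/*deadline=*/{});
}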

◆ execute()

ExecutionResult< std::pair< std::vector< OutputShape >, Timing > > execute ( const Request &  request,
MeasureTiming  measureTiming,
const OptionalTimePoint &  deadline,
const OptionalDuration &  loopTimeoutDuration,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 295 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "CanonicalDriver::PreparedModel::execute()";

    CanonicalExecutionContext ctx;
    if (measureTiming == MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Clock::now();
    }

    if (!m_PrepareFromCache)
    {
        const auto modelRequest = validateRequestForModel(request, m_Model);
        if (!modelRequest.ok())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
        }
        VLOG(DRIVER) << "ArmnnPreparedModel::execute(): " << GetModelSummary(m_Model).c_str();
    }
    if (hasDeadlinePassed(deadline))
    {
        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
    }

    // Map the memory pools into shared pointers.
    // Use a shared memory pools vector on the heap, as it is passed to the request thread.
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // Allocate the tensors on the heap, as they are passed to the request thread.
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto isPointerTypeMemory = IsPointerTypeMemory(request);
    ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors,
                                                    *outputTensors,
                                                    *memPools,
                                                    request,
                                                    isPointerTypeMemory);

    switch (theErrorStatus)
    {
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return NN_ERROR(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE);
        case ErrorStatus::GENERAL_FAILURE:
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE);
        case ErrorStatus::INVALID_ARGUMENT:
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
        default:
            {}
    }

    std::vector<OutputShape> outputShapes(outputTensors->size());
    for (unsigned int i = 0; i < outputTensors->size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = (*outputTensors)[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }
    Timing theTiming;

    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) before ExecuteGraph";
    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory);
    if (errorStatus != ErrorStatus::NONE)
    {
        return NN_ERROR(errorStatus) << "execute() failed";
    }
    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) after ExecuteGraph";

    return std::make_pair(outputShapes, theTiming);
}

References armnn_driver::ComputeShape(), CanonicalExecutionContext::driverStart, ArmnnPreparedModel::ExecuteGraph(), BaseTensor< MemoryType >::GetInfo(), armnn_driver::GetModelSummary(), and CanonicalExecutionContext::measureTimings.
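
A minimal caller-side sketch, assuming a populated android::nn::Request named request; the empty braces stand for the optional deadline, loop timeout, hints, and extension prefixes:

auto result = preparedModel->execute(request,
                                     MeasureTiming::YES,
                                     /*deadline=*/{},
                                     /*loopTimeoutDuration=*/{},
                                     /*hints=*/{},
                                     /*extensionNameToPrefix=*/{});
if (result.ok())
{
    const auto& [outputShapes, timing] = result.value();
    // outputShapes[i] mirrors ComputeShape() of each output tensor.
}
else
{
    VLOG(DRIVER) << "execute failed: " << result.error().message;
}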

◆ executeFenced()

GeneralResult< std::pair< SyncFence, ExecuteFencedInfoCallback > > executeFenced ( const Request &  request,
const std::vector< SyncFence > &  waitFor,
MeasureTiming  measureTiming,
const OptionalTimePoint &  deadline,
const OptionalDuration &  loopTimeoutDuration,
const OptionalDuration &  timeoutDurationAfterFence,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 478 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced()";

    if (!m_PrepareFromCache)
    {
        const auto modelRequest = validateRequestForModel(request, m_Model);
        if (!modelRequest.ok())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
        }
        VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced(): " << GetModelSummary(m_Model).c_str();
    }
    if (hasDeadlinePassed(deadline))
    {
        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
    }

    CanonicalExecutionContext ctx;
    if (measureTiming == MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Clock::now();
    }

    // Wait for the dependent events to signal.
    for (const auto& syncFence : waitFor)
    {
        if (!syncFence.getSharedHandle())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
        }
        if (syncFence.syncWait({}) != SyncFence::FenceState::SIGNALED)
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "syncWait failed";
        }
    }

    android::nn::TimePoint fenceExecutionStart;
    if (measureTiming == MeasureTiming::YES)
    {
        fenceExecutionStart = Clock::now();
    }

    // Map the memory pools into shared pointers.
    // Use a shared memory pools vector on the heap, as it is passed to the request thread.
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // Allocate the tensors on the heap, as they are passed to the request thread.
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto isPointerTypeMemory = IsPointerTypeMemory(request);
    ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors,
                                                    *outputTensors,
                                                    *memPools,
                                                    request,
                                                    isPointerTypeMemory);

    if (theErrorStatus != ErrorStatus::NONE)
    {
        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "executeFenced() failed";
    }

    Timing timingSinceLaunch = {};
    Timing timingAfterFence  = {};
    if (measureTiming == MeasureTiming::YES)
    {
        timingAfterFence.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
        timingAfterFence.timeInDriver = ctx.driverEnd - fenceExecutionStart;
        VLOG(DRIVER) << "executeFenced timingSinceLaunch = " << timingAfterFence.timeOnDevice;
        VLOG(DRIVER) << "executeFenced timingAfterFence = " << timingAfterFence.timeInDriver;
    }

    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) before ExecuteGraph";
    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory);
    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) after ExecuteGraph";

    ExecuteFencedInfoCallback armnnFencedExecutionCallback =
        [timingSinceLaunch, timingAfterFence, errorStatus]() {

            GeneralResult<std::pair<Timing, Timing>> result;

            switch (errorStatus)
            {
                case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
                    result.error().code = ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    break; // break prevents fall-through into the success path
                case ErrorStatus::GENERAL_FAILURE:
                    result.error().code = ErrorStatus::GENERAL_FAILURE;
                    break;
                case ErrorStatus::INVALID_ARGUMENT:
                    result.error().code = ErrorStatus::INVALID_ARGUMENT;
                    break;
                default:
                {
                    result.value() = std::make_pair(timingSinceLaunch, timingAfterFence);
                }
            }
            return result;
        };
    return std::make_pair(SyncFence::createAsSignaled(), std::move(armnnFencedExecutionCallback));
}

References CanonicalExecutionContext::deviceEnd, CanonicalExecutionContext::deviceStart, CanonicalExecutionContext::driverEnd, CanonicalExecutionContext::driverStart, ArmnnPreparedModel::ExecuteGraph(), armnn_driver::GetModelSummary(), and CanonicalExecutionContext::measureTimings.
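
Since the driver runs the work synchronously and hands back an already-signaled fence, consuming the result reduces to unpacking the pair. A sketch, with request assumed to be populated as above:

auto fenced = preparedModel->executeFenced(request,
                                           /*waitFor=*/{},
                                           MeasureTiming::NO,
                                           /*deadline=*/{},
                                           /*loopTimeoutDuration=*/{},
                                           /*timeoutDurationAfterFence=*/{},
                                           /*hints=*/{},
                                           /*extensionNameToPrefix=*/{});
if (fenced.ok())
{
    auto [syncFence, fencedInfoCallback] = std::move(fenced).value();
    syncFence.syncWait({});              // returns immediately: the fence is created signaled
    auto timings = fencedInfoCallback(); // GeneralResult<std::pair<Timing, Timing>>
}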

◆ ExecuteGraph()

ErrorStatus ExecuteGraph (std::shared_ptr< std::vector< android::nn::RunTimePoolInfo >> &pMemPools,
                          armnn::InputTensors &inputTensors,
                          armnn::OutputTensors &outputTensors,
                          CanonicalExecutionContext callback,
                          const bool pointerMemory = false) const

execute the graph prepared from the request

Definition at line 375 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph(...)";

    DumpTensorsIfRequired("Input", inputTensors);
    std::vector<armnn::ImportedInputId> importedInputIds;
    std::vector<armnn::ImportedOutputId> importedOutputIds;
    try
    {
        if (ctx.measureTimings == MeasureTiming::YES)
        {
            ctx.deviceStart = Clock::now();
        }
        armnn::Status status;
        VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false";
        importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
        if (!importedInputIds.empty())
        {
            // Some or all of the input tensors have been imported; remove the
            // imported ones from inputTensors so they are not passed in twice.
            for (armnn::ImportedInputId& importedId : importedInputIds)
            {
                inputTensors.erase(
                    std::remove_if(
                        inputTensors.begin(), inputTensors.end(),
                        [&importedId](std::pair<armnn::LayerBindingId, class armnn::ConstTensor>& element) {
                            return (element.first == static_cast<int>(importedId));
                        }),
                    inputTensors.end());
            }
        }
        importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
        if (!importedOutputIds.empty())
        {
            // Likewise, remove the successfully imported output tensors from
            // outputTensors.
            for (armnn::ImportedOutputId& importedId : importedOutputIds)
            {
                outputTensors.erase(
                    std::remove_if(
                        outputTensors.begin(), outputTensors.end(),
                        [&importedId](std::pair<armnn::LayerBindingId, class armnn::Tensor>& element) {
                            return (element.first == static_cast<int>(importedId));
                        }),
                    outputTensors.end());
            }
        }
        status = m_Runtime->EnqueueWorkload(m_NetworkId,
                                            inputTensors,
                                            outputTensors,
                                            importedInputIds,
                                            importedOutputIds);

        if (ctx.measureTimings == MeasureTiming::YES)
        {
            ctx.deviceEnd = Clock::now();
        }
        if (status != armnn::Status::Success)
        {
            VLOG(DRIVER) << "ArmnnPreparedModel:ExecuteGraph EnqueueWorkload failed";
            return ErrorStatus::GENERAL_FAILURE;
        }
    }
    catch (armnn::Exception& e)
    {
        VLOG(DRIVER) << "armnn:Exception caught from EnqueueWorkload: " << e.what();
        return ErrorStatus::GENERAL_FAILURE;
    }
    catch (std::exception& e)
    {
        VLOG(DRIVER) << "std::exception caught from EnqueueWorkload: " << e.what();
        return ErrorStatus::GENERAL_FAILURE;
    }

    if (!pointerMemory && (!importedInputIds.empty() || !importedOutputIds.empty()))
    {
        CommitPools(*pMemPools);
    }
    DumpTensorsIfRequired("Output", outputTensors);

    if (ctx.measureTimings == MeasureTiming::YES)
    {
        ctx.driverEnd = Clock::now();
        Timing timing;
        timing.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
        timing.timeInDriver = ctx.driverEnd - ctx.driverStart;
        VLOG(DRIVER) << "ArmnnPreparedModel::execute timing - Device = "
                     << timing.timeOnDevice << " Driver = " << timing.timeInDriver;
    }
    return ErrorStatus::NONE;
}

References armnn_driver::CommitPools(), CanonicalExecutionContext::deviceEnd, CanonicalExecutionContext::deviceStart, CanonicalExecutionContext::driverEnd, CanonicalExecutionContext::driverStart, IRuntime::EnqueueWorkload(), IRuntime::ImportInputs(), IRuntime::ImportOutputs(), armnn::Malloc, CanonicalExecutionContext::measureTimings, armnn::Success, and Exception::what().

Referenced by ArmnnPreparedModel::execute(), ArmnnPreparedModel::executeFenced(), and ArmnnPreparedModel::ExecuteWithDummyInputs().
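
The one non-obvious step above is the erase/remove_if pass that strips successfully imported tensors, because EnqueueWorkload() must not receive bindings that were already pre-imported. A self-contained sketch of the same pattern on simplified stand-in types:

#include <algorithm>
#include <utility>
#include <vector>

// Stand-in for an armnn::InputTensors / armnn::OutputTensors entry.
using Binding = std::pair<int /*LayerBindingId*/, const void* /*tensor data*/>;

void RemoveImported(std::vector<Binding>& tensors,
                    const std::vector<unsigned int>& importedIds)
{
    for (unsigned int importedId : importedIds)
    {
        // Classic erase-remove: move matching bindings to the end, then erase them.
        tensors.erase(std::remove_if(tensors.begin(), tensors.end(),
                                     [importedId](const Binding& element)
                                     { return element.first == static_cast<int>(importedId); }),
                      tensors.end());
    }
}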

◆ ExecuteWithDummyInputs()

bool ExecuteWithDummyInputs ( unsigned int  numInputs,
unsigned int  numOutputs 
) const

Executes this model with dummy inputs (e.g. all zeroes).

Returns
false on failure, otherwise true

Definition at line 648 of file ArmnnPreparedModel.cpp.

{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < numInputs; i++)
    {
        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // inputTensors (of type InputTensors) is composed of a vector of ConstTensors.
        // Therefore, set the isConstant parameter of every input TensorInfo to true.
        inputTensorInfo.SetConstant();
        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    CanonicalExecutionContext ctx;
    ctx.measureTimings = MeasureTiming::NO;
    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();

    auto errorStatus = ExecuteGraph(memPools,
                                    inputTensors,
                                    outputTensors,
                                    ctx);

    return errorStatus == ErrorStatus::NONE;
}

References ArmnnPreparedModel::ExecuteGraph(), IRuntime::GetInputTensorInfo(), TensorInfo::GetNumBytes(), IRuntime::GetOutputTensorInfo(), CanonicalExecutionContext::measureTimings, and TensorInfo::SetConstant().
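
For example, a warm-up/self-check call after preparing a model from the cache might look like this (the input/output counts must match the loaded network):

if (!preparedModel->ExecuteWithDummyInputs(/*numInputs=*/1, /*numOutputs=*/1))
{
    VLOG(DRIVER) << "Dummy-input warm-up execution failed";
}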

◆ GetModelPriority()

Priority GetModelPriority ( ) const

Definition at line 472 of file ArmnnPreparedModel.cpp.

{
    return m_ModelPriority;
}

◆ getUnderlyingResource()

std::any getUnderlyingResource ( ) const
override

Definition at line 606 of file ArmnnPreparedModel.cpp.

{
    return &m_Model;
}
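
The std::any holds a const Model* aimed at the driver's copy of the model. A caller-side sketch of recovering it (valid only while the prepared model is alive):

std::any resource = preparedModel->getUnderlyingResource();
if (const Model** heldModel = std::any_cast<const Model*>(&resource))
{
    // *heldModel points at the Model passed to the constructor.
}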

The documentation for this class was generated from the following files:
ArmnnPreparedModel.hpp
ArmnnPreparedModel.cpp