//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ArmnnDriver.hpp"
#include "ArmnnDriverImpl.hpp"
#include "ModelToINetworkTransformer.hpp"

#include <armnn/ArmNN.hpp>

#include <BufferTracker.h>
#include <CpuExecutor.h>
#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace armnn_driver
{

struct CanonicalExecutionContext
{
    ::android::nn::MeasureTiming measureTimings = ::android::nn::MeasureTiming::NO;
    android::nn::TimePoint driverStart;
    android::nn::TimePoint driverEnd;
    android::nn::TimePoint deviceStart;
    android::nn::TimePoint deviceEnd;
};

class ArmnnPreparedModel final : public IPreparedModel,
                                 public std::enable_shared_from_this<ArmnnPreparedModel>
{
public:
    ArmnnPreparedModel(armnn::NetworkId networkId,
                       armnn::IRuntime* runtime,
                       const Model& model,
                       const std::string& requestInputsAndOutputsDumpDir,
                       const bool gpuProfilingEnabled,
                       Priority priority = Priority::MEDIUM);

    ArmnnPreparedModel(armnn::NetworkId networkId,
                       armnn::IRuntime* runtime,
                       const std::string& requestInputsAndOutputsDumpDir,
                       const bool gpuProfilingEnabled,
                       Priority priority = Priority::MEDIUM,
                       const bool prepareModelFromCache = false);

    virtual ~ArmnnPreparedModel();

    ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute(
        const Request& request,
        MeasureTiming measureTiming,
        const OptionalTimePoint& deadline,
        const OptionalDuration& loopTimeoutDuration,
        const std::vector<android::nn::TokenValuePair>& hints,
        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> executeFenced(
        const Request& request,
        const std::vector<SyncFence>& waitFor,
        MeasureTiming measureTiming,
        const OptionalTimePoint& deadline,
        const OptionalDuration& loopTimeoutDuration,
        const OptionalDuration& timeoutDurationAfterFence,
        const std::vector<android::nn::TokenValuePair>& hints,
        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    GeneralResult<android::nn::SharedExecution> createReusableExecution(
        const Request& request,
        MeasureTiming measureTiming,
        const OptionalDuration& loopTimeoutDuration,
        const std::vector<android::nn::TokenValuePair>& hints,
        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;

    GeneralResult<SharedBurst> configureExecutionBurst() const override;

    std::any getUnderlyingResource() const override;

    /// Executes the graph prepared from the request.
    ErrorStatus ExecuteGraph(
        std::shared_ptr<std::vector<android::nn::RunTimePoolInfo>>& pMemPools,
        armnn::InputTensors& inputTensors,
        armnn::OutputTensors& outputTensors,
        CanonicalExecutionContext callback) const;

    Priority GetModelPriority() const;

    /// Executes this model with dummy inputs (e.g. all zeroes).
    /// \return false on failure, otherwise true
    bool ExecuteWithDummyInputs(unsigned int numInputs, unsigned int numOutputs) const;

private:
    void Init();

    ErrorStatus PrepareMemoryForInputs(
        armnn::InputTensors& inputs,
        const Request& request,
        const std::vector<android::nn::RunTimePoolInfo>& memPools) const;

    ErrorStatus PrepareMemoryForOutputs(
        armnn::OutputTensors& outputs,
        std::vector<OutputShape>& outputShapes,
        const Request& request,
        const std::vector<android::nn::RunTimePoolInfo>& memPools) const;

    ErrorStatus PrepareMemoryForIO(armnn::InputTensors& inputs,
                                   armnn::OutputTensors& outputs,
                                   std::vector<android::nn::RunTimePoolInfo>& memPools,
                                   const Request& request) const;

    template <typename TensorBindingCollection>
    void DumpTensorsIfRequired(char const* tensorNamePrefix,
                               const TensorBindingCollection& tensorBindings) const;

    armnn::NetworkId m_NetworkId;
    armnn::IRuntime* m_Runtime;
    const Model m_Model;
    const std::string& m_RequestInputsAndOutputsDumpDir;
    const bool m_GpuProfilingEnabled;
    Priority m_ModelPriority;
    const bool m_PrepareFromCache;
};

} // namespace armnn_driver
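
// Illustrative sketch (not part of the declarations above): how a
// CanonicalExecutionContext is typically populated around a run before being
// handed to ExecuteGraph(). It is assumed here that android::nn::Clock is the
// time source backing android::nn::TimePoint.
//
//     CanonicalExecutionContext ctx;
//     ctx.measureTimings = measureTiming;
//     if (ctx.measureTimings == ::android::nn::MeasureTiming::YES)
//     {
//         ctx.driverStart = android::nn::Clock::now(); // before any driver work
//     }
//     // ... prepare input/output memory, run ExecuteGraph(...), then record
//     // ctx.driverEnd so driver time can be reported back to the runtime.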
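
// Illustrative usage sketch for the execute() entry point. Assumptions:
// `preparedModel` is a std::shared_ptr<ArmnnPreparedModel> obtained via the
// driver's prepareModel() path, and `request` is a fully populated
// android::nn::Request. Empty optionals mean no deadline and no loop timeout;
// the hint vectors may be left empty.
//
//     auto result = preparedModel->execute(request,
//                                          android::nn::MeasureTiming::YES,
//                                          {},   // deadline
//                                          {},   // loopTimeoutDuration
//                                          {},   // hints
//                                          {});  // extensionNameToPrefix
//     if (result.has_value())
//     {
//         const auto& [outputShapes, timing] = result.value();
//         // outputShapes reports the final shape of each output operand;
//         // timing is only meaningful when MeasureTiming::YES was requested.
//     }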
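
// Illustrative sketch for the cache path (hypothetical caller code): when a
// model is restored from the compilation cache (prepareModelFromCache = true)
// there is no Model to introspect, so the restored network can be
// sanity-checked by running it once with zeroed dummy inputs. numInputs and
// numOutputs are assumed to come from the cached metadata.
//
//     if (preparedFromCache
//         && !preparedModel->ExecuteWithDummyInputs(numInputs, numOutputs))
//     {
//         return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
//                << "cached network failed to execute with dummy inputs";
//     }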