//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ArmnnDriver.hpp"
#include "ArmnnDriverImpl.hpp"
#include "RequestThread.hpp"
#include "ModelToINetworkConverter.hpp"

#include <NeuralNetworks.h>
#include <armnn/ArmNN.hpp>

#include <string>
#include <vector>

namespace armnn_driver
{

typedef std::function<void(ErrorStatus status,
                           std::vector<::android::hardware::neuralnetworks::V1_2::OutputShape> outputShapes,
                           const ::android::hardware::neuralnetworks::V1_2::Timing& timing,
                           std::string callingFunction)> armnnExecuteCallback_1_2;

struct ArmnnCallback_1_2
{
    armnnExecuteCallback_1_2 callback;
    TimePoint driverStart;
    MeasureTiming measureTiming;
};

template <typename HalVersion>
class ArmnnPreparedModel_1_2 : public V1_2::IPreparedModel
{
public:
    using HalModel = typename V1_2::Model;

    ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                           armnn::IRuntime* runtime,
                           const HalModel& model,
                           const std::string& requestInputsAndOutputsDumpDir,
                           const bool gpuProfilingEnabled);

    virtual ~ArmnnPreparedModel_1_2();

    /// Asynchronous execution entry point defined by the NeuralNetworks 1.0 HAL
    virtual Return<ErrorStatus> execute(const Request& request,
                                        const sp<V1_0::IExecutionCallback>& callback) override;

    /// Asynchronous execution with optional timing measurement (NeuralNetworks 1.2 HAL)
    virtual Return<ErrorStatus> execute_1_2(const Request& request,
                                            MeasureTiming measure,
                                            const sp<V1_2::IExecutionCallback>& callback) override;

    /// Synchronous execution; results are returned through the supplied callback
    virtual Return<void> executeSynchronously(const Request& request,
                                              MeasureTiming measure,
                                              V1_2::IPreparedModel::executeSynchronously_cb cb) override;

    /// Sets up a burst execution session using fast message queues for requests and results
    virtual Return<void> configureExecutionBurst(
            const sp<V1_2::IBurstCallback>& callback,
            const android::hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
            const android::hardware::MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
            configureExecutionBurst_cb cb) override;

    /// execute the graph prepared from the request
    void ExecuteGraph(std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
                      std::shared_ptr<armnn::InputTensors>& pInputTensors,
                      std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
                      ArmnnCallback_1_2 callbackDescriptor);

    /// Executes this model with dummy inputs (e.g. all zeroes).
    /// \return false on failure, otherwise true
    bool ExecuteWithDummyInputs();

private:
    Return<ErrorStatus> Execute(const Request& request,
                                MeasureTiming measureTiming,
                                armnnExecuteCallback_1_2 callback);

    template <typename TensorBindingCollection>
    void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings);

    armnn::NetworkId    m_NetworkId;
    armnn::IRuntime*    m_Runtime;
    V1_2::Model         m_Model;
    // There must be a single RequestThread for all ArmnnPreparedModel objects to ensure serial execution of workloads
    // It is specific to this class, so it is declared as static here
    static RequestThread<ArmnnPreparedModel_1_2, HalVersion, ArmnnCallback_1_2> m_RequestThread;
    uint32_t            m_RequestCount;
    const std::string&  m_RequestInputsAndOutputsDumpDir;
    const bool          m_GpuProfilingEnabled;
};

} // namespace armnn_driver