#include <android-base/logging.h>
#include <nnapi/IBuffer.h>
#include <nnapi/IDevice.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>

#include "ArmnnDevice.hpp"
#include "ArmnnDriverImpl.hpp"
#include "ModelToINetworkTransformer.hpp"

#include <armnn/Version.hpp>

#include <fstream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

namespace armnn_driver
{

class ArmnnDriver : public IDevice
{
private:
    std::unique_ptr<ArmnnDevice> m_Device;

public:
    ArmnnDriver(DriverOptions options)
    {
        VLOG(DRIVER) << "ArmnnDriver::ArmnnDriver()";
        m_Device = std::make_unique<ArmnnDevice>(std::move(options));
    }

    ~ArmnnDriver()
    {
        VLOG(DRIVER) << "ArmnnDriver::~ArmnnDriver()";
    }
    const std::string& getName() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getName()";
        static const std::string name = "arm-armnn-sl";
        return name;
    }
    const std::string& getVersionString() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getVersionString()";
        // ARMNN_VERSION is "X.Y.Z" (major.minor.patch).
        static const std::string versionString = ARMNN_VERSION;
        return versionString;
    }
    Version getFeatureLevel() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getFeatureLevel()";
        return kVersionFeatureLevel6;
    }
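
    // The device type reported to the runtime; this driver registers as a CPU device.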
    DeviceType getType() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getType()";
        return DeviceType::CPU;
    }
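
    // No vendor extensions are exposed.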
    const std::vector<Extension>& getSupportedExtensions() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getSupportedExtensions()";
        static const std::vector<Extension> extensions = {};
        return extensions;
    }
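
    // Performance capabilities, derived from the ArmNN runtime by the driver implementation.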
    const Capabilities& getCapabilities() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::GetCapabilities()";
        return ArmnnDriverImpl::GetCapabilities(m_Device->m_Runtime);
    }
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getNumberOfCacheFilesNeeded()";
        unsigned int numberOfCachedModelFiles = 0;
        // GetNumberOfCacheFiles returns the number of cache files a backend needs,
        // if that backend supports caching.
        for (auto& backend : m_Device->m_Options.GetBackends())
        {
            numberOfCachedModelFiles += GetNumberOfCacheFiles(backend);
            VLOG(DRIVER) << "ArmnnDriver::getNumberOfCacheFilesNeeded() = " << std::to_string(numberOfCachedModelFiles);
        }
        return std::make_pair(numberOfCachedModelFiles, 1ul);
    }
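
    // Nothing asynchronous to wait for; the driver is ready once constructed.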
    GeneralResult<void> wait() const override
    {
        VLOG(DRIVER) << "ArmnnDriver::wait()";
        return {};
    }
    GeneralResult<std::vector<bool>> getSupportedOperations(const Model& model) const override
    {
        VLOG(DRIVER) << "ArmnnDriver::getSupportedOperations()";

        std::stringstream ss;
        ss << "ArmnnDriverImpl::getSupportedOperations()";
        std::string fileName;
        std::string timestamp;
        if (!m_Device->m_Options.GetRequestInputsAndOutputsDumpDir().empty())
        {
            ss << " : " << m_Device->m_Options.GetRequestInputsAndOutputsDumpDir()
               << "/" << "_getSupportedOperations.txt";
        }
        VLOG(DRIVER) << ss.str().c_str();

        if (!m_Device->m_Options.GetRequestInputsAndOutputsDumpDir().empty())
        {
            // Dump the marker file.
            std::ofstream fileStream;
            fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
            if (fileStream.good())
            {
                fileStream << timestamp << std::endl;
                fileStream << timestamp << std::endl;
            }
        }

        std::vector<bool> result;
        if (!m_Device->m_Runtime)
        {
            return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE) << "Device Unavailable!";
        }

        // Run general model validation; if it fails there is no point analysing the model further.
        if (const auto result = validate(model); !result.ok())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Model!";
        }

        // Attempt to convert the model to an ArmNN network. Individual unsupported
        // operations are tolerated; any other conversion failure is a hard error.
        ModelToINetworkTransformer modelConverter(m_Device->m_Options.GetBackends(),
                                                  model,
                                                  m_Device->m_Options.GetForcedUnsupportedOperations());
        if (modelConverter.GetConversionResult() != ConversionResult::Success
            && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Conversion Error!";
        }

        // Copy the per-operation support flags into the result vector.
        result.reserve(model.main.operations.size());
        for (uint32_t operationIdx = 0; operationIdx < model.main.operations.size(); ++operationIdx)
        {
            bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
            result.push_back(operationSupported);
        }
        return result;
    }
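
    // Compiles a model into a SharedPreparedModel, optionally reading/writing the supplied caches.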
    GeneralResult<SharedPreparedModel> prepareModel(
        const Model& model,
        ExecutionPreference preference,
        Priority priority,
        OptionalTimePoint deadline,
        const std::vector<SharedHandle>& modelCache,
        const std::vector<SharedHandle>& dataCache,
        const CacheToken& token,
        const std::vector<android::nn::TokenValuePair>& hints,
        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override
    {
        VLOG(DRIVER) << "ArmnnDriver::prepareModel()";

        // Validate arguments.
        if (const auto result = validate(model); !result.ok()) {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Model: " << result.error();
        }
        if (const auto result = validate(preference); !result.ok()) {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT)
                   << "Invalid ExecutionPreference: " << result.error();
        }
        if (const auto result = validate(priority); !result.ok()) {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Priority: " << result.error();
        }

        // Check if deadline has passed.
        if (hasDeadlinePassed(deadline)) {
            return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
        }

        return ArmnnDriverImpl::PrepareArmnnModel(m_Device->m_Runtime,
                                                  m_Device->m_ClTunedParameters,
                                                  m_Device->m_Options,
                                                  model,
                                                  modelCache,
                                                  dataCache,
                                                  token,
                                                  model.relaxComputationFloat32toFloat16 && m_Device->m_Options.GetFp16Enabled(),
                                                  priority);
    }
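
    // Restores a prepared model directly from cache files, skipping model conversion.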
    GeneralResult<SharedPreparedModel> prepareModelFromCache(
        OptionalTimePoint deadline,
        const std::vector<SharedHandle>& modelCache,
        const std::vector<SharedHandle>& dataCache,
        const CacheToken& token) const override
    {
        VLOG(DRIVER) << "ArmnnDriver::prepareModelFromCache()";

        // Check if deadline has passed.
        if (hasDeadlinePassed(deadline)) {
            return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
        }

        return ArmnnDriverImpl::PrepareArmnnModelFromCache(m_Device->m_Runtime,
                                                           m_Device->m_ClTunedParameters,
                                                           m_Device->m_Options,
                                                           modelCache,
                                                           dataCache,
                                                           token,
                                                           m_Device->m_Options.GetFp16Enabled());
    }
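
    // Driver-managed buffers are not supported.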
    GeneralResult<SharedBuffer> allocate(const BufferDesc&,
                                         const std::vector<SharedPreparedModel>&,
                                         const std::vector<BufferRole>&,
                                         const std::vector<BufferRole>&) const override
    {
        VLOG(DRIVER) << "ArmnnDriver::allocate()";
        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "ArmnnDriver::allocate -- does not support allocate.";
    }
};

} // namespace armnn_driver