From e48bdff741568236d3c0747ad3d18a8eba5b36dd Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Mon, 3 Sep 2018 13:50:50 +0100
Subject: IVGCVSW-1806 Refactored Android-NN-Driver, added common
 "getCapabilities", "getSupportedOperations" and "prepareModel"
 implementations

* Added common base ArmnnDriverImpl class
* Added common template implementation of the driver's "getCapabilities",
  "getSupportedOperations" and "prepareModel" methods
* Refactored ArmnnPreparedModel and RequestThread to support HAL v1.1 models
* Moved "getStatus" to the common base class, as it is shared by both HAL
  implementations
* Refactored the code where necessary

Change-Id: I747334730026d63b4002662523fb93608f67c899
---
 1.0/ArmnnDriver.hpp     |  26 +++--
 1.0/ArmnnDriverImpl.cpp | 277 ------------------------------------------------
 1.0/ArmnnDriverImpl.hpp |  41 -------
 3 files changed, 17 insertions(+), 327 deletions(-)
 delete mode 100644 1.0/ArmnnDriverImpl.cpp
 delete mode 100644 1.0/ArmnnDriverImpl.hpp

(limited to '1.0')

diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 83484ca9..18e25968 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -7,8 +7,8 @@
 
 #include <HalInterfaces.h>
 
-#include "ArmnnDriverImpl.hpp"
 #include "ArmnnDevice.hpp"
+#include "../ArmnnDriverImpl.hpp"
 
 #include <log/log.h>
@@ -29,36 +29,44 @@ public:
 public:
     Return<void> getCapabilities(
-        ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
     {
         ALOGV("V1_0::ArmnnDriver::getCapabilities()");
 
-        return ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+        return armnn_driver::ArmnnDriverImpl::getCapabilities(m_Runtime,
+                                                              cb);
     }
 
     Return<void> getSupportedOperations(
         const ::android::hardware::neuralnetworks::V1_0::Model& model,
-        ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
     {
         ALOGV("V1_0::ArmnnDriver::getSupportedOperations()");
 
-        return ArmnnDriverImpl::getSupportedOperations(m_Runtime, m_Options, model, cb);
+        return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime,
+                                                                     m_Options,
+                                                                     model,
+                                                                     cb);
     }
 
     Return<ErrorStatus> prepareModel(
         const ::android::hardware::neuralnetworks::V1_0::Model& model,
-        const android::sp<IPreparedModelCallback>& cb)
+        const android::sp<IPreparedModelCallback>& cb) override
    {
         ALOGV("V1_0::ArmnnDriver::prepareModel()");
 
-        return ArmnnDriverImpl::prepareModel(m_Runtime, m_ClTunedParameters, m_Options, model, cb);
+        return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime,
+                                                           m_ClTunedParameters,
+                                                           m_Options,
+                                                           model,
+                                                           cb);
     }
 
-    Return<DeviceStatus> getStatus()
+    Return<DeviceStatus> getStatus() override
     {
         ALOGV("V1_0::ArmnnDriver::getStatus()");
 
-        return ArmnnDriverImpl::getStatus();
+        return armnn_driver::ArmnnDriverImpl::getStatus();
     }
 };
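The common, templated ArmnnDriverImpl that the commit message describes lives at the repository root and is outside this '1.0'-limited view, so only its call sites above are visible. As a rough standalone sketch of the refactoring pattern itself, one shared method template serving two HAL model types behind thin per-version wrappers (all types and names below are hypothetical stand-ins, not the actual ArmNN classes):

// Standalone illustration of the "common templated impl" pattern; compiles
// on its own with C++14 and needs none of the Android or ArmNN headers.
#include <iostream>
#include <vector>

namespace V1_0 { struct Model { std::vector<int> operations; }; }
namespace V1_1 { struct Model { std::vector<int> operations; bool relaxFp32 = false; }; }

// Common implementation, templated on the HAL model type, mirroring how a
// shared ArmnnDriverImpl can serve both the V1_0 and V1_1 front-end classes.
struct CommonDriverImpl
{
    template <typename HalModel>
    static std::vector<bool> getSupportedOperations(const HalModel& model)
    {
        // One copy of the logic; the model type is deduced at the call site.
        return std::vector<bool>(model.operations.size(), true);
    }
};

// Thin per-HAL wrappers keep only the version-specific surface.
struct DriverV1_0
{
    std::vector<bool> getSupportedOperations(const V1_0::Model& m)
    {
        return CommonDriverImpl::getSupportedOperations(m);
    }
};

struct DriverV1_1
{
    std::vector<bool> getSupportedOperations(const V1_1::Model& m)
    {
        return CommonDriverImpl::getSupportedOperations(m);
    }
};

int main()
{
    V1_0::Model m0{{1, 2, 3}};
    V1_1::Model m1{{4, 5}, true};
    std::cout << DriverV1_0().getSupportedOperations(m0).size() << " "
              << DriverV1_1().getSupportedOperations(m1).size() << std::endl;
    return 0;
}

The wrappers keep only the HAL-specific surface (signatures and logging tags), while the shared body is written once, which is what lets this commit delete the V1_0-only ArmnnDriverImpl files below.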
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
deleted file mode 100644
index 21a4f2e3..00000000
--- a/1.0/ArmnnDriverImpl.cpp
+++ /dev/null
@@ -1,277 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#include "ArmnnDriverImpl.hpp"
-#include "ModelToINetworkConverter.hpp"
-#include "ArmnnPreparedModel.hpp"
-#include "SystemPropertiesUtils.hpp"
-
-#if defined(ARMNN_ANDROID_P)
-// The headers of the ML framework have changed between Android O and Android P.
-// The validation functions have been moved into their own header, ValidateHal.h.
-#include <ValidateHal.h>
-#endif
-
-#include <log/log.h>
-
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
-namespace
-{
-
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
-
-void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
-                            ErrorStatus errorStatus,
-                            const sp<IPreparedModel>& preparedModelPtr)
-{
-    Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
-    // This check is required, if the callback fails and it isn't checked it will bring down the service
-    if (!returned.isOk())
-    {
-        ALOGE("V1_0::ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
-              returned.description().c_str());
-    }
-}
-
-Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
-                                     const string& message,
-                                     const sp<IPreparedModelCallback>& callback)
-{
-    ALOGW("V1_0::ArmnnDriverImpl::prepareModel: %s", message.c_str());
-    NotifyCallbackAndCheck(callback, error, nullptr);
-    return error;
-}
-
-} // namespace
-
-namespace armnn_driver
-{
-namespace V1_0
-{
-
-Return<void> ArmnnDriverImpl::getCapabilities(
-    const armnn::IRuntimePtr& runtime,
-    neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
-{
-    ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");
-
-    neuralnetworks::V1_0::Capabilities capabilities;
-    if (runtime)
-    {
-        capabilities.float32Performance.execTime =
-            ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
-
-        capabilities.float32Performance.powerUsage =
-            ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
-
-        capabilities.quantized8Performance.execTime =
-            ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
-
-        capabilities.quantized8Performance.powerUsage =
-            ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
-
-        cb(ErrorStatus::NONE, capabilities);
-    }
-    else
-    {
-        capabilities.float32Performance.execTime = 0;
-        capabilities.float32Performance.powerUsage = 0;
-        capabilities.quantized8Performance.execTime = 0;
-        capabilities.quantized8Performance.powerUsage = 0;
-
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
-    }
-
-    return Void();
-}
-
-Return<void> ArmnnDriverImpl::getSupportedOperations(
-    const armnn::IRuntimePtr& runtime,
-    const DriverOptions& options,
-    const neuralnetworks::V1_0::Model& model,
-    neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
-{
-    ALOGV("V1_0::ArmnnDriverImpl::getSupportedOperations()");
-
-    vector<bool> result;
-
-    if (!runtime)
-    {
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
-        return Void();
-    }
-
-    // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway
-    if (!android::nn::validateModel(model))
-    {
-        cb(ErrorStatus::INVALID_ARGUMENT, result);
-        return Void();
-    }
-
-    // Attempt to convert the model to an ArmNN input network (INetwork).
-    armnn_driver::ModelToINetworkConverter modelConverter(options.GetComputeDevice(),
-        model, options.GetForcedUnsupportedOperations());
-
-    if (modelConverter.GetConversionResult() != ConversionResult::Success
-        && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
-    {
-        cb(ErrorStatus::GENERAL_FAILURE, result);
-        return Void();
-    }
-
-    // Check each operation if it was converted successfully and copy the flags
-    // into the result (vector<bool>) that we need to return to Android
-    result.reserve(model.operations.size());
-    for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); operationIdx++)
-    {
-        bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
-        result.push_back(operationSupported);
-    }
-
-    cb(ErrorStatus::NONE, result);
-    return Void();
-}
-
-Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
-    const armnn::IRuntimePtr& runtime,
-    const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-    const DriverOptions& options,
-    const neuralnetworks::V1_0::Model& model,
-    const sp<IPreparedModelCallback>& cb,
-    bool float32ToFloat16)
-{
-    ALOGV("V1_0::ArmnnDriverImpl::prepareModel()");
-
-    if (cb.get() == nullptr)
-    {
-        ALOGW("V1_0::ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
-        return ErrorStatus::INVALID_ARGUMENT;
-    }
-
-    if (!runtime)
-    {
-        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
-                                "V1_0::ArmnnDriverImpl::prepareModel: Device unavailable", cb);
-    }
-
-    if (!android::nn::validateModel(model))
-    {
-        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
-                                "V1_0::ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
-    }
-
-    // Deliberately ignore any unsupported operations requested by the options -
-    // at this point we're being asked to prepare a model that we've already declared support for
-    // and the operation indices may be different to those in getSupportedOperations anyway.
-    set<unsigned int> unsupportedOperations;
-    armnn_driver::ModelToINetworkConverter modelConverter(options.GetComputeDevice(), model,
-        unsupportedOperations);
-
-    if (modelConverter.GetConversionResult() != ConversionResult::Success)
-    {
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
-        return ErrorStatus::NONE;
-    }
-
-    // optimize the network
-    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
-    armnn::OptimizerOptions OptOptions;
-    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
-
-    try
-    {
-        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
-                                 {options.GetComputeDevice()},
-                                 runtime->GetDeviceSpec(),
-                                 OptOptions);
-    }
-    catch (armnn::Exception &e)
-    {
-        stringstream message;
-        message << "armnn::Exception (" << e.what() << ") caught from optimize.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
-    }
-
-    // Check that the optimized network is valid.
-    if (!optNet)
-    {
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
-                         "V1_0::ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
-        return ErrorStatus::NONE;
-    }
-
-    // Export the optimized network graph to a dot file if an output dump directory
-    // has been specified in the drivers' arguments.
-    ExportNetworkGraphToDotFile(*optNet,
-                                options.GetRequestInputsAndOutputsDumpDir(),
-                                model);
-
-    // load it into the runtime
-    armnn::NetworkId netId = 0;
-    try
-    {
-        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
-        {
-            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
-                                    "V1_0::ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
-        }
-    }
-    catch (armnn::Exception& e)
-    {
-        stringstream message;
-        message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
-    }
-
-    unique_ptr<ArmnnPreparedModel> preparedModel(new ArmnnPreparedModel(
-        netId,
-        runtime.get(),
-        model,
-        options.GetRequestInputsAndOutputsDumpDir(),
-        options.IsGpuProfilingEnabled()
-    ));
-
-    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
-    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
-    preparedModel->ExecuteWithDummyInputs();
-
-    if (clTunedParameters &&
-        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
-    {
-        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
-        try
-        {
-            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
-        }
-        catch (const armnn::Exception& error)
-        {
-            ALOGE("V1_0::ArmnnDriverImpl: Failed to save CL tuned parameters file '%s': %s",
-                  options.GetClTunedParametersFile().c_str(), error.what());
-        }
-    }
-
-    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
-
-    return ErrorStatus::NONE;
-}
-
-Return<DeviceStatus> ArmnnDriverImpl::getStatus()
-{
-    ALOGV("V1_0::ArmnnDriverImpl::getStatus()");
-
-    return DeviceStatus::AVAILABLE;
-}
-
-} // armnn_driver::namespace V1_0
-} // namespace armnn_driver
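The deleted getCapabilities above fills in its performance numbers through ParseSystemProperty, which is declared in SystemPropertiesUtils.hpp and not shown in this patch. A minimal sketch of what such a helper can look like on Android, assuming the NDK's __system_property_get API; the actual ArmNN implementation may differ:

// Hypothetical sketch of a ParseSystemProperty-style helper: read an Android
// system property and fall back to a default when it is unset or unparsable.
#include <cstdlib>
#include <sys/system_properties.h>

float ParseSystemProperty(const char* name, float defaultValue)
{
    char value[PROP_VALUE_MAX] = "";
    // __system_property_get returns the length of the value read (0 if unset).
    if (__system_property_get(name, value) > 0)
    {
        char* end = nullptr;
        float parsed = std::strtof(value, &end);
        if (end != value)  // at least one character was consumed
        {
            return parsed;
        }
    }
    return defaultValue;  // property missing or not a number
}

With a helper along these lines, an unset or malformed property such as ArmNN.float32Performance.execTime simply falls back to the 0.1f default used in the deleted code above.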
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
deleted file mode 100644
index 2628682d..00000000
--- a/1.0/ArmnnDriverImpl.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#pragma once
-
-#include <HalInterfaces.h>
-
-#include "DriverOptions.hpp"
-
-#include <armnn/ArmNN.hpp>
-
-namespace armnn_driver
-{
-namespace V1_0
-{
-
-class ArmnnDriverImpl
-{
-public:
-    static Return<void> getCapabilities(
-        const armnn::IRuntimePtr& runtime,
-        ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb);
-    static Return<void> getSupportedOperations(
-        const armnn::IRuntimePtr& runtime,
-        const DriverOptions& options,
-        const ::android::hardware::neuralnetworks::V1_0::Model& model,
-        ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb);
-    static Return<ErrorStatus> prepareModel(
-        const armnn::IRuntimePtr& runtime,
-        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-        const DriverOptions& options,
-        const ::android::hardware::neuralnetworks::V1_0::Model& model,
-        const android::sp<IPreparedModelCallback>& cb,
-        bool float32ToFloat16 = false);
-    static Return<DeviceStatus> getStatus();
-};
-
-} // namespace armnn_driver::V1_0
-} // namespace armnn_driver
--
cgit v1.2.1
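The ArmnnPreparedModel and RequestThread changes mentioned in the commit message are likewise outside the '1.0' view of this patch. For orientation, a self-contained sketch of the general request-thread pattern such a class follows, a worker thread draining a queue of asynchronous execution requests; the names and structure here are illustrative only, not the actual ArmNN implementation:

// Illustrative request-thread pattern: execuction requests are posted from
// binder threads and run one at a time on a dedicated worker thread.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class RequestThread
{
public:
    RequestThread() : m_Thread(&RequestThread::Loop, this) {}

    ~RequestThread()
    {
        {
            std::lock_guard<std::mutex> lock(m_Mutex);
            m_Stop = true;
        }
        m_Cv.notify_one();
        m_Thread.join();
    }

    // Called from the binder thread: enqueue work and return immediately.
    void PostRequest(std::function<void()> work)
    {
        {
            std::lock_guard<std::mutex> lock(m_Mutex);
            m_Queue.push(std::move(work));
        }
        m_Cv.notify_one();
    }

private:
    void Loop()
    {
        for (;;)
        {
            std::function<void()> work;
            {
                std::unique_lock<std::mutex> lock(m_Mutex);
                m_Cv.wait(lock, [this] { return m_Stop || !m_Queue.empty(); });
                if (m_Stop && m_Queue.empty())
                {
                    return;  // drained and asked to shut down
                }
                work = std::move(m_Queue.front());
                m_Queue.pop();
            }
            work();  // run the inference outside the lock
        }
    }

    std::queue<std::function<void()>> m_Queue;
    std::mutex m_Mutex;
    std::condition_variable m_Cv;
    bool m_Stop = false;
    std::thread m_Thread;  // declared last so it starts after the other members

    RequestThread(const RequestThread&) = delete;
    RequestThread& operator=(const RequestThread&) = delete;
};

Making such a worker generic over the HAL model type (as this commit's refactor of ArmnnPreparedModel does for V1_0 and V1_1 models) then only requires templating the queued request payload rather than duplicating the threading logic.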