From e48bdff741568236d3c0747ad3d18a8eba5b36dd Mon Sep 17 00:00:00 2001
From: Matteo Martincigh <matteo.martincigh@arm.com>
Date: Mon, 3 Sep 2018 13:50:50 +0100
Subject: IVGCVSW-1806 Refactored Android-NN-Driver, added common
 "getCapabilities", "getSupportedOperations" and "prepareModel"
 implementations

* Added common base ArmnnDriverImpl class
* Added common template implementation of the driver's "getCapabilities",
  "getSupportedOperations" and "prepareModel" methods
* Refactored ArmnnPreparedModel and RequestThread to support HAL v1.1 models
* Moved "getStatus" to the common base class, as it is shared by both
  HAL implementations
* Refactored the code where necessary

Change-Id: I747334730026d63b4002662523fb93608f67c899
---
 ArmnnDriverImpl.cpp | 288 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 288 insertions(+)
 create mode 100644 ArmnnDriverImpl.cpp

diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
new file mode 100644
index 00000000..0298f3b7
--- /dev/null
+++ b/ArmnnDriverImpl.cpp
@@ -0,0 +1,288 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#define LOG_TAG "ArmnnDriver"
+
+#include "ArmnnDriverImpl.hpp"
+#include "ModelToINetworkConverter.hpp"
+#include "ArmnnPreparedModel.hpp"
+#include "SystemPropertiesUtils.hpp"
+
+#if defined(ARMNN_ANDROID_P)
+// The headers of the ML framework have changed between Android O and Android P.
+// The validation functions have been moved into their own header, ValidateHal.h.
+#include <ValidateHal.h>
+#endif
+
+#include <log/log.h>
+
+using namespace std;
+using namespace android;
+using namespace android::nn;
+using namespace android::hardware;
+
+namespace
+{
+
+const char *g_Float32PerformanceExecTimeName      = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName    = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName   = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+
+void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
+                            ErrorStatus errorStatus,
+                            const sp<IPreparedModel>& preparedModelPtr)
+{
+    Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
+    // This check is required, if the callback fails and it isn't checked it will bring down the service
+    if (!returned.isOk())
+    {
+        ALOGE("ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
+              returned.description().c_str());
+    }
+}
+
+Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
+                                     const string& message,
+                                     const sp<IPreparedModelCallback>& callback)
+{
+    ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
+    NotifyCallbackAndCheck(callback, error, nullptr);
+    return error;
+}
+
+} // namespace
+
+namespace armnn_driver
+{
+
+template<typename HalVersion>
+Return<void> ArmnnDriverImpl<HalVersion>::getCapabilities(
+        const armnn::IRuntimePtr& runtime,
+        HalGetCapabilities_cb cb)
+{
+    ALOGV("ArmnnDriverImpl::getCapabilities()");
+
+    HalCapabilities capabilities;
+    if (runtime)
+    {
+        capabilities.float32Performance.execTime =
+            ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
+
+        capabilities.float32Performance.powerUsage =
+            ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
+
+        capabilities.quantized8Performance.execTime =
+            ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
+
+        capabilities.quantized8Performance.powerUsage =
+            ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
+
+        cb(ErrorStatus::NONE, capabilities);
+    }
+    else
+    {
+        capabilities.float32Performance.execTime      = 0;
+        capabilities.float32Performance.powerUsage    = 0;
+        capabilities.quantized8Performance.execTime   = 0;
+        capabilities.quantized8Performance.powerUsage = 0;
+
+        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+    }
+
+    return Void();
+}
+
+template<typename HalVersion>
+Return<void> ArmnnDriverImpl<HalVersion>::getSupportedOperations(
+        const armnn::IRuntimePtr& runtime,
+        const DriverOptions& options,
+        const HalModel& model,
+        HalGetSupportedOperations_cb cb)
+{
+    ALOGV("ArmnnDriverImpl::getSupportedOperations()");
+
+    vector<bool> result;
+
+    if (!runtime)
+    {
+        cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
+        return Void();
+    }
+
+    // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway.
+    if (!android::nn::validateModel(model))
+    {
+        cb(ErrorStatus::INVALID_ARGUMENT, result);
+        return Void();
+    }
+
+    // Attempt to convert the model to an ArmNN input network (INetwork).
+    ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+                                                        model,
+                                                        options.GetForcedUnsupportedOperations());
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success
+        && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
+    {
+        cb(ErrorStatus::GENERAL_FAILURE, result);
+        return Void();
+    }
+
+    // Check each operation if it was converted successfully and copy the flags
+    // into the result (vector<bool>) that we need to return to Android.
+    result.reserve(model.operations.size());
+    for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); operationIdx++)
+    {
+        bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
+        result.push_back(operationSupported);
+    }
+
+    cb(ErrorStatus::NONE, result);
+    return Void();
+}
+
+template<typename HalVersion>
+Return<ErrorStatus> ArmnnDriverImpl<HalVersion>::prepareModel(
+        const armnn::IRuntimePtr& runtime,
+        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+        const DriverOptions& options,
+        const HalModel& model,
+        const sp<IPreparedModelCallback>& cb,
+        bool float32ToFloat16)
+{
+    ALOGV("ArmnnDriverImpl::prepareModel()");
+
+    if (cb.get() == nullptr)
+    {
+        ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    if (!runtime)
+    {
+        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
+                                "ArmnnDriverImpl::prepareModel: Device unavailable", cb);
+    }
+
+    if (!android::nn::validateModel(model))
+    {
+        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
+                                "ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
+    }
+
+    // Deliberately ignore any unsupported operations requested by the options -
+    // at this point we're being asked to prepare a model that we've already declared support for
+    // and the operation indices may be different to those in getSupportedOperations anyway.
+    set<unsigned int> unsupportedOperations;
+    ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+                                                        model,
+                                                        unsupportedOperations);
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success)
+    {
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+                         "ArmnnDriverImpl::prepareModel: ModelToINetworkConverter failed", cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Optimize the network
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+
+    try
+    {
+        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
+                                 {options.GetComputeDevice()},
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions);
+    }
+    catch (armnn::Exception& e)
+    {
+        stringstream message;
+        message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what() << ") caught from optimize.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+                         "ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the drivers' arguments.
+    ExportNetworkGraphToDotFile(*optNet, options.GetRequestInputsAndOutputsDumpDir(), model);
+
+    // Load it into the runtime.
+    armnn::NetworkId netId = 0;
+    try
+    {
+        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
+        {
+            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+                                    "ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
+        }
+    }
+    catch (armnn::Exception& e)
+    {
+        stringstream message;
+        message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what() << ") caught from LoadNetwork.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    unique_ptr<ArmnnPreparedModel<HalVersion>> preparedModel(
+        new ArmnnPreparedModel<HalVersion>(
+            netId,
+            runtime.get(),
+            model,
+            options.GetRequestInputsAndOutputsDumpDir(),
+            options.IsGpuProfilingEnabled()));
+
+    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+    preparedModel->ExecuteWithDummyInputs();
+
+    if (clTunedParameters &&
+        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
+    {
+        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
+        try
+        {
+            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+        }
+        catch (const armnn::Exception& error)
+        {
+            ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
+                  options.GetClTunedParametersFile().c_str(), error.what());
+        }
+    }
+
+    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
+
+    return ErrorStatus::NONE;
+}
+
+template<typename HalVersion>
+Return<DeviceStatus> ArmnnDriverImpl<HalVersion>::getStatus()
+{
+    ALOGV("ArmnnDriver::getStatus()");
+
+    return DeviceStatus::AVAILABLE;
+}
+
+// Class template specializations
+template class ArmnnDriverImpl<HalVersion_1_0>;
+
+#ifdef ARMNN_ANDROID_NN_V1_1
+template class ArmnnDriverImpl<HalVersion_1_1>;
+#endif
+
+} // namespace armnn_driver
-- 
cgit v1.2.1
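
Note on the HalVersion template parameter: the Hal* aliases used throughout the
file above (HalModel, HalCapabilities, HalGetCapabilities_cb,
HalGetSupportedOperations_cb) are supplied per HAL version by
ArmnnDriverImpl.hpp, which is not part of this file's diff. The following is a
minimal sketch of how such a version-traits struct could wire the V1.0 HIDL
types to those aliases; the member names and header layout here are
assumptions for illustration, not the actual contents of ArmnnDriverImpl.hpp.

// Hypothetical sketch only - the real traits live in ArmnnDriverImpl.hpp.
#include <android/hardware/neuralnetworks/1.0/IDevice.h>

namespace armnn_driver
{

// One traits struct per supported NN HAL version; each one maps the
// version-specific HIDL types onto a common set of names. (Member names
// are assumed, not taken from the patch.)
struct HalVersion_1_0
{
    using Model                     = ::android::hardware::neuralnetworks::V1_0::Model;
    using Capabilities              = ::android::hardware::neuralnetworks::V1_0::Capabilities;
    using getCapabilities_cb        = ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb;
    using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb;
};

// The common implementation then re-exports the traits under the Hal*
// aliases the .cpp file uses, so one method body serves every HAL version.
template<typename HalVersion>
class ArmnnDriverImpl
{
public:
    using HalModel                     = typename HalVersion::Model;
    using HalCapabilities              = typename HalVersion::Capabilities;
    using HalGetCapabilities_cb        = typename HalVersion::getCapabilities_cb;
    using HalGetSupportedOperations_cb = typename HalVersion::getSupportedOperations_cb;
    // getCapabilities, getSupportedOperations, prepareModel and getStatus
    // are declared here and defined in ArmnnDriverImpl.cpp as shown above.
};

} // namespace armnn_driver

A HAL 1.1 traits struct would follow the same shape using the V1_1 types,
which is what allows the explicit instantiations at the end of the file to
cover both HAL versions with a single implementation.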