From ce3e84a8d449cbf31cee57e30f0eef6a96c0ce94 Mon Sep 17 00:00:00 2001
From: telsoa01
Date: Fri, 31 Aug 2018 09:31:35 +0100
Subject: Release 18.08

---
 1.0/ArmnnDriver.hpp     |  66 ++++++++++++
 1.0/ArmnnDriverImpl.cpp | 277 ++++++++++++++++++++++++++++++++++++++++++++++++
 1.0/ArmnnDriverImpl.hpp |  41 +++++++
 3 files changed, 384 insertions(+)
 create mode 100644 1.0/ArmnnDriver.hpp
 create mode 100644 1.0/ArmnnDriverImpl.cpp
 create mode 100644 1.0/ArmnnDriverImpl.hpp

diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
new file mode 100644
index 00000000..83484ca9
--- /dev/null
+++ b/1.0/ArmnnDriver.hpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "ArmnnDriverImpl.hpp"
+#include "ArmnnDevice.hpp"
+
+#include <log/log.h>
+
+namespace armnn_driver
+{
+namespace V1_0
+{
+
+class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_0::IDevice
+{
+public:
+    ArmnnDriver(DriverOptions options)
+        : ArmnnDevice(std::move(options))
+    {
+        ALOGV("V1_0::ArmnnDriver::ArmnnDriver()");
+    }
+    ~ArmnnDriver() {}
+
+public:
+    Return<void> getCapabilities(
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+    {
+        ALOGV("V1_0::ArmnnDriver::getCapabilities()");
+
+        return ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations(
+        const ::android::hardware::neuralnetworks::V1_0::Model& model,
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+    {
+        ALOGV("V1_0::ArmnnDriver::getSupportedOperations()");
+
+        return ArmnnDriverImpl::getSupportedOperations(m_Runtime, m_Options, model, cb);
+    }
+
+    Return<ErrorStatus> prepareModel(
+        const ::android::hardware::neuralnetworks::V1_0::Model& model,
+        const android::sp<IPreparedModelCallback>& cb)
+    {
+        ALOGV("V1_0::ArmnnDriver::prepareModel()");
+
+        return ArmnnDriverImpl::prepareModel(m_Runtime, m_ClTunedParameters, m_Options, model, cb);
+    }
+
+    Return<DeviceStatus> getStatus()
+    {
+        ALOGV("V1_0::ArmnnDriver::getStatus()");
+
+        return ArmnnDriverImpl::getStatus();
+    }
+};
+
+} // namespace V1_0
+} // namespace armnn_driver
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
new file mode 100644
index 00000000..5429ebed
--- /dev/null
+++ b/1.0/ArmnnDriverImpl.cpp
@@ -0,0 +1,277 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "ModelToINetworkConverter.hpp"
+#include "ArmnnPreparedModel.hpp"
+#include "SystemPropertiesUtils.hpp"
+
+#if defined(ARMNN_ANDROID_P)
+// The headers of the ML framework have changed between Android O and Android P.
+// The validation functions have been moved into their own header, ValidateHal.h.
+#include <ValidateHal.h>
+#endif
+
+#include <log/log.h>
+
+using namespace std;
+using namespace android;
+using namespace android::nn;
+using namespace android::hardware;
+
+namespace
+{
+
+const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+
+void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
+                            ErrorStatus errorStatus,
+                            const sp<IPreparedModel>& preparedModelPtr)
+{
+    Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
+    // This check is required; if the callback fails and it isn't checked, it will bring down the service.
+    if (!returned.isOk())
+    {
+        ALOGE("V1_0::ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
+              returned.description().c_str());
+    }
+}
+
+Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
+                                     const string& message,
+                                     const sp<IPreparedModelCallback>& callback)
+{
+    ALOGW("V1_0::ArmnnDriverImpl::prepareModel: %s", message.c_str());
+    NotifyCallbackAndCheck(callback, error, nullptr);
+    return error;
+}
+
+} // namespace
+
+namespace armnn_driver
+{
+namespace V1_0
+{
+
+Return<void> ArmnnDriverImpl::getCapabilities(
+    const armnn::IRuntimePtr& runtime,
+    neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+{
+    ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");
+
+    neuralnetworks::V1_0::Capabilities capabilities;
+    if (runtime)
+    {
+        capabilities.float32Performance.execTime =
+            ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);
+
+        capabilities.float32Performance.powerUsage =
+            ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);
+
+        capabilities.quantized8Performance.execTime =
+            ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);
+
+        capabilities.quantized8Performance.powerUsage =
+            ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
+
+        cb(ErrorStatus::NONE, capabilities);
+    }
+    else
+    {
+        capabilities.float32Performance.execTime = 0;
+        capabilities.float32Performance.powerUsage = 0;
+        capabilities.quantized8Performance.execTime = 0;
+        capabilities.quantized8Performance.powerUsage = 0;
+
+        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+    }
+
+    return Void();
+}
+
+Return<void> ArmnnDriverImpl::getSupportedOperations(
+    const armnn::IRuntimePtr& runtime,
+    const DriverOptions& options,
+    const neuralnetworks::V1_0::Model& model,
+    neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+{
+    ALOGV("V1_0::ArmnnDriverImpl::getSupportedOperations()");
+
+    vector<bool> result;
+
+    if (!runtime)
+    {
+        cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
+        return Void();
+    }
+
+    // Run general model validation; if this doesn't pass, we shouldn't analyse the model anyway.
+    if (!android::nn::validateModel(model))
+    {
+        cb(ErrorStatus::INVALID_ARGUMENT, result);
+        return Void();
+    }
+
+    // Attempt to convert the model to an ArmNN input network (INetwork).
+    ModelToINetworkConverter modelConverter(options.GetComputeDevice(), model,
+                                            options.GetForcedUnsupportedOperations());
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success
+        && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
+    {
+        cb(ErrorStatus::GENERAL_FAILURE, result);
+        return Void();
+    }
+
+    // Check for each operation whether it was converted successfully, and copy the flags
+    // into the result (vector<bool>) that we need to return to Android.
+    result.reserve(model.operations.size());
+    for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); operationIdx++)
+    {
+        bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
+        result.push_back(operationSupported);
+    }
+
+    cb(ErrorStatus::NONE, result);
+    return Void();
+}
+
+Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
+    const armnn::IRuntimePtr& runtime,
+    const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+    const DriverOptions& options,
+    const neuralnetworks::V1_0::Model& model,
+    const sp<IPreparedModelCallback>& cb,
+    bool float32ToFloat16)
+{
+    ALOGV("V1_0::ArmnnDriverImpl::prepareModel()");
+
+    if (cb.get() == nullptr)
+    {
+        ALOGW("V1_0::ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    if (!runtime)
+    {
+        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
+                                "V1_0::ArmnnDriverImpl::prepareModel: Device unavailable", cb);
+    }
+
+    if (!android::nn::validateModel(model))
+    {
+        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
+                                "V1_0::ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
+    }
+
+    // Deliberately ignore any unsupported operations requested by the options -
+    // at this point we're being asked to prepare a model that we've already declared support for,
+    // and the operation indices may be different to those in getSupportedOperations anyway.
+    set<unsigned int> unsupportedOperations;
+    ModelToINetworkConverter modelConverter(options.GetComputeDevice(), model,
+                                            unsupportedOperations);
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success)
+    {
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Optimize the network.
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+
+    try
+    {
+        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
+                                 {options.GetComputeDevice()},
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions);
+    }
+    catch (armnn::Exception& e)
+    {
+        stringstream message;
+        message << "armnn::Exception (" << e.what() << ") caught from optimize.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+                         "V1_0::ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the driver's arguments.
+    ExportNetworkGraphToDotFile(*optNet,
+                                options.GetRequestInputsAndOutputsDumpDir(),
+                                model);
+
+    // Load the optimized network into the runtime.
+    armnn::NetworkId netId = 0;
+    try
+    {
+        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
+        {
+            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+                                    "V1_0::ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
+        }
+    }
+    catch (armnn::Exception& e)
+    {
+        stringstream message;
+        message << "armnn::Exception (" << e.what() << ") caught from LoadNetwork.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    unique_ptr<ArmnnPreparedModel> preparedModel(new ArmnnPreparedModel(
+        netId,
+        runtime.get(),
+        model,
+        options.GetRequestInputsAndOutputsDumpDir(),
+        options.IsGpuProfilingEnabled()
+    ));
+
+    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+    // this is enabled) before the first 'real' inference, which removes the overhead of the first inference.
+    preparedModel->ExecuteWithDummyInputs();
+
+    if (clTunedParameters &&
+        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
+    {
+        // Now that we've done one inference, the CL kernel parameters will have been tuned, so save the updated file.
+        try
+        {
+            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+        }
+        catch (const armnn::Exception& error)
+        {
+            ALOGE("V1_0::ArmnnDriverImpl: Failed to save CL tuned parameters file '%s': %s",
+                  options.GetClTunedParametersFile().c_str(), error.what());
+        }
+    }
+
+    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
+
+    return ErrorStatus::NONE;
+}
+
+Return<DeviceStatus> ArmnnDriverImpl::getStatus()
+{
+    ALOGV("V1_0::ArmnnDriverImpl::getStatus()");
+
+    return DeviceStatus::AVAILABLE;
+}
+
+} // namespace V1_0
+} // namespace armnn_driver
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
new file mode 100644
index 00000000..2628682d
--- /dev/null
+++ b/1.0/ArmnnDriverImpl.hpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+namespace V1_0
+{
+
+class ArmnnDriverImpl
+{
+public:
+    static Return<void> getCapabilities(
+        const armnn::IRuntimePtr& runtime,
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb);
+    static Return<void> getSupportedOperations(
+        const armnn::IRuntimePtr& runtime,
+        const DriverOptions& options,
+        const ::android::hardware::neuralnetworks::V1_0::Model& model,
+        ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb);
+    static Return<ErrorStatus> prepareModel(
+        const armnn::IRuntimePtr& runtime,
+        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+        const DriverOptions& options,
+        const ::android::hardware::neuralnetworks::V1_0::Model& model,
+        const android::sp<::android::hardware::neuralnetworks::V1_0::IPreparedModelCallback>& cb,
+        bool float32ToFloat16 = false);
+    static Return<DeviceStatus> getStatus();
+};
+
+} // namespace V1_0
+} // namespace armnn_driver
--
cgit v1.2.1