From b0717b5241a15e3e4d37a1b51b6e5fd9a92a664f Mon Sep 17 00:00:00 2001
From: arovir01
Date: Wed, 5 Sep 2018 17:03:25 +0100
Subject: IVGCVSW-1806: Refactor Android-NN-Driver ModelToINetworkConverter

* Moved conversion logic into new V1_0 and V1_1 HalPolicy classes
* Extracted common helper functions into ConversionUtils class

Change-Id: I1ab50edc266dd528c0cb22a5cd1aa65e103674d9
---
 1.1/ArmnnDriver.hpp     | 105 +++++++++++++++++++++++-------------------------
 1.1/ArmnnDriverImpl.cpp |  36 +++++++----------
 1.1/ArmnnDriverImpl.hpp |   9 ++---
 1.1/HalPolicy.cpp       |  89 ++++++++++++++++++++++++++++++++++++++++
 1.1/HalPolicy.hpp       |  31 ++++++++++++++
 5 files changed, 189 insertions(+), 81 deletions(-)
 create mode 100644 1.1/HalPolicy.cpp
 create mode 100644 1.1/HalPolicy.hpp
(limited to '1.1')

diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index 38248053..ef8bca8a 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -9,114 +9,109 @@
 #include "ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
 #include "../ArmnnDriverImpl.hpp"
 #include "../1.0/ArmnnDriverImpl.hpp"
+#include "../1.0/HalPolicy.hpp"
 
 #include <log/log.h>
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_1::IDevice
+class ArmnnDriver : public ArmnnDevice, public V1_1::IDevice
 {
 public:
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
-        ALOGV("V1_1::ArmnnDriver::ArmnnDriver()");
+        ALOGV("hal_1_1::ArmnnDriver::ArmnnDriver()");
     }
     ~ArmnnDriver() {}
 
 public:
-    Return<void> getCapabilities(
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities()");
 
-        return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime,
-                                                      cb);
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations()");
 
-        return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime,
-                                                                     m_Options,
-                                                                     model,
-                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime,
+                                                                     m_Options,
+                                                                     model,
+                                                                     cb);
     }
 
-    Return<ErrorStatus> prepareModel(
-            const ::android::hardware::neuralnetworks::V1_0::Model& model,
-            const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
+                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel()");
 
-        return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime,
-                                                           m_ClTunedParameters,
-                                                           m_Options,
-                                                           model,
-                                                           cb);
+        return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime,
+                                                           m_ClTunedParameters,
+                                                           m_Options,
+                                                           model,
+                                                           cb);
     }
 
-    Return<void> getCapabilities_1_1(
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb) override
+    Return<void> getCapabilities_1_1(V1_1::IDevice::getCapabilities_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getCapabilities_1_1()");
 
-        return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime,
-                                                          cb);
+        return hal_1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
     }
 
-    Return<void> getSupportedOperations_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::getSupportedOperations_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::getSupportedOperations_1_1()");
 
-        return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime,
-                                                                     m_Options,
-                                                                     model,
-                                                                     cb);
+        return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime,
+                                                                     m_Options,
+                                                                     model,
+                                                                     cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(
-            const ::android::hardware::neuralnetworks::V1_1::Model& model,
-            ::android::hardware::neuralnetworks::V1_1::ExecutionPreference preference,
-            const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
-        ALOGV("V1_1::ArmnnDriver::prepareModel_1_1()");
+        ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1()");
 
         if (!(preference == ExecutionPreference::LOW_POWER ||
               preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
-            ALOGV("V1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
+            ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
             cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
             return ErrorStatus::INVALID_ARGUMENT;
         }
 
-        return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime,
-                                                           m_ClTunedParameters,
-                                                           m_Options,
-                                                           model,
-                                                           cb,
-                                                           model.relaxComputationFloat32toFloat16
-                                                           && m_Options.GetFp16Enabled());
+        return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime,
+                                                           m_ClTunedParameters,
+                                                           m_Options,
+                                                           model,
+                                                           cb,
+                                                           model.relaxComputationFloat32toFloat16
+                                                           && m_Options.GetFp16Enabled());
     }
 
     Return<DeviceStatus> getStatus() override
     {
-        ALOGV("V1_1::ArmnnDriver::getStatus()");
+        ALOGV("hal_1_1::ArmnnDriver::getStatus()");
 
-        return armnn_driver::ArmnnDriverImpl::getStatus();
+        return armnn_driver::ArmnnDriverImpl::getStatus();
     }
 };
 
-} // armnn_driver::namespace V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file

diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
index 0a689539..d8939a07 100644
--- a/1.1/ArmnnDriverImpl.cpp
+++ b/1.1/ArmnnDriverImpl.cpp
@@ -8,34 +8,28 @@
 
 #include <log/log.h>
 
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
 namespace
 {
 
-const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";
+const char *g_Float32PerformanceExecTimeName             = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName           = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName          = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName        = "ArmNN.quantized8Performance.powerUsage";
 const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
 
 } // anonymous namespace
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
-Return<void> ArmnnDriverImpl::getCapabilities_1_1(
-    const armnn::IRuntimePtr& runtime,
-    neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
+Return<void> ArmnnDriverImpl::getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                                  V1_1::IDevice::getCapabilities_1_1_cb cb)
 {
-    ALOGV("V1_1::ArmnnDriverImpl::getCapabilities()");
+    ALOGV("hal_1_1::ArmnnDriverImpl::getCapabilities()");
 
-    neuralnetworks::V1_1::Capabilities capabilities;
+    V1_1::Capabilities capabilities;
     if (runtime)
     {
         capabilities.float32Performance.execTime =
@@ -57,10 +51,10 @@ Return<void> ArmnnDriverImpl::getCapabilities_1_1(
     }
     else
     {
-        capabilities.float32Performance.execTime = 0;
-        capabilities.float32Performance.powerUsage = 0;
-        capabilities.quantized8Performance.execTime = 0;
-        capabilities.quantized8Performance.powerUsage = 0;
+        capabilities.float32Performance.execTime                 = 0;
+        capabilities.float32Performance.powerUsage               = 0;
+        capabilities.quantized8Performance.execTime              = 0;
+        capabilities.quantized8Performance.powerUsage            = 0;
         capabilities.relaxedFloat32toFloat16Performance.execTime = 0;
 
         cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
@@ -69,5 +63,5 @@ Return<void> ArmnnDriverImpl::getCapabilities_1_1(
     return Void();
 }
 
-} // namespace armnn_driver::V1_1
-} // namespace armnn_driver
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file

diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
index bdb25854..4308bacb 100644
--- a/1.1/ArmnnDriverImpl.hpp
+++ b/1.1/ArmnnDriverImpl.hpp
@@ -13,16 +13,15 @@
 
 namespace armnn_driver
 {
-namespace V1_1
+namespace hal_1_1
 {
 
 class ArmnnDriverImpl
 {
 public:
-    static Return<void> getCapabilities_1_1(
-        const armnn::IRuntimePtr& runtime,
-        ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb);
+    static Return<void> getCapabilities_1_1(const armnn::IRuntimePtr& runtime,
+                                            V1_1::IDevice::getCapabilities_1_1_cb cb);
 };
 
-} // namespace armnn_driver::V1_1
+} // namespace hal_1_1
 } // namespace armnn_driver

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
new file mode 100644
index 00000000..0e669432
--- /dev/null
+++ b/1.1/HalPolicy.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    if (compliantWithV1_0(operation))
+    {
+        hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
+        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
+
+        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
+    }
+    else
+    {
+        switch (operation.type)
+        {
+            case V1_1::OperationType::DIV:
+                return ConvertDiv(operation, model, data);
+            default:
+                return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                            __func__, toString(operation.type).c_str());
+        }
+    }
+}
+
+bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsDivisionSupported,
+                          data.m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+} // namespace hal_1_1
+} // namespace armnn_driver
\ No newline at end of file

diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
new file mode 100644
index 00000000..3722d49d
--- /dev/null
+++ b/1.1/HalPolicy.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ConversionUtils.hpp"
+
+#include <HalInterfaces.h>
+
+namespace armnn_driver
+{
+namespace hal_1_1
+{
+
+class HalPolicy
+{
+public:
+    using Model                     = V1_1::Model;
+    using Operation                 = V1_1::Operation;
+    using getSupportedOperations_cb = V1_1::IDevice::getSupportedOperations_1_1_cb;
+
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_1
+} // namespace armnn_driver
-- 
cgit v1.2.1
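
A note for readers skimming the patch: the dispatch introduced in hal_1_1::HalPolicy::ConvertOperation follows a simple pattern. An operation from a 1.1 model that is still expressible in the 1.0 schema is converted down and forwarded to the existing 1.0 policy, and only genuinely new 1.1 operations (currently just DIV) are handled locally. The snippet below is a minimal standalone sketch of that control flow under stated assumptions; OpType, Op, Policy10 and Policy11 are hypothetical stand-in names, not the driver's real classes, and the real code additionally threads ConversionData and the shared ConversionUtils helpers through every call.

    // Standalone sketch of the HalPolicy dispatch pattern (stand-in names, not driver code).
    #include <iostream>

    enum class OpType { ADD, MUL, DIV };

    struct Op
    {
        OpType type;
        bool   usesV1_1OnlyFeatures; // e.g. arguments that only exist in the 1.1 schema
    };

    // Stand-in for compliantWithV1_0(): can this op be expressed as a 1.0 operation?
    bool CompliantWithV1_0(const Op& op)
    {
        return op.type != OpType::DIV && !op.usesV1_1OnlyFeatures;
    }

    struct Policy10 // plays the role of hal_1_0::HalPolicy
    {
        static bool ConvertOperation(const Op&)
        {
            std::cout << "converted by the 1.0 policy\n";
            return true;
        }
    };

    struct Policy11 // plays the role of hal_1_1::HalPolicy
    {
        static bool ConvertOperation(const Op& op)
        {
            if (CompliantWithV1_0(op))
            {
                // Downgrade and reuse the existing 1.0 conversion code.
                return Policy10::ConvertOperation(op);
            }

            switch (op.type)
            {
                case OpType::DIV:
                    std::cout << "converted by the 1.1 policy (DIV)\n";
                    return true;
                default:
                    std::cout << "operation not supported\n";
                    return false;
            }
        }
    };

    int main()
    {
        Policy11::ConvertOperation({OpType::ADD, false}); // forwarded to the 1.0 policy
        Policy11::ConvertOperation({OpType::DIV, false}); // handled by the 1.1 policy
    }

The practical effect, matching the commit message, is that each HAL version exposes one static conversion entry point while the shared per-operation helpers live in ConversionUtils, so supporting a new 1.1 operation only requires adding a case to the 1.1 policy.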