From 79250ab173e7dfa2c6057854c0d4b8dafe377fb0 Mon Sep 17 00:00:00 2001 From: Matteo Martincigh Date: Tue, 4 Sep 2018 16:28:10 +0100 Subject: IVGCVSW-1806 Restored the fp16 flag left behind during the previous refactoring * Split getCapabilities and getCapabilities_1_1 as it was before * Setting relaxedFloat32toFloat16Performance when using HAL 1.1 as required by one of the VTS test Change-Id: Iff883b8cbd0511596e9848fa40e91e4fa58d4260 --- 1.0/ArmnnDriver.hpp | 5 +-- 1.0/ArmnnDriverImpl.cpp | 68 +++++++++++++++++++++++++++++++++++++++++ 1.0/ArmnnDriverImpl.hpp | 28 +++++++++++++++++ 1.1/ArmnnDriver.hpp | 14 ++++++--- 1.1/ArmnnDriverImpl.cpp | 73 ++++++++++++++++++++++++++++++++++++++++++++ 1.1/ArmnnDriverImpl.hpp | 28 +++++++++++++++++ Android.mk | 3 ++ ArmnnDriverImpl.cpp | 44 +------------------------- ArmnnDriverImpl.hpp | 11 +------ ModelToINetworkConverter.cpp | 2 +- ModelToINetworkConverter.hpp | 4 +-- RequestThread.cpp | 2 +- 12 files changed, 218 insertions(+), 64 deletions(-) create mode 100644 1.0/ArmnnDriverImpl.cpp create mode 100644 1.0/ArmnnDriverImpl.hpp create mode 100644 1.1/ArmnnDriverImpl.cpp create mode 100644 1.1/ArmnnDriverImpl.hpp diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp index 18e25968..97613118 100644 --- a/1.0/ArmnnDriver.hpp +++ b/1.0/ArmnnDriver.hpp @@ -8,6 +8,7 @@ #include #include "ArmnnDevice.hpp" +#include "ArmnnDriverImpl.hpp" #include "../ArmnnDriverImpl.hpp" #include @@ -33,8 +34,8 @@ public: { ALOGV("V1_0::ArmnnDriver::getCapabilities()"); - return armnn_driver::ArmnnDriverImpl::getCapabilities(m_Runtime, - cb); + return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, + cb); } Return getSupportedOperations( diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp new file mode 100644 index 00000000..925d9dbe --- /dev/null +++ b/1.0/ArmnnDriverImpl.cpp @@ -0,0 +1,68 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ArmnnDriverImpl.hpp" +#include "../SystemPropertiesUtils.hpp" + +#include + +using namespace std; +using namespace android; +using namespace android::nn; +using namespace android::hardware; + +namespace +{ + +const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime"; +const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage"; +const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime"; +const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage"; + +} // anonymous namespace + +namespace armnn_driver +{ +namespace V1_0 +{ + +Return ArmnnDriverImpl::getCapabilities( + const armnn::IRuntimePtr& runtime, + neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) +{ + ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()"); + + neuralnetworks::V1_0::Capabilities capabilities; + if (runtime) + { + capabilities.float32Performance.execTime = + ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f); + + capabilities.float32Performance.powerUsage = + ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f); + + capabilities.quantized8Performance.execTime = + ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f); + + capabilities.quantized8Performance.powerUsage = + ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f); + + cb(ErrorStatus::NONE, capabilities); + } + else + { + capabilities.float32Performance.execTime = 0; + capabilities.float32Performance.powerUsage = 0; + capabilities.quantized8Performance.execTime = 0; + capabilities.quantized8Performance.powerUsage = 0; + + cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities); + } + + return Void(); +} + +} // namespace armnn_driver::V1_0 +} // namespace armnn_driver diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp new file mode 100644 index 00000000..b44350d8 --- /dev/null +++ b/1.0/ArmnnDriverImpl.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include + +#include "../DriverOptions.hpp" + +#include + +namespace armnn_driver +{ +namespace V1_0 +{ + +class ArmnnDriverImpl +{ +public: + static Return getCapabilities( + const armnn::IRuntimePtr& runtime, + ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb); +}; + +} // namespace armnn_driver::V1_0 +} // namespace armnn_driver diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp index f55aad4b..079d9cd1 100644 --- a/1.1/ArmnnDriver.hpp +++ b/1.1/ArmnnDriver.hpp @@ -8,7 +8,9 @@ #include #include "ArmnnDevice.hpp" +#include "ArmnnDriverImpl.hpp" #include "../ArmnnDriverImpl.hpp" +#include "../1.0/ArmnnDriverImpl.hpp" #include @@ -33,8 +35,8 @@ public: { ALOGV("V1_1::ArmnnDriver::getCapabilities()"); - return armnn_driver::ArmnnDriverImpl::getCapabilities(m_Runtime, - cb); + return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, + cb); } Return getSupportedOperations( @@ -67,8 +69,8 @@ public: { ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()"); - return armnn_driver::ArmnnDriverImpl::getCapabilities(m_Runtime, - cb); + return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, + cb); } Return getSupportedOperations_1_1( @@ -103,7 +105,9 @@ public: m_ClTunedParameters, m_Options, model, - cb); + cb, + model.relaxComputationFloat32toFloat16 + && m_Options.GetFp16Enabled()); } Return getStatus() override diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp new file mode 100644 index 00000000..1d063cbc --- /dev/null +++ b/1.1/ArmnnDriverImpl.cpp @@ -0,0 +1,73 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ArmnnDriverImpl.hpp" +#include "../SystemPropertiesUtils.hpp" + +#include + +using namespace std; +using namespace android; +using namespace android::nn; +using namespace android::hardware; + +namespace +{ + +const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime"; +const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage"; +const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime"; +const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage"; +const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime"; + +} // anonymous namespace + +namespace armnn_driver +{ +namespace V1_1 +{ + +Return ArmnnDriverImpl::getCapabilities_1_1( + const armnn::IRuntimePtr& runtime, + neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb) +{ + ALOGV("V1_1::ArmnnDriverImpl::getCapabilities()"); + + neuralnetworks::V1_1::Capabilities capabilities; + if (runtime) + { + capabilities.float32Performance.execTime = + ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f); + + capabilities.float32Performance.powerUsage = + ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f); + + capabilities.quantized8Performance.execTime = + ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f); + + capabilities.quantized8Performance.powerUsage = + ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f); + + capabilities.relaxedFloat32toFloat16Performance.execTime = + ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f); + + cb(ErrorStatus::NONE, capabilities); + } + else + { + capabilities.float32Performance.execTime = 0; + capabilities.float32Performance.powerUsage = 0; + capabilities.quantized8Performance.execTime = 0; + 
capabilities.quantized8Performance.powerUsage = 0; + capabilities.relaxedFloat32toFloat16Performance.execTime = 0; + + cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities); + } + + return Void(); +} + +} // namespace armnn_driver::V1_1 +} // namespace armnn_driver diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp new file mode 100644 index 00000000..c309b69d --- /dev/null +++ b/1.1/ArmnnDriverImpl.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include + +#include "../DriverOptions.hpp" + +#include + +namespace armnn_driver +{ +namespace V1_1 +{ + +class ArmnnDriverImpl +{ +public: + static Return getCapabilities_1_1( + const armnn::IRuntimePtr& runtime, + ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb); +}; + +} // namespace armnn_driver::V1_1 +} // namespace armnn_driver diff --git a/Android.mk b/Android.mk index 38450705..1fd74492 100644 --- a/Android.mk +++ b/Android.mk @@ -47,6 +47,7 @@ LOCAL_CFLAGS+= \ endif # ARMNN_DRIVER_DEBUG == 1 LOCAL_SRC_FILES := \ + 1.0/ArmnnDriverImpl.cpp \ ArmnnDriverImpl.cpp \ DriverOptions.cpp \ ArmnnDevice.cpp \ @@ -118,6 +119,8 @@ LOCAL_CFLAGS+= \ endif # ARMNN_DRIVER_DEBUG == 1 LOCAL_SRC_FILES := \ + 1.0/ArmnnDriverImpl.cpp \ + 1.1/ArmnnDriverImpl.cpp \ ArmnnDriverImpl.cpp \ DriverOptions.cpp \ ArmnnDevice.cpp \ diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp index 0298f3b7..ce66e6d5 100644 --- a/ArmnnDriverImpl.cpp +++ b/ArmnnDriverImpl.cpp @@ -26,11 +26,6 @@ using namespace android::hardware; namespace { -const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime"; -const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage"; -const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime"; -const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage"; - void NotifyCallbackAndCheck(const sp& callback, ErrorStatus errorStatus, const sp& preparedModelPtr) @@ -58,43 +53,6 @@ Return FailPrepareModel(ErrorStatus error, namespace armnn_driver { -template -Return ArmnnDriverImpl::getCapabilities( - const armnn::IRuntimePtr& runtime, - HalGetCapabilities_cb cb) -{ - ALOGV("ArmnnDriverImpl::getCapabilities()"); - - HalCapabilities capabilities; - if (runtime) - { - capabilities.float32Performance.execTime = - ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f); - - capabilities.float32Performance.powerUsage = - ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f); - - capabilities.quantized8Performance.execTime = - ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f); - - capabilities.quantized8Performance.powerUsage = - ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f); - - cb(ErrorStatus::NONE, capabilities); - } - else - { - capabilities.float32Performance.execTime = 0; - capabilities.float32Performance.powerUsage = 0; - capabilities.quantized8Performance.execTime = 0; - capabilities.quantized8Performance.powerUsage = 0; - - cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities); - } - - return Void(); -} - template Return ArmnnDriverImpl::getSupportedOperations( const armnn::IRuntimePtr& runtime, @@ -281,7 +239,7 @@ Return ArmnnDriverImpl::getStatus() // Class template specializations template class ArmnnDriverImpl; -#ifdef ARMNN_ANDROID_NN_V1_1 +#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1. 
template class ArmnnDriverImpl; #endif diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp index c0600977..87da581b 100644 --- a/ArmnnDriverImpl.hpp +++ b/ArmnnDriverImpl.hpp @@ -17,17 +17,13 @@ namespace armnn_driver struct HalVersion_1_0 { using Model = ::android::hardware::neuralnetworks::V1_0::Model; - using Capabilities = ::android::hardware::neuralnetworks::V1_0::Capabilities; - using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb; using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb; }; -#if defined(ARMNN_ANDROID_NN_V1_1) +#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1. struct HalVersion_1_1 { using Model = ::android::hardware::neuralnetworks::V1_1::Model; - using Capabilities = ::android::hardware::neuralnetworks::V1_1::Capabilities; - using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb; using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb; }; #endif @@ -37,13 +33,8 @@ class ArmnnDriverImpl { public: using HalModel = typename HalVersion::Model; - using HalCapabilities = typename HalVersion::Capabilities; - using HalGetCapabilities_cb = typename HalVersion::getCapabilities_cb; using HalGetSupportedOperations_cb = typename HalVersion::getSupportedOperations_cb; - static Return getCapabilities( - const armnn::IRuntimePtr& runtime, - HalGetCapabilities_cb cb); static Return getSupportedOperations( const armnn::IRuntimePtr& runtime, const DriverOptions& options, diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp index 6db32a05..6ae1cb57 100644 --- a/ModelToINetworkConverter.cpp +++ b/ModelToINetworkConverter.cpp @@ -653,7 +653,7 @@ bool ModelToINetworkConverter::ConvertOperation(const neuralnetworks } } -#if defined(ARMNN_ANDROID_NN_V1_1) +#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1. template bool ModelToINetworkConverter::ConvertOperation(const neuralnetworks::V1_1::Operation& operation) { diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp index c28ebdcd..5ee6a341 100644 --- a/ModelToINetworkConverter.hpp +++ b/ModelToINetworkConverter.hpp @@ -56,7 +56,7 @@ public: private: void Convert(); -#if defined(ARMNN_ANDROID_NN_V1_1) +#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1. bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_1::Operation& operation); bool ConvertDiv(const ::android::hardware::neuralnetworks::V1_1::Operation& operation); @@ -205,4 +205,4 @@ private: std::vector m_MemPools; }; -} // armnn_driver \ No newline at end of file +} // armnn_driver diff --git a/RequestThread.cpp b/RequestThread.cpp index 8e44d8d2..c5c9bbfc 100644 --- a/RequestThread.cpp +++ b/RequestThread.cpp @@ -134,7 +134,7 @@ void RequestThread::Process() // Class template specializations template class RequestThread; -#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1. +#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1. template class RequestThread; #endif -- cgit v1.2.1
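
Notes (not part of the patch above):

The restored flag is computed in V1_1::ArmnnDriver::prepareModel_1_1() as model.relaxComputationFloat32toFloat16 && m_Options.GetFp16Enabled() and handed to the shared ArmnnDriverImpl::prepareModel(), but the patch does not show how it is consumed downstream. The sketch below is one plausible way to forward it into the Arm NN optimizer, assuming armnn::OptimizerOptions exposes an m_ReduceFp32ToFp16 switch and that armnn::Optimize accepts an OptimizerOptions argument; the helper name, the backend-preference type and the surrounding variable names are illustrative only (the backend parameter in particular has changed between Arm NN releases).

    #include <armnn/ArmNN.hpp>
    #include <vector>

    // Hypothetical helper: forwards the restored fp16 flag into the Arm NN optimizer.
    // float32ToFloat16 corresponds to
    // model.relaxComputationFloat32toFloat16 && options.GetFp16Enabled().
    armnn::IOptimizedNetworkPtr OptimizeWithFp16Relaxation(
        const armnn::INetwork& network,
        const armnn::IRuntimePtr& runtime,
        const std::vector<armnn::Compute>& computeDevices,
        bool float32ToFloat16)
    {
        armnn::OptimizerOptions optimizerOptions;
        optimizerOptions.m_ReduceFp32ToFp16 = float32ToFloat16; // assumed member name

        // Optimize() applies the Fp32->Fp16 reduction only when the flag is set.
        return armnn::Optimize(network, computeDevices, runtime->GetDeviceSpec(), optimizerOptions);
    }

When the flag is false, either because the model does not request relaxed computation or because the driver was started without fp16 enabled, no Fp32->Fp16 reduction is requested and the network is optimized in full float32 precision.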
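
The commit message notes that relaxedFloat32toFloat16Performance is required by one of the HAL 1.1 VTS tests. The fragment below is a minimal caller-side sketch of the split getCapabilities_1_1() entry point, written against the generated V1_1 HIDL interface; the logging tag and the way the IDevice handle is obtained are simplified placeholders, and a real VTS test acquires the service through the HIDL service manager rather than taking a handle as a parameter.

    #define LOG_TAG "ArmnnDriverCapsCheck" // illustrative tag
    #include <android/hardware/neuralnetworks/1.1/IDevice.h>
    #include <log/log.h>

    using namespace android::hardware::neuralnetworks;

    // Hypothetical check: calls the new getCapabilities_1_1() and logs the field
    // that this patch starts populating for HAL 1.1.
    void CheckRelaxedFp16Performance(const android::sp<V1_1::IDevice>& device)
    {
        device->getCapabilities_1_1(
            [](V1_0::ErrorStatus status, const V1_1::Capabilities& caps)
            {
                if (status != V1_0::ErrorStatus::NONE)
                {
                    ALOGE("getCapabilities_1_1 failed");
                    return;
                }
                ALOGI("relaxedFloat32toFloat16Performance.execTime = %f",
                      caps.relaxedFloat32toFloat16Performance.execTime);
            });
    }

With the driver from this patch, the value reported falls back to 0.1 (the .1f default passed to ParseSystemProperty) unless the ArmNN.relaxedFloat32toFloat16Performance.execTime system property defined in 1.1/ArmnnDriverImpl.cpp overrides it.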