From d7c8eb93cb880e4bdc2fe721b22eeb1fa99bcf42 Mon Sep 17 00:00:00 2001
From: Ferran Balaguer
Date: Mon, 1 Jul 2019 13:37:44 +0100
Subject: IVGCVSW-3282 Update getCapabilities_1_2

Signed-off-by: Ferran Balaguer
Change-Id: I65ae8669d8365bf372fbf0d1eceb48cbe06ef92a
---
 1.2/ArmnnDriverImpl.cpp   |  93 +++++++++++++++++++++++++++++-
 test/1.2/Capabilities.cpp | 144 ++++++++++++++++++++++++++++++++++++++++++++++
 test/Android.mk           |   1 +
 3 files changed, 235 insertions(+), 3 deletions(-)
 create mode 100644 test/1.2/Capabilities.cpp

diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 97cfa5de..87ef08c2 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -13,7 +13,37 @@ namespace {
-const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+
+const char *g_OperandTypeTensorFloat32PerformanceExecTime = "Armnn.operandTypeTensorFloat32Performance.execTime";
+const char *g_OperandTypeTensorFloat32PerformancePowerUsage = "Armnn.operandTypeTensorFloat32Performance.powerUsage";
+
+const char *g_OperandTypeFloat32PerformanceExecTime = "Armnn.operandTypeFloat32Performance.execTime";
+const char *g_OperandTypeFloat32PerformancePowerUsage = "Armnn.operandTypeFloat32Performance.powerUsage";
+
+const char *g_OperandTypeTensorFloat16PerformanceExecTime = "Armnn.operandTypeTensorFloat16Performance.execTime";
+const char *g_OperandTypeTensorFloat16PerformancePowerUsage = "Armnn.operandTypeTensorFloat16Performance.powerUsage";
+
+const char *g_OperandTypeFloat16PerformanceExecTime = "Armnn.operandTypeFloat16Performance.execTime";
+const char *g_OperandTypeFloat16PerformancePowerUsage = "Armnn.operandTypeFloat16Performance.powerUsage";
+
+const char *g_OperandTypeTensorQuant8AsymmPerformanceExecTime =
+    "Armnn.operandTypeTensorQuant8AsymmPerformance.execTime";
+const char *g_OperandTypeTensorQuant8AsymmPerformancePowerUsage =
+    "Armnn.operandTypeTensorQuant8AsymmPerformance.powerUsage";
+
+const char *g_OperandTypeTensorQuant16SymmPerformanceExecTime =
+    "Armnn.operandTypeTensorQuant16SymmPerformance.execTime";
+const char *g_OperandTypeTensorQuant16SymmPerformancePowerUsage =
+    "Armnn.operandTypeTensorQuant16SymmPerformance.powerUsage";
+
+const char *g_OperandTypeTensorInt32PerformanceExecTime = "Armnn.operandTypeTensorInt32Performance.execTime";
+const char *g_OperandTypeTensorInt32PerformancePowerUsage = "Armnn.operandTypeTensorInt32Performance.powerUsage";
+
+const char *g_OperandTypeInt32PerformanceExecTime = "Armnn.operandTypeInt32Performance.execTime";
+const char *g_OperandTypeInt32PerformancePowerUsage = "Armnn.operandTypeInt32Performance.powerUsage";
+
+
 void NotifyCallbackAndCheck(const sp<V1_2::IPreparedModelCallback>& callback,
                             ErrorStatus errorStatus,
                             const sp<V1_2::IPreparedModel>& preparedModelPtr)
@@ -181,13 +211,67 @@ Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt
     V1_2::Capabilities capabilities;
 
+    float defaultValue = .1f;
+
     if (runtime)
     {
         capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime =
-            ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
+            ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, defaultValue);
 
         capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime =
-            ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
+            ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, defaultValue);
+
+        // Set the base value for all operand types
+        capabilities.operandPerformance = nonExtensionOperandPerformance({FLT_MAX, FLT_MAX});
+
+        // Load supported operand types
+        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorFloat32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::FLOAT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeFloat32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeFloat32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT16,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorFloat16PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat16PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::FLOAT16,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeFloat16PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeFloat16PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT16_SYMM,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::TENSOR_INT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorInt32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorInt32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, OperandType::INT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeInt32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeInt32PerformancePowerUsage, defaultValue)
+               });
 
         cb(ErrorStatus::NONE, capabilities);
     }
@@ -196,6 +280,9 @@ Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt
         capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime = 0;
         capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime = 0;
 
+        // Set the base value for all operand types
+        capabilities.operandPerformance = nonExtensionOperandPerformance({0.f, 0.0f});
+
         cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
new file mode 100644
index 00000000..f4142866
--- /dev/null
+++ b/test/1.2/Capabilities.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../../1.2/ArmnnDriverImpl.hpp"
+
+#include "Utils.h"
+
+#include <armnn/IRuntime.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <sys/system_properties.h>
+
+#include <cfloat>
+
+using namespace std;
+
+struct CapabilitiesFixture
+{
+    CapabilitiesFixture()
+    {
+        const char* nullStr = "";
+
+        __system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeTensorFloat32Performance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeFloat32Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeFloat32Performance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeTensorFloat16Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeTensorFloat16Performance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeFloat16Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeFloat16Performance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeTensorQuant8AsymmPerformance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeTensorQuant8AsymmPerformance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeTensorQuant16SymmPerformance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeTensorQuant16SymmPerformance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeTensorInt32Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeTensorInt32Performance.powerUsage", nullStr);
+        __system_property_set("Armnn.operandTypeInt32Performance.execTime", nullStr);
+        __system_property_set("Armnn.operandTypeInt32Performance.powerUsage", nullStr);
+    }
+
+    ~CapabilitiesFixture(){}
+};
+
+void CheckOperandType(const V1_2::Capabilities& capabilities, OperandType type, float execTime, float powerUsage)
+{
+    PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
+    BOOST_ASSERT(perfInfo.execTime == execTime);
+    BOOST_ASSERT(perfInfo.powerUsage == powerUsage);
+}
+
+BOOST_AUTO_TEST_SUITE(CapabilitiesTests)
+
+BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
+{
+    using namespace armnn_driver::hal_1_2;
+    using namespace android::nn;
+
+    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+        {
+            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
+            CheckOperandType(capabilities, OperandType::FLOAT32, 2.2f, 2.3f);
+            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, 2.4f, 2.5f);
+            CheckOperandType(capabilities, OperandType::FLOAT16, 2.6f, 2.7f);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, 2.8f, 2.9f);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, 3.0f, 3.1f);
+            CheckOperandType(capabilities, OperandType::TENSOR_INT32, 3.2f, 3.3f);
+            CheckOperandType(capabilities, OperandType::INT32, 3.4f, 3.5f);
+
+            // Unsupported operands take FLT_MAX value
+            CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+
+            BOOST_ASSERT(error == ErrorStatus::NONE);
+        };
+
+    __system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
+    __system_property_set("Armnn.operandTypeTensorFloat32Performance.powerUsage", "2.1f");
+    __system_property_set("Armnn.operandTypeFloat32Performance.execTime", "2.2f");
+    __system_property_set("Armnn.operandTypeFloat32Performance.powerUsage", "2.3f");
+    __system_property_set("Armnn.operandTypeTensorFloat16Performance.execTime", "2.4f");
+    __system_property_set("Armnn.operandTypeTensorFloat16Performance.powerUsage", "2.5f");
+    __system_property_set("Armnn.operandTypeFloat16Performance.execTime", "2.6f");
+    __system_property_set("Armnn.operandTypeFloat16Performance.powerUsage", "2.7f");
+    __system_property_set("Armnn.operandTypeTensorQuant8AsymmPerformance.execTime", "2.8f");
+    __system_property_set("Armnn.operandTypeTensorQuant8AsymmPerformance.powerUsage", "2.9f");
+    __system_property_set("Armnn.operandTypeTensorQuant16SymmPerformance.execTime", "3.0f");
+    __system_property_set("Armnn.operandTypeTensorQuant16SymmPerformance.powerUsage", "3.1f");
+    __system_property_set("Armnn.operandTypeTensorInt32Performance.execTime", "3.2f");
+    __system_property_set("Armnn.operandTypeTensorInt32Performance.powerUsage", "3.3f");
+    __system_property_set("Armnn.operandTypeInt32Performance.execTime", "3.4f");
+    __system_property_set("Armnn.operandTypeInt32Performance.powerUsage", "3.5f");
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+}
+
+BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
+{
+    using namespace armnn_driver::hal_1_2;
+    using namespace android::nn;
+
+    float defaultValue = .1f;
+
+    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+        {
+            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::FLOAT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::FLOAT16, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::TENSOR_INT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, OperandType::INT32, defaultValue, defaultValue);
+
+            // Unsupported operands take FLT_MAX value
+            CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+
+            BOOST_ASSERT(error == ErrorStatus::NONE);
+        };
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/test/Android.mk b/test/Android.mk
index a1725996..a078e0a3 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -256,6 +256,7 @@ LOCAL_SRC_FILES := \
         1.1/Mean.cpp \
         1.1/Transpose.cpp \
         1.2/Dilation.cpp \
+        1.2/Capabilities.cpp \
         Tests.cpp \
         UtilsTests.cpp \
         Concurrent.cpp \
-- 
cgit v1.2.1