From ec1e5b8d0a0eeba7eae0f1fdb5e32c72f8a8093c Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Wed, 26 Feb 2020 17:00:39 +0000
Subject: IVGCVSW-4473 Android R pre Hal 1_3 build changes

* Update ErrorStatus to V1_0::ErrorStatus
* Update Request to V1_0::Request
* Update OperandType to V1_2::OperandType
* Add namespace android::nn::hal in ArmnnDriverImpl for R only
* Add missing g_RelaxedFloat32toFloat16PerformancePowerUsageName
* Add namespace V1_0 or V1_1 where necessary
* Update Android.mk with R macro and android.hardware.neuralnetworks@1.3
* Remove androidnn.go
* include IAllocator in DriverTestHelpers
* Remove unused LOCAL_CFLAGS

Signed-off-by: Kevin May
Change-Id: I1787f1ed6784b3bbec017536d87d49197405e853
Signed-off-by: Kevin May
---
 1.0/ArmnnDriver.hpp        |   4 +-
 1.0/ArmnnDriverImpl.cpp    |   4 +-
 1.0/ArmnnDriverImpl.hpp    |   4 ++
 1.1/ArmnnDriver.hpp        |  15 +++---
 1.1/ArmnnDriverImpl.cpp    |  31 ++++++-----
 1.1/ArmnnDriverImpl.hpp    |   8 +++
 1.1/HalPolicy.hpp          |   2 +
 1.2/ArmnnDriver.hpp        |  35 ++++++------
 1.2/ArmnnDriverImpl.cpp    |  85 ++++++++++++++++-------------
 1.2/ArmnnDriverImpl.hpp    |  19 ++++---
 1.2/HalPolicy.cpp          |   4 +-
 1.2/HalPolicy.hpp          |   2 +
 Android.bp                 |  17 ------
 Android.mk                 |  89 ++++++++++++++++++++++++++-----
 ArmnnDriverImpl.cpp        |  48 ++++++++---------
 ArmnnDriverImpl.hpp        |   6 ++-
 ArmnnPreparedModel.cpp     |  55 ++++++++++---------
 ArmnnPreparedModel.hpp     |   4 +-
 ArmnnPreparedModel_1_2.cpp | 130 +++++++++++++++++++++++++--------------------
 ArmnnPreparedModel_1_2.hpp |  16 +++---
 ConversionUtils.hpp        |   4 ++
 Utils.cpp                  |   6 ---
 Utils.hpp                  |   4 ++
 androidnn.go               |  44 ---------------
 test/1.1/Mean.cpp          |   6 +--
 test/1.1/Transpose.cpp     |   2 +-
 test/1.2/Capabilities.cpp  |  82 ++++++++++++++--------------
 test/Android.mk            |  44 ++++++++++-----
 test/Concat.cpp            |  26 ++++-----
 test/Concurrent.cpp        |   2 +-
 test/Convolution2D.hpp     |   2 +-
 test/DriverTestHelpers.cpp |  41 +++++++------
 test/DriverTestHelpers.hpp |  58 ++++++++++----------
 test/FullyConnected.cpp    |  14 ++---
 test/GenericLayerTests.cpp |  37 +++++-------
 test/Lstm.hpp              |  14 ++---
 test/Tests.cpp             |   6 +--
 37 files changed, 527 insertions(+), 443 deletions(-)
 delete mode 100644 androidnn.go

diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 035d4484..b18f0650 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -46,8 +46,8 @@ public:
         return armnn_driver::ArmnnDriverImpl::getSupportedOperations(m_Runtime, m_Options, model, cb);
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_0::ArmnnDriver::prepareModel()");
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
index a35bb0e9..57f828c0 100644
--- a/1.0/ArmnnDriverImpl.cpp
+++ b/1.0/ArmnnDriverImpl.cpp
@@ -43,7 +43,7 @@ Return<void> ArmnnDriverImpl::getCapabilities(const armnn::IRuntimePtr& runtime,
         capabilities.quantized8Performance.powerUsage =
             ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
 
-        cb(ErrorStatus::NONE, capabilities);
+        cb(V1_0::ErrorStatus::NONE, capabilities);
     }
     else
     {
@@ -52,7 +52,7 @@ Return<void> ArmnnDriverImpl::getCapabilities(const armnn::IRuntimePtr& runtime,
         capabilities.quantized8Performance.execTime = 0;
         capabilities.quantized8Performance.powerUsage = 0;
 
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
     return Void();
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
index 7f033e05..bb93e2e1 100644
--- a/1.0/ArmnnDriverImpl.hpp
+++ b/1.0/ArmnnDriverImpl.hpp
@@ -11,6
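The ErrorStatus qualification above is the heart of the patch: the Android R framework exposes the HAL types through android::nn::hal, and the @1.3 interface adds its own V1_3::ErrorStatus, so an unqualified ErrorStatus no longer resolves uniquely once those names are in scope. A minimal sketch of the pattern, assuming only the generated @1.0 types header:

    #include <android/hardware/neuralnetworks/1.0/types.h>

    // Pin a short alias for the versioned namespace once per header...
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    // ...and qualify each use, so the code still compiles when
    // ARMNN_ANDROID_R pulls android::nn::hal (and with it the 1.3
    // names) into scope via a using-directive.
    V1_0::ErrorStatus Fail()
    {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }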
+11,10 @@ #include +#ifdef ARMNN_ANDROID_R +using namespace android::nn::hal; +#endif + namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; namespace armnn_driver diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp index baae6350..a6849abc 100644 --- a/1.1/ArmnnDriver.hpp +++ b/1.1/ArmnnDriver.hpp @@ -33,6 +33,7 @@ public: ~ArmnnDriver() {} public: + Return getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override { ALOGV("hal_1_1::ArmnnDriver::getCapabilities()"); @@ -51,8 +52,8 @@ public: cb); } - Return prepareModel(const V1_0::Model& model, - const android::sp& cb) override + Return prepareModel(const V1_0::Model& model, + const android::sp& cb) override { ALOGV("hal_1_1::ArmnnDriver::prepareModel()"); @@ -81,9 +82,9 @@ public: cb); } - Return prepareModel_1_1(const V1_1::Model& model, - V1_1::ExecutionPreference preference, - const android::sp& cb) override + Return prepareModel_1_1(const V1_1::Model& model, + V1_1::ExecutionPreference preference, + const android::sp& cb) override { ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1()"); @@ -92,8 +93,8 @@ public: preference == ExecutionPreference::SUSTAINED_SPEED)) { ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference"); - cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return ErrorStatus::INVALID_ARGUMENT; + cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr); + return V1_0::ErrorStatus::INVALID_ARGUMENT; } return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime, diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp index d8939a07..1d1aaa75 100644 --- a/1.1/ArmnnDriverImpl.cpp +++ b/1.1/ArmnnDriverImpl.cpp @@ -11,11 +11,12 @@ namespace { -const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime"; -const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage"; -const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime"; -const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage"; -const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime"; +const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime"; +const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage"; +const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime"; +const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage"; +const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime"; +const char *g_RelaxedFloat32toFloat16PerformancePowerUsageName = "ArmNN.relaxedFloat32toFloat16Performance.powerUsage"; } // anonymous namespace @@ -47,17 +48,21 @@ Return ArmnnDriverImpl::getCapabilities_1_1(const armnn::IRuntimePtr& runt capabilities.relaxedFloat32toFloat16Performance.execTime = ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f); - cb(ErrorStatus::NONE, capabilities); + capabilities.relaxedFloat32toFloat16Performance.powerUsage = + ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsageName, .1f); + + cb(V1_0::ErrorStatus::NONE, capabilities); } else { - capabilities.float32Performance.execTime = 0; - capabilities.float32Performance.powerUsage = 0; - capabilities.quantized8Performance.execTime = 0; - capabilities.quantized8Performance.powerUsage = 0; - capabilities.relaxedFloat32toFloat16Performance.execTime = 0; - - cb(ErrorStatus::DEVICE_UNAVAILABLE, 
capabilities); + capabilities.float32Performance.execTime = 0; + capabilities.float32Performance.powerUsage = 0; + capabilities.quantized8Performance.execTime = 0; + capabilities.quantized8Performance.powerUsage = 0; + capabilities.relaxedFloat32toFloat16Performance.execTime = 0; + capabilities.relaxedFloat32toFloat16Performance.powerUsage = 0; + + cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities); } return Void(); diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp index 4308bacb..f49dee0e 100644 --- a/1.1/ArmnnDriverImpl.hpp +++ b/1.1/ArmnnDriverImpl.hpp @@ -11,6 +11,14 @@ #include +#ifdef ARMNN_ANDROID_R +using namespace android::nn::hal; +#endif + + +namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; +namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; + namespace armnn_driver { namespace hal_1_1 diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp index dd8558b3..806686bf 100644 --- a/1.1/HalPolicy.hpp +++ b/1.1/HalPolicy.hpp @@ -9,6 +9,8 @@ #include +namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; + namespace armnn_driver { namespace hal_1_1 diff --git a/1.2/ArmnnDriver.hpp b/1.2/ArmnnDriver.hpp index 177cab63..6dba2e9a 100644 --- a/1.2/ArmnnDriver.hpp +++ b/1.2/ArmnnDriver.hpp @@ -29,6 +29,7 @@ namespace hal_1_2 class ArmnnDriver : public ArmnnDevice, public V1_2::IDevice { public: + ArmnnDriver(DriverOptions options) : ArmnnDevice(std::move(options)) { @@ -57,8 +58,8 @@ public: cb); } - Return prepareModel(const V1_0::Model& model, - const android::sp& cb) override + Return prepareModel(const V1_0::Model& model, + const android::sp& cb) override { ALOGV("hal_1_2::ArmnnDriver::prepareModel()"); @@ -86,9 +87,9 @@ public: cb); } - Return prepareModel_1_1(const V1_1::Model& model, - V1_1::ExecutionPreference preference, - const android::sp& cb) override + Return prepareModel_1_1(const V1_1::Model& model, + V1_1::ExecutionPreference preference, + const android::sp& cb) override { ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1()"); @@ -97,8 +98,8 @@ public: preference == ExecutionPreference::SUSTAINED_SPEED)) { ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1: Invalid execution preference"); - cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return ErrorStatus::INVALID_ARGUMENT; + cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr); + return V1_0::ErrorStatus::INVALID_ARGUMENT; } return armnn_driver::ArmnnDriverImpl::prepareModel(m_Runtime, @@ -121,7 +122,7 @@ public: { ALOGV("hal_1_2::ArmnnDriver::getVersionString()"); - cb(ErrorStatus::NONE, "ArmNN"); + cb(V1_0::ErrorStatus::NONE, "ArmNN"); return Void(); } @@ -129,22 +130,22 @@ public: { ALOGV("hal_1_2::ArmnnDriver::getType()"); - cb(ErrorStatus::NONE, V1_2::DeviceType::CPU); + cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU); return Void(); } - Return prepareModelFromCache( + Return prepareModelFromCache( const android::hardware::hidl_vec&, const android::hardware::hidl_vec&, const HidlToken&, const sp& callback) { ALOGV("hal_1_2::ArmnnDriver::prepareModelFromCache()"); - callback->notify_1_2(ErrorStatus::GENERAL_FAILURE, nullptr); - return ErrorStatus::GENERAL_FAILURE; + callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr); + return V1_0::ErrorStatus::GENERAL_FAILURE; } - Return prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference, + Return prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference, const android::hardware::hidl_vec&, const android::hardware::hidl_vec&, const HidlToken&, const android::sp& cb) @@ -156,8 
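Every capability above is seeded from an ArmNN.* system property with a hard-coded fallback. ParseSystemProperty itself is defined elsewhere in the driver, so this is only a hedged sketch of its likely shape (hypothetical name), built on the libcutils property API:

    #include <cutils/properties.h>
    #include <cstdlib>

    // Read a float from an Android system property, falling back to a
    // default when the property is unset or unparseable.
    float ParseSystemPropertySketch(const char* name, float defaultValue)
    {
        char value[PROPERTY_VALUE_MAX] = {};
        if (property_get(name, value, nullptr) > 0)
        {
            char* end = nullptr;
            const float parsed = std::strtof(value, &end);
            if (end != value)
            {
                return parsed; // parsed a leading float, e.g. "0.1" -> 0.1f
            }
        }
        return defaultValue;
    }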
+157,8 @@ public: preference == ExecutionPreference::SUSTAINED_SPEED)) { ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_2: Invalid execution preference"); - cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return ErrorStatus::INVALID_ARGUMENT; + cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr); + return V1_0::ErrorStatus::INVALID_ARGUMENT; } return ArmnnDriverImpl::prepareArmnnModel_1_2(m_Runtime, @@ -172,7 +173,7 @@ public: Return getSupportedExtensions(getSupportedExtensions_cb cb) { ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()"); - cb(ErrorStatus::NONE, {/* No extensions. */}); + cb(V1_0::ErrorStatus::NONE, {/* No extensions. */}); return Void(); } @@ -199,7 +200,7 @@ public: ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()"); // Set both numbers to be 0 for cache not supported. - cb(ErrorStatus::NONE, 0, 0); + cb(V1_0::ErrorStatus::NONE, 0, 0); return Void(); } }; diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp index 691156fa..bfa730b5 100644 --- a/1.2/ArmnnDriverImpl.cpp +++ b/1.2/ArmnnDriverImpl.cpp @@ -57,7 +57,7 @@ const char *g_OperandTypeInt32PerformancePowerUsage = "Armnn.operandType void NotifyCallbackAndCheck(const sp& callback, - ErrorStatus errorStatus, + V1_0::ErrorStatus errorStatus, const sp& preparedModelPtr) { Return returned = callback->notify_1_2(errorStatus, preparedModelPtr); @@ -69,9 +69,9 @@ void NotifyCallbackAndCheck(const sp& callback, } } -Return FailPrepareModel(ErrorStatus error, - const std::string& message, - const sp& callback) +Return FailPrepareModel(V1_0::ErrorStatus error, + const std::string& message, + const sp& callback) { ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str()); NotifyCallbackAndCheck(callback, error, nullptr); @@ -85,29 +85,30 @@ namespace armnn_driver namespace hal_1_2 { -Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime, - const armnn::IGpuAccTunedParametersPtr& clTunedParameters, - const DriverOptions& options, - const V1_2::Model& model, - const sp& cb, - bool float32ToFloat16) +Return ArmnnDriverImpl::prepareArmnnModel_1_2( + const armnn::IRuntimePtr& runtime, + const armnn::IGpuAccTunedParametersPtr& clTunedParameters, + const DriverOptions& options, + const V1_2::Model& model, + const sp& cb, + bool float32ToFloat16) { ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_2()"); if (cb.get() == nullptr) { ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel"); - return ErrorStatus::INVALID_ARGUMENT; + return V1_0::ErrorStatus::INVALID_ARGUMENT; } if (!runtime) { - return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb); + return FailPrepareModel(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb); } if (!android::nn::validateModel(model)) { - return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb); + return FailPrepareModel(V1_0::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb); } // Deliberately ignore any unsupported operations requested by the options - @@ -120,8 +121,8 @@ Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime if (modelConverter.GetConversionResult() != ConversionResult::Success) { - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb); + return V1_0::ErrorStatus::NONE; } // Optimize the network @@ -142,8 +143,8 @@ Return 
ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime { std::stringstream message; message << "Exception (" << e.what() << ") caught from optimize."; - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Check that the optimized network is valid. @@ -155,8 +156,8 @@ Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime { message << "\n" << msg; } - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Export the optimized network graph to a dot file if an output dump directory @@ -170,15 +171,15 @@ Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime { if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success) { - return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb); + return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb); } } catch (std::exception& e) { std::stringstream message; message << "Exception (" << e.what()<< ") caught from LoadNetwork."; - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Now that we have a networkId for the graph rename the dump file to use it @@ -199,7 +200,7 @@ Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime // this is enabled) before the first 'real' inference which removes the overhead of the first inference. if (!preparedModel->ExecuteWithDummyInputs()) { - return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb); + return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb); } if (clTunedParameters && @@ -217,9 +218,9 @@ Return ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntime } } - NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release()); + NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel.release()); - return ErrorStatus::NONE; + return V1_0::ErrorStatus::NONE; } Return ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runtime, @@ -240,52 +241,56 @@ Return ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsage, defaultValue); // Set the base value for all operand types + #ifdef ARMNN_ANDROID_R + capabilities.operandPerformance = nonExtensionOperandPerformance({FLT_MAX, FLT_MAX}); + #else capabilities.operandPerformance = nonExtensionOperandPerformance({FLT_MAX, FLT_MAX}); + #endif // Load supported operand types - update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32, { .execTime = ParseSystemProperty(g_OperandTypeTensorFloat32PerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat32PerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::FLOAT32, + update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT32, { .execTime = ParseSystemProperty(g_OperandTypeFloat32PerformanceExecTime, defaultValue), .powerUsage = 
ParseSystemProperty(g_OperandTypeFloat32PerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT16, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT16, { .execTime = ParseSystemProperty(g_OperandTypeTensorFloat16PerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat16PerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::FLOAT16, + update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT16, { .execTime = ParseSystemProperty(g_OperandTypeFloat16PerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeFloat16PerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM, { .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM, { .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT16_SYMM, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT16_SYMM, { .execTime = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, { .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformanceExecTime, defaultValue), @@ -293,19 +298,19 @@ Return ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::TENSOR_INT32, + update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_INT32, { .execTime = ParseSystemProperty(g_OperandTypeTensorInt32PerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeTensorInt32PerformancePowerUsage, defaultValue) }); - update(&capabilities.operandPerformance, OperandType::INT32, + update(&capabilities.operandPerformance, V1_2::OperandType::INT32, { .execTime = ParseSystemProperty(g_OperandTypeInt32PerformanceExecTime, defaultValue), .powerUsage = ParseSystemProperty(g_OperandTypeInt32PerformancePowerUsage, defaultValue) }); - cb(ErrorStatus::NONE, capabilities); + cb(V1_0::ErrorStatus::NONE, capabilities); } else { @@ -313,13 +318,17 @@ Return ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runt capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime = 0; // Set the base value for all operand types + #ifdef ARMNN_ANDROID_R + capabilities.operandPerformance = nonExtensionOperandPerformance({0.f, 0.0f}); + #else capabilities.operandPerformance = nonExtensionOperandPerformance({0.f, 0.0f}); + #endif - cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities); + 
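All of these update() calls edit a single table: Capabilities::operandPerformance, a vector of (OperandType, PerformanceInfo) entries that the NN framework keeps sorted by type so its lookup() can binary-search it. update() and nonExtensionOperandPerformance are framework helpers; the stand-in below only illustrates the idea, with hypothetical names:

    #include <algorithm>
    #include <vector>

    struct PerfInfo  { float execTime; float powerUsage; };
    enum class OpType { FLOAT32, TENSOR_FLOAT32, INT32 /* ... */ };
    struct Entry     { OpType type; PerfInfo info; };

    // Overwrite the entry for 'type'; the table stays sorted, so a
    // binary search finds the slot in O(log n).
    void UpdateSketch(std::vector<Entry>& table, OpType type, PerfInfo info)
    {
        auto it = std::lower_bound(table.begin(), table.end(), type,
            [](const Entry& e, OpType t) { return e.type < t; });
        if (it != table.end() && it->type == type)
        {
            it->info = info;
        }
    }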
cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities); } return Void(); } } // namespace hal_1_2 -} // namespace armnn_driver +} // namespace armnn_driver \ No newline at end of file diff --git a/1.2/ArmnnDriverImpl.hpp b/1.2/ArmnnDriverImpl.hpp index b3c65079..73ba133e 100644 --- a/1.2/ArmnnDriverImpl.hpp +++ b/1.2/ArmnnDriverImpl.hpp @@ -11,6 +11,13 @@ #include +#ifdef ARMNN_ANDROID_R +using namespace android::nn::hal; +#endif + +namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; +namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; + namespace armnn_driver { namespace hal_1_2 @@ -19,12 +26,12 @@ namespace hal_1_2 class ArmnnDriverImpl { public: - static Return prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime, - const armnn::IGpuAccTunedParametersPtr& clTunedParameters, - const DriverOptions& options, - const V1_2::Model& model, - const android::sp& cb, - bool float32ToFloat16 = false); + static Return prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime, + const armnn::IGpuAccTunedParametersPtr& clTunedParameters, + const DriverOptions& options, + const V1_2::Model& model, + const android::sp& cb, + bool float32ToFloat16 = false); static Return getCapabilities_1_2(const armnn::IRuntimePtr& runtime, V1_2::IDevice::getCapabilities_1_2_cb cb); diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp index 8e4ef8a8..b3ccc47f 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -26,9 +26,9 @@ using namespace armnn; namespace { -bool IsQSymmDequantizeForWeights(const Operation& operation, const Model& model) +bool IsQSymmDequantizeForWeights(const HalPolicy::Operation& operation, const HalPolicy::Model& model) { - const Operand* operand = GetInputOperand(operation, 0, model); + const HalPolicy::Operand* operand = GetInputOperand(operation, 0, model); if (!operand) { return false; diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp index e0a5c2fc..cd4f2da4 100644 --- a/1.2/HalPolicy.hpp +++ b/1.2/HalPolicy.hpp @@ -11,6 +11,8 @@ #include +namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; + namespace armnn_driver { namespace hal_1_2 diff --git a/Android.bp b/Android.bp index 7632c8c1..a2c80531 100644 --- a/Android.bp +++ b/Android.bp @@ -3,23 +3,6 @@ // SPDX-License-Identifier: MIT // -bootstrap_go_package { - name: "armnn_nn_driver", - pkgPath: "android-nn-driver", - deps: [ - "blueprint", - "blueprint-pathtools", - "blueprint-proptools", - "soong", - "soong-android", - "soong-cc", - ], - srcs: [ - "androidnn.go", - ], - pluginFor: [ "soong_build" ], -} - //////////////////////////////////////////// // // // static boost libs // diff --git a/Android.mk b/Android.mk index 73ae8ca2..6cc85eec 100644 --- a/Android.mk +++ b/Android.mk @@ -8,6 +8,7 @@ LOCAL_PATH := $(ANDROID_NN_DRIVER_LOCAL_PATH) P_OR_LATER := 0 Q_OR_LATER := 0 +R_OR_LATER := 0 ifeq ($(PLATFORM_VERSION),9) P_OR_LATER := 1 @@ -25,6 +26,12 @@ P_OR_LATER := 1 Q_OR_LATER := 1 endif # PLATFORM_VERSION == Q +ifeq ($(PLATFORM_VERSION),R) +P_OR_LATER := 1 +Q_OR_LATER := 1 +R_OR_LATER := 1 +endif # PLATFORM_VERSION == R + CPP_VERSION := c++14 ifeq ($(Q_OR_LATER),1) @@ -55,6 +62,10 @@ ifeq ($(ARMNN_REF_ENABLE),0) ARMNN_REF_ENABLED := 0 endif +ifeq ($(PLATFORM_VERSION),R) +ARMNN_COMPUTE_CL_ENABLED := 0 +endif # PLATFORM_VERSION == R + ####################### # libarmnn-driver@1.0 # ####################### @@ -84,12 +95,11 @@ LOCAL_CFLAGS := \ -Werror \ -Wno-format-security -ifeq ($(P_OR_LATER),1) -# Required to build with the changes made to the Android ML framework starting from Android P, -# regardless of 
the HAL version used for the build. +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) LOCAL_CFLAGS+= \ - -DARMNN_ANDROID_P -endif # PLATFORM_VERSION == 9 + -DARMNN_ANDROID_R +endif # R or later ifeq ($(ARMNN_DRIVER_DEBUG),1) LOCAL_CFLAGS+= \ @@ -164,7 +174,12 @@ LOCAL_SHARED_LIBRARIES+= \ libfmq \ libcutils \ android.hardware.neuralnetworks@1.2 -endif # PLATFORM_VERSION == Q +endif # Q or later + +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # R or later ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ @@ -205,7 +220,6 @@ LOCAL_CFLAGS := \ -fexceptions \ -Werror \ -Wno-format-security \ - -DARMNN_ANDROID_P \ -DARMNN_ANDROID_NN_V1_1 ifeq ($(ARMNN_DRIVER_DEBUG),1) @@ -218,6 +232,12 @@ LOCAL_CFLAGS += \ -DBOOST_NO_AUTO_PTR endif # PLATFORM_VERSION == Q or later +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) +LOCAL_CFLAGS+= \ + -DARMNN_ANDROID_R +endif # R or later + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_CFLAGS += \ -DARMCOMPUTECL_ENABLED @@ -279,6 +299,11 @@ LOCAL_SHARED_LIBRARIES+= \ android.hardware.neuralnetworks@1.2 endif # PLATFORM_VERSION == Q +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # R or later + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ libOpenCL @@ -314,7 +339,7 @@ LOCAL_CFLAGS := \ -fexceptions \ -Werror \ -Wno-format-security \ - -DARMNN_ANDROID_Q \ + -DBOOST_NO_AUTO_PTR \ -DARMNN_ANDROID_NN_V1_2 ifeq ($(ARMNN_DRIVER_DEBUG),1) @@ -322,10 +347,11 @@ LOCAL_CFLAGS+= \ -UNDEBUG endif # ARMNN_DRIVER_DEBUG == 1 -ifeq ($(Q_OR_LATER),1) -LOCAL_CFLAGS += \ - -DBOOST_NO_AUTO_PTR -endif # PLATFORM_VERSION == Q or later +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) +LOCAL_CFLAGS+= \ + -DARMNN_ANDROID_R +endif # R or later ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_CFLAGS += \ @@ -387,6 +413,11 @@ LOCAL_SHARED_LIBRARIES := \ android.hardware.neuralnetworks@1.1 \ android.hardware.neuralnetworks@1.2 +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # R or later + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ libOpenCL @@ -433,6 +464,12 @@ LOCAL_CFLAGS += \ -DBOOST_NO_AUTO_PTR endif # PLATFORM_VERSION == Q or later +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) +LOCAL_CFLAGS+= \ + -DARMNN_ANDROID_R +endif # R or later + LOCAL_SRC_FILES := \ service.cpp @@ -468,6 +505,7 @@ ifeq ($(P_OR_LATER),1) LOCAL_SHARED_LIBRARIES+= \ android.hardware.neuralnetworks@1.1 endif # PLATFORM_VERSION == 9 + ifeq ($(Q_OR_LATER),1) LOCAL_SHARED_LIBRARIES+= \ libnativewindow \ @@ -477,6 +515,11 @@ LOCAL_SHARED_LIBRARIES+= \ android.hardware.neuralnetworks@1.2 endif # PLATFORM_VERSION == Q +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # R or later + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ libOpenCL @@ -525,6 +568,12 @@ LOCAL_CFLAGS += \ -DBOOST_NO_AUTO_PTR endif # PLATFORM_VERSION == Q or later +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) +LOCAL_CFLAGS+= \ + -DARMNN_ANDROID_R +endif # R or later + LOCAL_SRC_FILES := \ service.cpp @@ -564,6 +613,11 @@ 
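The -DARMNN_ANDROID_R flag these Android.mk stanzas add is consumed on the C++ side; the guard below mirrors what this same patch adds in ConversionUtils.hpp and the ArmnnDriverImpl headers:

    #ifdef ARMNN_ANDROID_R
    // R moved the NN HAL types behind android::nn::hal, so import them
    // and pin OperandType to the HAL definition.
    using namespace android::nn::hal;
    using OperandType = android::nn::hal::OperandType;
    #endif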
LOCAL_SHARED_LIBRARIES+= \ android.hardware.neuralnetworks@1.2 endif # PLATFORM_VERSION == Q +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # PLATFORM_VERSION == R + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ libOpenCL @@ -605,6 +659,12 @@ LOCAL_CFLAGS += \ -UNDEBUG endif # ARMNN_DRIVER_DEBUG == 1 +# Required to build with the changes made to the Android ML framework specific to Android R +ifeq ($(PLATFORM_VERSION),R) +LOCAL_CFLAGS+= \ + -DARMNN_ANDROID_R +endif # R or later + LOCAL_SRC_FILES := \ service.cpp @@ -640,6 +700,11 @@ LOCAL_SHARED_LIBRARIES := \ android.hardware.neuralnetworks@1.1 \ android.hardware.neuralnetworks@1.2 +ifeq ($(R_OR_LATER),1) +LOCAL_SHARED_LIBRARIES+= \ + android.hardware.neuralnetworks@1.3 +endif # R or later + ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) LOCAL_SHARED_LIBRARIES+= \ libOpenCL diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp index 14af3c06..eab95989 100644 --- a/ArmnnDriverImpl.cpp +++ b/ArmnnDriverImpl.cpp @@ -26,7 +26,7 @@ namespace { void NotifyCallbackAndCheck(const sp& callback, - ErrorStatus errorStatus, + V1_0::ErrorStatus errorStatus, const sp& preparedModelPtr) { Return returned = callback->notify(errorStatus, preparedModelPtr); @@ -38,9 +38,9 @@ void NotifyCallbackAndCheck(const sp& callback, } } -Return FailPrepareModel(ErrorStatus error, - const string& message, - const sp& callback) +Return FailPrepareModel(V1_0::ErrorStatus error, + const string& message, + const sp& callback) { ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str()); NotifyCallbackAndCheck(callback, error, nullptr); @@ -54,7 +54,7 @@ namespace armnn_driver { template -Return ArmnnDriverImpl::prepareModel( +Return ArmnnDriverImpl::prepareModel( const armnn::IRuntimePtr& runtime, const armnn::IGpuAccTunedParametersPtr& clTunedParameters, const DriverOptions& options, @@ -67,17 +67,17 @@ Return ArmnnDriverImpl::prepareModel( if (cb.get() == nullptr) { ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel"); - return ErrorStatus::INVALID_ARGUMENT; + return V1_0::ErrorStatus::INVALID_ARGUMENT; } if (!runtime) { - return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb); + return FailPrepareModel(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb); } if (!android::nn::validateModel(model)) { - return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb); + return FailPrepareModel(V1_0::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb); } // Deliberately ignore any unsupported operations requested by the options - @@ -90,8 +90,8 @@ Return ArmnnDriverImpl::prepareModel( if (modelConverter.GetConversionResult() != ConversionResult::Success) { - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb); + return V1_0::ErrorStatus::NONE; } // Optimize the network @@ -112,8 +112,8 @@ Return ArmnnDriverImpl::prepareModel( { stringstream message; message << "Exception (" << e.what() << ") caught from optimize."; - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Check that the optimized network is valid. 
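One convention in the prepareModel hunks around here deserves a callout: when conversion or optimization fails, the driver notifies the callback with GENERAL_FAILURE yet returns NONE from the HIDL method, because the return value only reports whether the call itself was accepted. A condensed, hypothetical sketch of that flow:

    // The real logic is the surrounding diff; this only isolates the
    // "fail via callback, succeed via return value" convention.
    Return<V1_0::ErrorStatus> PrepareModelSketch(
        const android::sp<V1_0::IPreparedModelCallback>& cb, bool converted)
    {
        if (!converted)
        {
            cb->notify(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr); // outcome
            return V1_0::ErrorStatus::NONE;                          // call status
        }
        // ... load the network and notify NONE with the prepared model ...
        return V1_0::ErrorStatus::NONE;
    }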
@@ -125,8 +125,8 @@ Return ArmnnDriverImpl::prepareModel( { message << "\n" << msg; } - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Export the optimized network graph to a dot file if an output dump directory @@ -139,15 +139,15 @@ Return ArmnnDriverImpl::prepareModel( { if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success) { - return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb); + return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb); } } catch (std::exception& e) { stringstream message; message << "Exception (" << e.what()<< ") caught from LoadNetwork."; - FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb); - return ErrorStatus::NONE; + FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb); + return V1_0::ErrorStatus::NONE; } // Now that we have a networkId for the graph rename the dump file to use it @@ -168,7 +168,7 @@ Return ArmnnDriverImpl::prepareModel( // this is enabled) before the first 'real' inference which removes the overhead of the first inference. if (!preparedModel->ExecuteWithDummyInputs()) { - return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb); + return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb); } if (clTunedParameters && @@ -186,9 +186,9 @@ Return ArmnnDriverImpl::prepareModel( } } - NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel); + NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel); - return ErrorStatus::NONE; + return V1_0::ErrorStatus::NONE; } template @@ -227,14 +227,14 @@ Return ArmnnDriverImpl::getSupportedOperations(const armnn::IRu if (!runtime) { - cb(ErrorStatus::DEVICE_UNAVAILABLE, result); + cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, result); return Void(); } // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway. 
if (!android::nn::validateModel(model)) { - cb(ErrorStatus::INVALID_ARGUMENT, result); + cb(V1_0::ErrorStatus::INVALID_ARGUMENT, result); return Void(); } @@ -246,7 +246,7 @@ Return ArmnnDriverImpl::getSupportedOperations(const armnn::IRu if (modelConverter.GetConversionResult() != ConversionResult::Success && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature) { - cb(ErrorStatus::GENERAL_FAILURE, result); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, result); return Void(); } @@ -259,7 +259,7 @@ Return ArmnnDriverImpl::getSupportedOperations(const armnn::IRu result.push_back(operationSupported); } - cb(ErrorStatus::NONE, result); + cb(V1_0::ErrorStatus::NONE, result); return Void(); } diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp index 49f0975e..c5b17781 100644 --- a/ArmnnDriverImpl.hpp +++ b/ArmnnDriverImpl.hpp @@ -9,6 +9,10 @@ #include +#ifdef ARMNN_ANDROID_R +using namespace android::nn::hal; +#endif + namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; @@ -32,7 +36,7 @@ public: const HalModel& model, HalGetSupportedOperations_cb); - static Return prepareModel( + static Return prepareModel( const armnn::IRuntimePtr& runtime, const armnn::IGpuAccTunedParametersPtr& clTunedParameters, const DriverOptions& options, diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp index 0899430c..2cd560d7 100644 --- a/ArmnnPreparedModel.cpp +++ b/ArmnnPreparedModel.cpp @@ -11,12 +11,8 @@ #include #include #include - -#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) -// The headers of the ML framework have changed between Android O and Android P. -// The validation functions have been moved into their own header, ValidateHal.h. #include -#endif + #include #include @@ -27,7 +23,7 @@ namespace { using namespace armnn_driver; -void NotifyCallbackAndCheck(const ::android::sp& callback, ErrorStatus errorStatus, +void NotifyCallbackAndCheck(const ::android::sp& callback, V1_0::ErrorStatus errorStatus, std::string callingFunction) { Return returned = callback->notify(errorStatus); @@ -139,21 +135,22 @@ ArmnnPreparedModel::~ArmnnPreparedModel() } template -Return ArmnnPreparedModel::execute(const Request& request, - const ::android::sp& callback) +Return ArmnnPreparedModel::execute( + const V1_0::Request& request, + const ::android::sp& callback) { ALOGV("ArmnnPreparedModel::execute(): %s", GetModelSummary(m_Model).c_str()); m_RequestCount++; if (callback.get() == nullptr) { ALOGE("ArmnnPreparedModel::execute invalid callback passed"); - return ErrorStatus::INVALID_ARGUMENT; + return V1_0::ErrorStatus::INVALID_ARGUMENT; } if (!android::nn::validateRequest(request, m_Model)) { - NotifyCallbackAndCheck(callback, ErrorStatus::INVALID_ARGUMENT, "ArmnnPreparedModel::execute"); - return ErrorStatus::INVALID_ARGUMENT; + NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::INVALID_ARGUMENT, "ArmnnPreparedModel::execute"); + return V1_0::ErrorStatus::INVALID_ARGUMENT; } if (!m_RequestInputsAndOutputsDumpDir.empty()) @@ -170,8 +167,8 @@ Return ArmnnPreparedModel::execute(const Request& reque auto pMemPools = std::make_shared>(); if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools)) { - NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); - return ErrorStatus::GENERAL_FAILURE; + NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } // add the inputs and 
outputs with their data @@ -187,7 +184,7 @@ Return ArmnnPreparedModel::execute(const Request& reque if (inputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. Error converting request input %u to tensor", i); - return ErrorStatus::GENERAL_FAILURE; + return V1_0::ErrorStatus::GENERAL_FAILURE; } pInputTensors->emplace_back(i, inputTensor); @@ -203,7 +200,7 @@ Return ArmnnPreparedModel::execute(const Request& reque if (outputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. Error converting request output %u to tensor", i); - return ErrorStatus::GENERAL_FAILURE; + return V1_0::ErrorStatus::GENERAL_FAILURE; } pOutputTensors->emplace_back(i, outputTensor); @@ -212,19 +209,19 @@ Return ArmnnPreparedModel::execute(const Request& reque catch (armnn::Exception& e) { ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what()); - NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); - return ErrorStatus::GENERAL_FAILURE; + NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } catch (std::exception& e) { ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what()); - NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); - return ErrorStatus::GENERAL_FAILURE; + NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } ALOGV("ArmnnPreparedModel::execute(...) before PostMsg"); - auto cb = [callback](ErrorStatus errorStatus, std::string callingFunction) + auto cb = [callback](V1_0::ErrorStatus errorStatus, std::string callingFunction) { NotifyCallbackAndCheck(callback, errorStatus, callingFunction); }; @@ -234,7 +231,7 @@ Return ArmnnPreparedModel::execute(const Request& reque // post the request for asynchronous execution m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb); ALOGV("ArmnnPreparedModel::execute(...) after PostMsg"); - return ErrorStatus::NONE; // successfully queued + return V1_0::ErrorStatus::NONE; // successfully queued } template @@ -255,20 +252,20 @@ void ArmnnPreparedModel::ExecuteGraph( if (status != armnn::Status::Success) { ALOGW("EnqueueWorkload failed"); - cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); return; } } catch (armnn::Exception& e) { ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what()); - cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); return; } catch (std::exception& e) { ALOGE("std::exception caught from EnqueueWorkload: %s", e.what()); - cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph"); return; } @@ -279,10 +276,16 @@ void ArmnnPreparedModel::ExecuteGraph( // this is simpler and is what the CpuExecutor does. for (android::nn::RunTimePoolInfo& pool : *pMemPools) { - pool.update(); + // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where + // update() has been removed and flush() added. + #if defined(ARMNN_ANDROID_R) // Use the new Android implementation. 
+ pool.flush(); + #else + pool.update(); + #endif } - cb.callback(ErrorStatus::NONE, "ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::NONE, "ExecuteGraph"); } template diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp index 33be972f..270a9339 100644 --- a/ArmnnPreparedModel.hpp +++ b/ArmnnPreparedModel.hpp @@ -38,8 +38,8 @@ public: virtual ~ArmnnPreparedModel(); - virtual Return execute(const Request& request, - const ::android::sp& callback) override; + virtual Return execute(const V1_0::Request& request, + const ::android::sp& callback) override; /// execute the graph prepared from the request void ExecuteGraph(std::shared_ptr>& pMemPools, diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp index 84ff6e24..9b790443 100644 --- a/ArmnnPreparedModel_1_2.cpp +++ b/ArmnnPreparedModel_1_2.cpp @@ -41,7 +41,7 @@ unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint) } void NotifyCallbackAndCheck(const ::android::sp& callback, - ErrorStatus errorStatus, + V1_0::ErrorStatus errorStatus, std::vector, const Timing, std::string callingFunction) @@ -56,7 +56,7 @@ void NotifyCallbackAndCheck(const ::android::sp& callb } void NotifyCallbackAndCheck(const ::android::sp& callback, - ErrorStatus errorStatus, + V1_0::ErrorStatus errorStatus, std::vector outputShapes, const Timing timing, std::string callingFunction) @@ -172,16 +172,16 @@ ArmnnPreparedModel_1_2::~ArmnnPreparedModel_1_2() } template -Return ArmnnPreparedModel_1_2::execute(const Request& request, +Return ArmnnPreparedModel_1_2::execute(const V1_0::Request& request, const ::android::sp& callback) { if (callback.get() == nullptr) { ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed"); - return ErrorStatus::INVALID_ARGUMENT; + return V1_0::ErrorStatus::INVALID_ARGUMENT; } - auto cb = [callback](ErrorStatus errorStatus, + auto cb = [callback](V1_0::ErrorStatus errorStatus, std::vector outputShapes, const Timing& timing, std::string callingFunction) @@ -193,17 +193,18 @@ Return ArmnnPreparedModel_1_2::execute(const Request& } template -Return ArmnnPreparedModel_1_2::execute_1_2(const Request& request, - MeasureTiming measureTiming, - const sp& callback) +Return ArmnnPreparedModel_1_2::execute_1_2( + const V1_0::Request& request, + MeasureTiming measureTiming, + const sp& callback) { if (callback.get() == nullptr) { ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed"); - return ErrorStatus::INVALID_ARGUMENT; + return V1_0::ErrorStatus::INVALID_ARGUMENT; } - auto cb = [callback](ErrorStatus errorStatus, + auto cb = [callback](V1_0::ErrorStatus errorStatus, std::vector outputShapes, const Timing& timing, std::string callingFunction) @@ -215,7 +216,7 @@ Return ArmnnPreparedModel_1_2::execute_1_2(const Reque } template -Return ArmnnPreparedModel_1_2::executeSynchronously(const Request& request, +Return ArmnnPreparedModel_1_2::executeSynchronously(const V1_0::Request& request, MeasureTiming measureTiming, executeSynchronously_cb cb) { @@ -238,7 +239,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (!android::nn::validateRequest(request, m_Model)) { ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid request model"); - cb(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming); + cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming); return Void(); } @@ -252,7 +253,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools)) { - cb(ErrorStatus::GENERAL_FAILURE, {}, 
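This #if block is the one genuine runtime change for R: android::nn::RunTimePoolInfo lost update() and gained flush(), but both write the output pools back to shared memory so the client sees the results. A version-agnostic helper would look like this sketch (same guard as the diff, hypothetical name):

    // Write every memory pool back to the client after execution.
    void CommitPoolsSketch(std::vector<android::nn::RunTimePoolInfo>& memPools)
    {
        for (android::nn::RunTimePoolInfo& pool : memPools)
        {
    #if defined(ARMNN_ANDROID_R)
            pool.flush();   // R and later
    #else
            pool.update();  // P and Q
    #endif
        }
    }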
g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } std::vector outputShapes(request.outputs.size()); @@ -270,7 +271,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (inputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. Error converting request input %u to tensor", i); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } @@ -288,7 +289,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (outputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. Error converting request output %u to tensor", i); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } const size_t outputSize = outputTensorInfo.GetNumBytes(); @@ -310,7 +311,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (bufferSize < outputSize) { ALOGW("ArmnnPreparedModel_1_2::Execute failed"); - cb(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming); + cb(V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming); return Void(); } @@ -320,13 +321,13 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ catch (armnn::Exception& e) { ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what()); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } catch (std::exception& e) { ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what()); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } @@ -351,20 +352,20 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ if (status != armnn::Status::Success) { ALOGW("EnqueueWorkload failed"); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } } catch (armnn::Exception& e) { ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what()); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } catch (std::exception& e) { ALOGE("std::exception caught from EnqueueWorkload: %s", e.what()); - cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming); return Void(); } @@ -375,8 +376,15 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ // this is simpler and is what the CpuExecutor does. for (android::nn::RunTimePoolInfo& pool : *pMemPools) { - pool.update(); + // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where + // update() has been removed and flush() added. + #if defined(ARMNN_ANDROID_R) // Use the new Android implementation. 
+ pool.flush(); + #else + pool.update(); + #endif } + ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() after Execution"); if (measureTiming == MeasureTiming::YES) @@ -387,11 +395,11 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ timing.timeInDriver = MicrosecondsDuration(driverEnd, driverStart); ALOGV("ArmnnPreparedModel_1_2::executeSynchronously timing Device = %lu Driver = %lu", timing.timeOnDevice, timing.timeInDriver); - cb(ErrorStatus::NONE, outputShapes, timing); + cb(V1_0::ErrorStatus::NONE, outputShapes, timing); } else { - cb(ErrorStatus::NONE, outputShapes, g_NoTiming); + cb(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming); } return Void(); } @@ -402,7 +410,7 @@ Return ArmnnPreparedModel_1_2::executeSynchronously(const Requ /// ml/+/refs/tags/android-10.0.0_r20/nn/common/ExecutionBurstServer.cpp class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache { public: - ArmnnBurstExecutorWithCache(IPreparedModel* preparedModel) + ArmnnBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel) : m_PreparedModel(preparedModel) {} @@ -422,8 +430,8 @@ public: m_MemoryCache.erase(slot); } - std::tuple, Timing> execute( - const Request& request, const std::vector& slots, + std::tuple, Timing> execute( + const V1_0::Request& request, const std::vector& slots, MeasureTiming measure) override { ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute"); @@ -434,14 +442,14 @@ public: return m_MemoryCache[slot]; }); - Request fullRequest = request; + V1_0::Request fullRequest = request; fullRequest.pools = std::move(pools); // Setup Callback - ErrorStatus returnedStatus = ErrorStatus::GENERAL_FAILURE; + V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE; hidl_vec returnedOutputShapes; Timing returnedTiming; - auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](ErrorStatus status, + auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](V1_0::ErrorStatus status, const hidl_vec& outputShapes, const Timing& timing) { @@ -454,7 +462,7 @@ public: ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing"); const Return ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb); - if (!ret.isOk() || returnedStatus != ErrorStatus::NONE) + if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) { ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing"); } @@ -462,7 +470,7 @@ public: } private: - IPreparedModel* const m_PreparedModel; + V1_2::IPreparedModel* const m_PreparedModel; std::map m_MemoryCache; }; @@ -484,11 +492,11 @@ Return ArmnnPreparedModel_1_2::configureExecutionBurst( if (burst == nullptr) { - cb(ErrorStatus::GENERAL_FAILURE, {}); + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}); } else { - cb(ErrorStatus::NONE, burst); + cb(V1_0::ErrorStatus::NONE, burst); } return Void(); } @@ -546,7 +554,7 @@ void ArmnnPreparedModel_1_2::ExecuteGraph( if (status != armnn::Status::Success) { ALOGW("EnqueueWorkload failed"); - cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph"); return; } @@ -554,13 +562,13 @@ void ArmnnPreparedModel_1_2::ExecuteGraph( catch (armnn::Exception& e) { ALOGW("armnn:Exception caught from EnqueueWorkload: %s", e.what()); - cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, 
"ArmnnPreparedModel_1_2::ExecuteGraph"); return; } catch (std::exception& e) { ALOGE("std::exception caught from EnqueueWorkload: %s", e.what()); - cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph"); return; } @@ -571,7 +579,13 @@ void ArmnnPreparedModel_1_2::ExecuteGraph( // this is simpler and is what the CpuExecutor does. for (android::nn::RunTimePoolInfo& pool : *pMemPools) { - pool.update(); + // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where + // update() has been removed and flush() added. + #if defined(ARMNN_ANDROID_R) // Use the new Android implementation. + pool.flush(); + #else + pool.update(); + #endif } if (cb.measureTiming == MeasureTiming::YES) @@ -580,9 +594,9 @@ void ArmnnPreparedModel_1_2::ExecuteGraph( Timing timing; timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart); timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.driverStart); - cb.callback(ErrorStatus::NONE, outputShapes, timing, "ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::NONE, outputShapes, timing, "ExecuteGraph"); } else { - cb.callback(ErrorStatus::NONE, outputShapes, g_NoTiming, "ExecuteGraph"); + cb.callback(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming, "ExecuteGraph"); } } @@ -633,9 +647,9 @@ bool ArmnnPreparedModel_1_2::ExecuteWithDummyInputs() } template -Return ArmnnPreparedModel_1_2::Execute(const Request& request, - MeasureTiming measureTiming, - armnnExecuteCallback_1_2 callback) +Return ArmnnPreparedModel_1_2::Execute(const V1_0::Request& request, + MeasureTiming measureTiming, + armnnExecuteCallback_1_2 callback) { TimePoint driverStart; @@ -649,8 +663,8 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& if (!android::nn::validateRequest(request, m_Model)) { - callback(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::INVALID_ARGUMENT; + callback(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::INVALID_ARGUMENT; } if (!m_RequestInputsAndOutputsDumpDir.empty()) @@ -668,8 +682,8 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools)) { - callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::GENERAL_FAILURE; + callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } // add the inputs and outputs with their data @@ -686,8 +700,8 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& if (inputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. Error converting request input %u to tensor", i); - callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::GENERAL_FAILURE; + callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } pInputTensors->emplace_back(i, inputTensor); @@ -705,8 +719,8 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& if (outputTensor.GetMemoryArea() == nullptr) { ALOGE("Cannot execute request. 
Error converting request output %u to tensor", i); - callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::GENERAL_FAILURE; + callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } const size_t outputSize = outputTensorInfo.GetNumBytes(); @@ -729,25 +743,25 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& if (bufferSize < outputSize) { ALOGW("ArmnnPreparedModel_1_2::Execute failed"); - callback(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, + callback(V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::Execute"); - return ErrorStatus::NONE; + return V1_0::ErrorStatus::NONE; } } } catch (armnn::Exception& e) { ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what()); - callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::GENERAL_FAILURE; + callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } catch (std::exception& e) { ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what()); - callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); - return ErrorStatus::GENERAL_FAILURE; + callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute"); + return V1_0::ErrorStatus::GENERAL_FAILURE; } ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg"); @@ -758,7 +772,7 @@ Return ArmnnPreparedModel_1_2::Execute(const Request& armnnCb.driverStart = driverStart; m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb); ALOGV("ArmnnPreparedModel_1_2::execute(...) 
after PostMsg"); - return ErrorStatus::NONE; + return V1_0::ErrorStatus::NONE; } #ifdef ARMNN_ANDROID_NN_V1_2 diff --git a/ArmnnPreparedModel_1_2.hpp b/ArmnnPreparedModel_1_2.hpp index b97895e8..f609ef7e 100644 --- a/ArmnnPreparedModel_1_2.hpp +++ b/ArmnnPreparedModel_1_2.hpp @@ -45,13 +45,13 @@ public: virtual ~ArmnnPreparedModel_1_2(); - virtual Return execute(const Request& request, - const sp& callback) override; + virtual Return execute(const V1_0::Request& request, + const sp& callback) override; - virtual Return execute_1_2(const Request& request, MeasureTiming measure, - const sp& callback) override; + virtual Return execute_1_2(const V1_0::Request& request, MeasureTiming measure, + const sp& callback) override; - virtual Return executeSynchronously(const Request &request, + virtual Return executeSynchronously(const V1_0::Request &request, MeasureTiming measure, V1_2::IPreparedModel::executeSynchronously_cb cb) override; @@ -72,9 +72,9 @@ public: bool ExecuteWithDummyInputs(); private: - Return Execute(const Request& request, - MeasureTiming measureTiming, - armnnExecuteCallback_1_2 callback); + Return Execute(const V1_0::Request& request, + MeasureTiming measureTiming, + armnnExecuteCallback_1_2 callback); template void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings); diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index eea70d7b..997c9cc2 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -35,6 +35,10 @@ namespace armnn_driver /// Helper classes /// +#ifdef ARMNN_ANDROID_R +using OperandType = android::nn::hal::OperandType; +#endif + struct ConversionData { ConversionData(const std::vector& backends) diff --git a/Utils.cpp b/Utils.cpp index 3583d62d..c95f6e12 100644 --- a/Utils.cpp +++ b/Utils.cpp @@ -69,13 +69,7 @@ void* GetMemoryFromPool(DataLocation location, const std::vector diff --git a/androidnn.go b/androidnn.go deleted file mode 100644 index 92b7b2aa..00000000 --- a/androidnn.go +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright © 2017 ARM Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT
-//
-
-package armnn_nn_driver
-
-import (
-    "android/soong/android"
-    "android/soong/cc"
-)
-
-func globalFlags(ctx android.BaseContext) []string {
-    var cppflags []string
-
-    if ctx.AConfig().PlatformVersionName() == "Q" || ctx.AConfig().PlatformVersionName() == "10" {
-        cppflags = append(cppflags, "-fno-addrsig")
-    }
-
-    return cppflags
-}
-
-func armnnNNDriverDefaults(ctx android.LoadHookContext) {
-    type props struct {
-        Cppflags []string
-    }
-
-    p := &props{}
-    p.Cppflags = globalFlags(ctx)
-
-    ctx.AppendProperties(p)
-}
-
-func init() {
-
-    android.RegisterModuleType("armnn_nn_driver_defaults", armnnNNDriverDefaultsFactory)
-}
-
-func armnnNNDriverDefaultsFactory() android.Module {
-
-    module := cc.DefaultsFactory()
-    android.AddLoadHook(module, armnnNNDriverDefaults)
-    return module
-}
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index 529371e1..10ca3aec 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -78,7 +78,7 @@ void MeanTestImpl(const TestTensor& input,
     outArg.dimensions = expectedOutput.GetDimensions();
 
     // Make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{ inArg };
     request.outputs = hidl_vec<RequestArgument>{ outArg };
 
@@ -89,8 +89,8 @@ void MeanTestImpl(const TestTensor& input,
     android::sp<IMemory> outMemory = AddPoolAndGetData<float>(expectedOutput.GetNumElements(), request);
     const float* outputData = static_cast<const float*>(static_cast<void*>(outMemory->getPointer()));
 
-    ErrorStatus execStatus = Execute(preparedModel, request);
-    BOOST_TEST(execStatus == ErrorStatus::NONE);
+    V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
+    BOOST_TEST(execStatus == V1_0::ErrorStatus::NONE);
 
     const float* expectedOutputData = expectedOutput.GetData();
     for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index 4d4238ba..5679ca22 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -76,7 +76,7 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
     output.dimensions = expectedOutputTensor.GetDimensions();
 
     // make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index 8a769db2..2bbd7bed 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -57,8 +57,9 @@ struct CapabilitiesFixture
     }
 };
 
-void CheckOperandType(const V1_2::Capabilities& capabilities, OperandType type, float execTime, float powerUsage)
+void CheckOperandType(const V1_2::Capabilities& capabilities, V1_2::OperandType type, float execTime, float powerUsage)
 {
+    using namespace armnn_driver::hal_1_2;
     PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
     BOOST_ASSERT(perfInfo.execTime == execTime);
     BOOST_ASSERT(perfInfo.powerUsage == powerUsage);
@@ -71,28 +72,28 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
     using namespace armnn_driver::hal_1_2;
     using namespace android::nn;
 
-    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+    auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
     {
-        CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
-        CheckOperandType(capabilities, OperandType::FLOAT32, 2.2f, 2.3f);
-        CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, 2.4f, 2.5f);
-        CheckOperandType(capabilities, OperandType::FLOAT16, 2.6f, 2.7f);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, 2.8f, 2.9f);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, 3.0f, 3.1f);
-        CheckOperandType(capabilities, OperandType::TENSOR_INT32, 3.2f, 3.3f);
-        CheckOperandType(capabilities, OperandType::INT32, 3.4f, 3.5f);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, 2.8f, 2.9f);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, 2.8f, 2.9f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
+        CheckOperandType(capabilities, V1_2::OperandType::FLOAT32, 2.2f, 2.3f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT16, 2.4f, 2.5f);
+        CheckOperandType(capabilities, V1_2::OperandType::FLOAT16, 2.6f, 2.7f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_ASYMM, 2.8f, 2.9f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_SYMM, 3.0f, 3.1f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_INT32, 3.2f, 3.3f);
+        CheckOperandType(capabilities, V1_2::OperandType::INT32, 3.4f, 3.5f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM, 2.8f, 2.9f);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, 2.8f, 2.9f);
 
         // Unsupported operands take FLT_MAX value
-        CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
-
-        BOOST_ASSERT(error == ErrorStatus::NONE);
+        CheckOperandType(capabilities, V1_2::OperandType::UINT32, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::BOOL, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+
+        BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
     };
 
     __system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
@@ -129,28 +130,31 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
 
     float defaultValue = .1f;
 
-    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+    auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
     {
-        CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::FLOAT32, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::FLOAT16, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_INT32, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::INT32, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, defaultValue, defaultValue);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT32, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::FLOAT32, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT16, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::FLOAT16, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_ASYMM, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_SYMM, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_INT32, defaultValue, defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::INT32, defaultValue, defaultValue);
+        CheckOperandType(capabilities,
+                         V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+                         defaultValue,
+                         defaultValue);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM, defaultValue, defaultValue);
 
         // Unsupported operands take FLT_MAX value
-        CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
-        CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
-
-        BOOST_ASSERT(error == ErrorStatus::NONE);
+        CheckOperandType(capabilities, V1_2::OperandType::UINT32, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::BOOL, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
+        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+
+        BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
     };
 
     armnn::IRuntime::CreationOptions options;
diff --git a/test/Android.mk b/test/Android.mk
index 13a36b54..0448d187 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -42,12 +42,11 @@ LOCAL_CFLAGS := \
     -O0 \
     -UNDEBUG
 
-ifeq ($(P_OR_LATER),1)
-# Required to build with the changes made to the Android ML framework starting from Android P,
-# regardless of the HAL version used for the build.
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
 LOCAL_CFLAGS+= \
-    -DARMNN_ANDROID_P
-endif # PLATFORM_VERSION == 9
+    -DARMNN_ANDROID_R
+endif # R or later
 
 ifeq ($(Q_OR_LATER),1)
 LOCAL_CFLAGS += \
@@ -108,6 +107,11 @@ LOCAL_SHARED_LIBRARIES+= \
     android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+    android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
     libOpenCL
@@ -148,9 +152,14 @@ LOCAL_CFLAGS := \
     -Werror \
     -O0 \
     -UNDEBUG \
-    -DARMNN_ANDROID_P \
     -DARMNN_ANDROID_NN_V1_1
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
+LOCAL_CFLAGS+= \
+    -DARMNN_ANDROID_R
+endif # R or later
+
 ifeq ($(Q_OR_LATER),1)
 LOCAL_CFLAGS += \
     -DBOOST_NO_AUTO_PTR
@@ -207,6 +216,11 @@ LOCAL_SHARED_LIBRARIES+= \
     android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+    android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
     libOpenCL
@@ -245,13 +259,14 @@ LOCAL_CFLAGS := \
     -Werror \
     -O0 \
     -UNDEBUG \
-    -DARMNN_ANDROID_Q \
+    -DBOOST_NO_AUTO_PTR \
     -DARMNN_ANDROID_NN_V1_2
 
-ifeq ($(Q_OR_LATER),1)
-LOCAL_CFLAGS += \
-    -DBOOST_NO_AUTO_PTR
-endif # PLATFORM_VERSION == Q or later
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
+LOCAL_CFLAGS+= \
+    -DARMNN_ANDROID_R
+endif # R or later
 
 LOCAL_SRC_FILES := \
     1.0/Convolution2D.cpp \
@@ -303,6 +318,11 @@ LOCAL_SHARED_LIBRARIES := \
     android.hidl.allocator@1.0 \
     android.hidl.memory@1.0
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+    android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
     libOpenCL
@@ -310,4 +330,4 @@ endif
 
 include $(BUILD_EXECUTABLE)
 
-endif # PLATFORM_VERSION == Q
+endif # PLATFORM_VERSION == Q
\ No newline at end of file
diff --git a/test/Concat.cpp b/test/Concat.cpp
index 9beb67bd..b99e31cc 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -35,8 +35,8 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
               int32_t concatAxis,
               const TestTensor & expectedOutputTensor,
               armnn::Compute computeDevice,
-              ErrorStatus expectedPrepareStatus=ErrorStatus::NONE,
-              ErrorStatus expectedExecStatus=ErrorStatus::NONE)
+              V1_0::ErrorStatus expectedPrepareStatus=V1_0::ErrorStatus::NONE,
+              V1_0::ErrorStatus expectedExecStatus=V1_0::ErrorStatus::NONE)
 {
     std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
     HalPolicy::Model model{};
@@ -59,13 +59,13 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
     model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
 
     // make the prepared model
-    ErrorStatus prepareStatus=ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus=V1_0::ErrorStatus::NONE;
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
                                                                             *driver,
                                                                             prepareStatus,
                                                                             expectedPrepareStatus);
     BOOST_TEST(prepareStatus == expectedPrepareStatus);
-    if (prepareStatus != ErrorStatus::NONE)
+    if (prepareStatus != V1_0::ErrorStatus::NONE)
     {
         // prepare failed, we cannot continue
         return;
@@ -111,7 +111,7 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
     }
 
     // make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
@@ -131,7 +131,7 @@ ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
     auto execStatus = Execute(preparedModel, request, expectedExecStatus);
     BOOST_TEST(execStatus == expectedExecStatus);
 
-    if (execStatus == ErrorStatus::NONE)
+    if (execStatus == V1_0::ErrorStatus::NONE)
     {
         // check the result if there was no error
         const float * expectedOutput = expectedOutputTensor.GetData();
@@ -310,7 +310,7 @@ BOOST_DATA_TEST_CASE(AxisTooBig, COMPUTE_DEVICES)
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
@@ -323,7 +323,7 @@ BOOST_DATA_TEST_CASE(AxisTooSmall, COMPUTE_DEVICES)
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
@@ -333,7 +333,7 @@ BOOST_DATA_TEST_CASE(TooFewInputs, COMPUTE_DEVICES)
     TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
 
     // We need at least two tensors to concatenate
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
 }
@@ -350,7 +350,7 @@ BOOST_DATA_TEST_CASE(MismatchedInputDimensions, COMPUTE_DEVICES)
                            2, 3, 7, 8, 9, 11}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
 }
@@ -362,7 +362,7 @@ BOOST_DATA_TEST_CASE(MismatchedInputRanks, COMPUTE_DEVICES)
     TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
 }
@@ -380,7 +380,7 @@ BOOST_DATA_TEST_CASE(MismatchedOutputDimensions, COMPUTE_DEVICES)
                              2, 3, 7, 8, 9, 11}};
 
     // The input and output dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }
@@ -398,7 +398,7 @@ BOOST_DATA_TEST_CASE(MismatchedOutputRank, COMPUTE_DEVICES)
                              2, 3, 7, 8, 9, 11}};
 
     // The input and output ranks must match
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
 
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 9fe6f46e..ecf25e17 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -75,7 +75,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
     output.dimensions = hidl_vec<uint32_t>{};
 
     // build the requests
-    Request requests[maxRequests];
+    V1_0::Request requests[maxRequests];
     android::sp<IMemory> outMemory[maxRequests];
     float* outdata[maxRequests];
     for (size_t i = 0; i < maxRequests; ++i)
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index 180f57e2..002677fe 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -93,7 +93,7 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled
     output.location = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 3a3c98ff..0bc0cf76 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -15,7 +15,7 @@ namespace neuralnetworks
 namespace V1_0
 {
 
-std::ostream& operator<<(std::ostream& os, ErrorStatus stat)
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat)
 {
     return os << static_cast<int>(stat);
 }
@@ -31,7 +31,7 @@ namespace driverTestHelpers
 using namespace android::hardware;
 using namespace armnn_driver;
 
-Return<void> ExecutionCallback::notify(ErrorStatus status)
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status)
 {
     (void)status;
     ALOGI("ExecutionCallback::notify invoked");
@@ -53,7 +53,7 @@ Return<void> ExecutionCallback::wait()
     return Void();
 }
 
-Return<void> PreparedModelCallback::notify(ErrorStatus status,
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const android::sp<V1_0::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -63,7 +63,7 @@ Return<void> PreparedModelCallback::notify(ErrorStatus status,
 
 #ifdef ARMNN_ANDROID_NN_V1_2
 
-Return<void> PreparedModelCallback_1_2::notify(ErrorStatus status,
+Return<void> PreparedModelCallback_1_2::notify(V1_0::ErrorStatus status,
                                                const android::sp<V1_0::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -71,7 +71,7 @@ Return<void> PreparedModelCallback_1_2::notify(ErrorStatus status,
     return Void();
 }
 
-Return<void> PreparedModelCallback_1_2::notify_1_2(ErrorStatus status,
+Return<void> PreparedModelCallback_1_2::notify_1_2(V1_0::ErrorStatus status,
                                                    const android::sp<V1_2::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -104,15 +104,15 @@ hidl_memory allocateSharedMemory(int64_t size)
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus)
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel(model, cb);
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
         BOOST_TEST((cb->GetPreparedModel() != nullptr));
     }
@@ -123,15 +123,15 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& mode
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus)
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
    {
         BOOST_TEST((cb->GetPreparedModel() != nullptr));
     }
@@ -144,8 +144,8 @@ android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& mode
 
 android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                             armnn_driver::ArmnnDriver& driver,
-                                                            ErrorStatus& prepareStatus,
-                                                            ErrorStatus expectedStatus)
+                                                            V1_0::ErrorStatus& prepareStatus,
+                                                            V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback_1_2> cb(new PreparedModelCallback_1_2());
 
@@ -157,7 +157,7 @@ android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver:
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
         BOOST_TEST((cb->GetPreparedModel_1_2() != nullptr));
     }
@@ -166,23 +166,24 @@ android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver:
 
 #endif
 
-ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
-                    const Request& request,
-                    ErrorStatus expectedStatus)
+V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
+                          const V1_0::Request& request,
+                          V1_0::ErrorStatus expectedStatus)
 {
     BOOST_TEST(preparedModel.get() != nullptr);
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
-    ErrorStatus execStatus = preparedModel->execute(request, cb);
+    V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
     BOOST_TEST(execStatus == expectedStatus);
     ALOGI("Execute: waiting for callback to be invoked");
     cb->wait();
     return execStatus;
 }
 
-android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel, const Request& request)
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
+                                             const V1_0::Request& request)
 {
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
-    BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
+    BOOST_TEST(preparedModel->execute(request, cb) == V1_0::ErrorStatus::NONE);
     ALOGI("ExecuteNoWait: returning callback object");
     return cb;
 }
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 9da02603..7a35b23e 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -12,6 +12,10 @@
 #include
 #include
 
+#include <android/hidl/allocator/1.0/IAllocator.h>
+
+using ::android::hidl::allocator::V1_0::IAllocator;
+
 namespace android
 {
 namespace hardware
@@ -21,7 +25,7 @@ namespace neuralnetworks
 namespace V1_0
 {
 
-std::ostream& operator<<(std::ostream& os, ErrorStatus stat);
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
 
 } // namespace android::hardware::neuralnetworks::V1_0
 } // namespace android::hardware::neuralnetworks
@@ -36,7 +40,7 @@ std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
 
 struct ExecutionCallback : public V1_0::IExecutionCallback
 {
     ExecutionCallback() : mNotified(false) {}
-    Return<void> notify(ErrorStatus status) override;
+    Return<void> notify(V1_0::ErrorStatus status) override;
     /// wait until the callback has notified us that it is done
     Return<void> wait();
 
@@ -52,18 +56,18 @@ class PreparedModelCallback : public V1_0::IPreparedModelCallback
 {
 public:
     PreparedModelCallback()
-        : m_ErrorStatus(ErrorStatus::NONE)
+        : m_ErrorStatus(V1_0::ErrorStatus::NONE)
         , m_PreparedModel()
     { }
     ~PreparedModelCallback() override { }
 
-    Return<void> notify(ErrorStatus status,
+    Return<void> notify(V1_0::ErrorStatus status,
                         const android::sp<V1_0::IPreparedModel>& preparedModel) override;
 
-    ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
+    V1_0::ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
 
     android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
 
 private:
-    ErrorStatus m_ErrorStatus;
+    V1_0::ErrorStatus m_ErrorStatus;
     android::sp<V1_0::IPreparedModel> m_PreparedModel;
 };
 
@@ -73,24 +77,24 @@ class PreparedModelCallback_1_2 : public V1_2::IPreparedModelCallback
 {
 public:
     PreparedModelCallback_1_2()
-        : m_ErrorStatus(ErrorStatus::NONE)
+        : m_ErrorStatus(V1_0::ErrorStatus::NONE)
         , m_PreparedModel()
         , m_PreparedModel_1_2()
     { }
     ~PreparedModelCallback_1_2() override { }
 
-    Return<void> notify(ErrorStatus status, const android::sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify(V1_0::ErrorStatus status, const android::sp<V1_0::IPreparedModel>& preparedModel) override;
 
-    Return<void> notify_1_2(ErrorStatus status, const android::sp<V1_2::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status, const android::sp<V1_2::IPreparedModel>& preparedModel) override;
 
-    ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
+    V1_0::ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
 
     android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
 
     android::sp<V1_2::IPreparedModel> GetPreparedModel_1_2() { return m_PreparedModel_1_2; }
 
 private:
-    ErrorStatus m_ErrorStatus;
+    V1_0::ErrorStatus m_ErrorStatus;
     android::sp<V1_0::IPreparedModel> m_PreparedModel;
     android::sp<V1_2::IPreparedModel> m_PreparedModel_1_2;
 };
 
@@ -100,7 +104,7 @@ private:
 
 hidl_memory allocateSharedMemory(int64_t size);
 
 template<typename T>
-android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request)
+android::sp<IMemory> AddPoolAndGetData(uint32_t size, V1_0::Request& request)
 {
     hidl_memory pool;
 
@@ -119,7 +123,7 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request)
 }
 
 template<typename T>
-void AddPoolAndSetData(uint32_t size, Request& request, const T* data)
+void AddPoolAndSetData(uint32_t size, V1_0::Request& request, const T* data)
 {
     android::sp<IMemory> memory = AddPoolAndGetData<T>(size, request);
 
@@ -201,7 +205,7 @@ void AddTensorOperand(HalModel& model,
                       const hidl_vec<uint32_t>& dimensions,
                       const T* values,
                       HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
-                      HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY,
+                      HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
                       double scale = 0.f,
                       int offset = 0)
 {
@@ -247,7 +251,7 @@ void AddTensorOperand(HalModel& model,
                       const hidl_vec<uint32_t>& dimensions,
                       const std::vector<T>& values,
                       HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32,
-                      HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY,
+                      HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
                       double scale = 0.f,
                       int offset = 0)
 {
@@ -306,15 +310,15 @@ void AddOutputOperand(HalModel& model,
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 #if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2)
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 #endif
 
@@ -322,7 +326,7 @@ template<typename HalModel>
 android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
                                                armnn_driver::ArmnnDriver& driver)
 {
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
     return PrepareModelWithStatus(model, driver, prepareStatus);
 }
 
@@ -330,25 +334,25 @@ android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
 
 android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                              armnn_driver::ArmnnDriver& driver,
-                                                             ErrorStatus& prepareStatus,
-                                                             ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                             V1_0::ErrorStatus& prepareStatus,
+                                                             V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 template<typename HalModel>
 android::sp<V1_2::IPreparedModel> PrepareModel_1_2(const HalModel& model,
                                                    armnn_driver::ArmnnDriver& driver)
 {
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
     return PrepareModelWithStatus_1_2(model, driver, prepareStatus);
 }
 
 #endif
 
-ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
-                    const Request& request,
-                    ErrorStatus expectedStatus = ErrorStatus::NONE);
+V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
+                          const V1_0::Request& request,
+                          V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
-                                             const Request& request);
+                                             const V1_0::Request& request);
 
 } // namespace driverTestHelpers
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index e8b5dc26..a6983dd8 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
     output.location = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
@@ -87,10 +87,10 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     std::vector<bool> sup;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    ArmnnDriver::getSupportedOperations_cb cb = [&](V1_0::ErrorStatus status, const std::vector<bool>& supported)
     {
         error = status;
         sup = supported;
@@ -143,7 +143,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
     output.location = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
@@ -173,10 +173,10 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     std::vector<bool> sup;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    ArmnnDriver::getSupportedOperations_cb cb = [&](V1_0::ErrorStatus status, const std::vector<bool>& supported)
     {
         error = status;
         sup = supported;
@@ -229,7 +229,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
     output.location = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 3788e66a..961ab166 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -22,10 +22,10 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -52,7 +52,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model0.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model0, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == true);
 
@@ -81,19 +81,8 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
 
     driver->getSupportedOperations(model1, cb);
 
-#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q)
-    // In Android P, android::nn::validateModel returns INVALID_ARGUMENT, because of the wrong number of inputs for the
-    // fully connected layer (1 instead of 4)
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
     BOOST_TEST(supported.empty());
-#else
-    // In Android O, android::nn::validateModel indicates that the second (wrong) fully connected layer in unsupported
-    // in the vector of flags returned by the callback
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)2);
-    BOOST_TEST(supported[0] == true);
-    BOOST_TEST(supported[1] == false);
-#endif
 
     // Test Broadcast on add/mul operators
     HalPolicy::Model model2 = {};
@@ -115,7 +104,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model2.operations[1].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model2, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)2);
     BOOST_TEST(supported[0] == true);
     BOOST_TEST(supported[1] == true);
@@ -144,7 +133,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
 
     driver->getSupportedOperations(model3, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == false);
 
@@ -159,7 +148,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model4.operations[0].outputs = hidl_vec<uint32_t>{0};
 
     driver->getSupportedOperations(model4, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
     BOOST_TEST(supported.empty());
 }
 
@@ -170,10 +159,10 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -233,7 +222,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
 
     // We are testing that the unsupported layers return false and the test continues rather than failing and stopping
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)3);
     BOOST_TEST(supported[0] == false);
     BOOST_TEST(supported[1] == true);
@@ -246,10 +235,10 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -261,7 +250,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
 
     // Memory pool mapping should fail, we should report an error
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus != (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.empty());
 }
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index f0d3d853..d3e03d75 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -54,18 +54,18 @@ bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
 
 // Helper function to create an OperandLifeTime::NO_VALUE for testing.
 // To be used on optional input operands that have no values - these are valid and should be tested.
-OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
+V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
 {
     // Only create a NO_VALUE for optional operands that have no elements
     if (dimensions.size() == 0 || dimensions[0] == 0)
     {
-        return OperandLifeTime::NO_VALUE;
+        return V1_0::OperandLifeTime::NO_VALUE;
    }
-    return OperandLifeTime::CONSTANT_COPY;
+    return V1_0::OperandLifeTime::CONSTANT_COPY;
 }
 
 template<typename HalModel>
-void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const Request& request)
+void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
 {
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
     if (preparedModel.get() != nullptr)
@@ -79,7 +79,7 @@ void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, cons
 template<>
 void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                            armnn_driver::ArmnnDriver& driver,
-                                                           const Request& request)
+                                                           const V1_0::Request& request)
 {
     android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
     if (preparedModel.get() != nullptr)
@@ -362,7 +362,7 @@ void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
     outputArguments[2] = CreateRequestArgument(cellStateOutValue, 5);
     outputArguments[3] = CreateRequestArgument(outputValue, 6);
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
@@ -640,7 +640,7 @@ void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
     outputArguments[0] = CreateRequestArgument(cellStateOutValue, 3);
     outputArguments[1] = CreateRequestArgument(outputValue, 4);
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 5c388cb0..3b629a7a 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -30,10 +30,10 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
     // Making the driver object on the stack causes a weird libc error, so make it on the heap instead
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     V1_0::Capabilities cap;
 
-    auto cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
+    auto cb = [&](V1_0::ErrorStatus status, const V1_0::Capabilities& capabilities)
     {
         error = status;
         cap = capabilities;
@@ -41,7 +41,7 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
 
     driver->getCapabilities(cb);
 
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)error == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(cap.float32Performance.execTime > 0.f);
     BOOST_TEST(cap.float32Performance.powerUsage > 0.f);
    BOOST_TEST(cap.quantized8Performance.execTime > 0.f);
-- 
cgit v1.2.1
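
The hunks above apply one mechanical rule: every unversioned NN HAL name (ErrorStatus, Request, OperandType, OperandLifeTime) is qualified with its explicit interface version, and the R-only namespace move is absorbed behind the ARMNN_ANDROID_R macro. For readers outside the Arm NN tree, a minimal self-contained sketch of that idiom follows. It is not part of the patch, and reportFailure is a hypothetical helper invented purely for illustration:

    // Sketch of the versioning idiom the patch applies throughout; illustrative only.
    #include <android/hardware/neuralnetworks/1.0/types.h>

    // Alias the versioned HIDL namespace once, as the driver headers do.
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    #ifdef ARMNN_ANDROID_R
    // On Android R the NN utility declarations move under android::nn::hal;
    // importing them keeps pre-R and R builds compiling from the same source.
    using namespace android::nn::hal;
    #endif

    // reportFailure is hypothetical, not a driver function. It only shows that
    // spelling the enum through V1_0:: resolves on every supported platform
    // level, whereas the bare ErrorStatus spelling is what the Android R
    // headers no longer provide.
    V1_0::ErrorStatus reportFailure()
    {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

The same reasoning explains the build changes: android.hardware.neuralnetworks@1.3 joins LOCAL_SHARED_LIBRARIES only under R_OR_LATER, so the versioned symbols resolve on R without dragging the 1.3 library into older builds.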