From 8b287c23f5141102555ba869a34397ec720a3e4f Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Fri, 7 Sep 2018 09:25:10 +0100
Subject: IVGCVSW-1806 More Android NN Driver refactoring

* Changed #if defined to #ifdef
* Simplified the Android ML namespace resolution
* Fixed the relative path in some include directives

Change-Id: I46e46faff98559c8042c1a4b8b82007f462df57d
---
 1.0/ArmnnDriver.hpp          |  4 ++--
 1.0/HalPolicy.hpp            |  2 +-
 1.1/ArmnnDriver.hpp          |  4 ++--
 1.1/HalPolicy.hpp            |  2 +-
 ArmnnDriver.hpp              |  4 ++--
 ArmnnDriverImpl.cpp          |  2 +-
 ArmnnPreparedModel.cpp       |  2 +-
 ModelToINetworkConverter.cpp |  2 +-
 RequestThread.cpp            |  2 +-
 test/Concurrent.cpp          |  4 ++--
 test/DriverTestHelpers.cpp   | 12 ++++++------
 test/DriverTestHelpers.hpp   |  8 ++++----
 test/GenericLayerTests.cpp   | 40 ++++++++++++++++++++--------------------
 test/Lstm.cpp                |  4 ++--
 test/Merger.cpp              |  4 ++--
 test/Tests.cpp               |  4 ++--
 test/UtilsTests.cpp          |  2 +-
 17 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index a048973f..52c34c46 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -7,7 +7,7 @@

 #include

-#include "ArmnnDevice.hpp"
+#include "../ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
 #include "HalPolicy.hpp"
@@ -67,4 +67,4 @@ public:
 };

 } // namespace hal_1_0
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index fe41d073..484a99b2 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -5,7 +5,7 @@

 #pragma once

-#include "ConversionUtils.hpp"
+#include "../ConversionUtils.hpp"

 #include
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index ef8bca8a..2548c8ac 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -7,7 +7,7 @@

 #include

-#include "ArmnnDevice.hpp"
+#include "../ArmnnDevice.hpp"
 #include "ArmnnDriverImpl.hpp"
 #include "HalPolicy.hpp"
@@ -114,4 +114,4 @@ public:
 };

 } // namespace hal_1_1
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index 5efe813b..af858781 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -5,7 +5,7 @@

 #pragma once

-#include "ConversionUtils.hpp"
+#include "../ConversionUtils.hpp"

 #include
diff --git a/ArmnnDriver.hpp b/ArmnnDriver.hpp
index fd5cfad0..7c6e5d0b 100644
--- a/ArmnnDriver.hpp
+++ b/ArmnnDriver.hpp
@@ -9,7 +9,7 @@

 #include

-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
 // Using ::android::hardware::neuralnetworks::V1_1

 #include "1.1/ArmnnDriver.hpp"
@@ -29,7 +29,7 @@ public:

 } // namespace armnn_driver

-#else // Fallback to ::android::hardware::neuralnetworks::V1_0.
+#else // Fallback to ::android::hardware::neuralnetworks::V1_0

 #include "1.0/ArmnnDriver.hpp"
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 10da1dd3..5d213659 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -241,7 +241,7 @@ Return ArmnnDriverImpl::getStatus()

 template class ArmnnDriverImpl;

-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
 template class ArmnnDriverImpl;
 #endif
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 85251ef1..d7f727f5 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -301,7 +301,7 @@ void ArmnnPreparedModel::ExecuteWithDummyInputs()

 template class ArmnnPreparedModel;

-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
 template class ArmnnPreparedModel;
 #endif
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 1a632805..8bf84e94 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -167,7 +167,7 @@ bool ModelToINetworkConverter::IsOperationSupported(uint32_t operatio

 template class ModelToINetworkConverter;

-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
 template class ModelToINetworkConverter;
 #endif
diff --git a/RequestThread.cpp b/RequestThread.cpp
index 0b06b51e..4731489e 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -137,7 +137,7 @@ void RequestThread::Process()

 template class RequestThread;

-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
 template class RequestThread;
 #endif
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 92f6f8fb..55a1a395 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -24,7 +24,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
     ALOGI("ConcurrentExecute: entry");

     auto driver = std::make_unique(DriverOptions(armnn::Compute::CpuRef));
-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};

     // add operands
     int32_t actValue = 0;
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)

     // make the fully connected operation
     model.operations.resize(1);
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs = hidl_vec{0, 1, 2, 3};
     model.operations[0].outputs = hidl_vec{4};
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index ded24592..3bc0a20a 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -109,7 +109,7 @@ void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
     memcpy(dst, data, size * sizeof(float));
 }

-android::sp PrepareModelWithStatus(const neuralnetworks::V1_0::Model& model,
+android::sp PrepareModelWithStatus(const V1_0::Model& model,
                                    armnn_driver::ArmnnDriver& driver,
                                    ErrorStatus& prepareStatus,
                                    ErrorStatus expectedStatus)
@@ -126,15 +126,15 @@ android::sp PrepareModelWithStatus(const neuralnetworks::V1_0::M
     return cb->GetPreparedModel();
 }

-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+#ifdef ARMNN_ANDROID_NN_V1_1

-android::sp PrepareModelWithStatus(const neuralnetworks::V1_1::Model& model,
+android::sp PrepareModelWithStatus(const V1_1::Model& model,
                                    armnn_driver::ArmnnDriver& driver,
                                    ErrorStatus& prepareStatus,
                                    ErrorStatus expectedStatus)
 {
     android::sp cb(new PreparedModelCallback());
-    driver.prepareModel_1_1(model, neuralnetworks::V1_1::ExecutionPreference::LOW_POWER, cb);
+    driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);

     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
@@ -172,12 +172,12 @@ template<>
 OperandType TypeToOperandType()
 {
     return OperandType::TENSOR_FLOAT32;
-};
+}

 template<>
 OperandType TypeToOperandType()
 {
     return OperandType::TENSOR_INT32;
-};
+}

 } // namespace driverTestHelpers
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index ce09ee68..cce220e5 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -31,7 +31,7 @@ std::ostream& operator<<(std::ostream& os, ErrorStatus stat);
 namespace driverTestHelpers
 {

-std::ostream& operator<<(std::ostream& os, android::hardware::neuralnetworks::V1_0::ErrorStatus stat);
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);

 struct ExecutionCallback : public IExecutionCallback
 {
@@ -172,14 +172,14 @@ void AddOutputOperand(HalModel& model,
     model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
 }

-android::sp PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_0::Model& model,
+android::sp PrepareModelWithStatus(const V1_0::Model& model,
                                    armnn_driver::ArmnnDriver& driver,
                                    ErrorStatus& prepareStatus,
                                    ErrorStatus expectedStatus = ErrorStatus::NONE);

-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+#ifdef ARMNN_ANDROID_NN_V1_1

-android::sp PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_1::Model& model,
+android::sp PrepareModelWithStatus(const V1_1::Model& model,
                                    armnn_driver::ArmnnDriver& driver,
                                    ErrorStatus& prepareStatus,
                                    ErrorStatus expectedStatus = ErrorStatus::NONE);
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index c66854f4..63198b4c 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
         supported = _supported;
     };

-    neuralnetworks::V1_0::Model model0 = {};
+    V1_0::Model model0 = {};

     // Add operands
     int32_t actValue = 0;
@@ -41,7 +41,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model0.operations.resize(1);

     // Make a correct fully connected operation
-    model0.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model0.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model0.operations[0].inputs = hidl_vec{0, 1, 2, 3};
     model0.operations[0].outputs = hidl_vec{4};
@@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == true);

-    neuralnetworks::V1_0::Model model1 = {};
+    V1_0::Model model1 = {};

     AddInputOperand (model1, hidl_vec{1, 3});
     AddTensorOperand(model1, hidl_vec{1, 3}, weightValue);
@@ -61,14 +61,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model1.operations.resize(2);

     // Make a correct fully connected operation
-    model1.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model1.operations[0].inputs = hidl_vec{0, 1, 2, 3};
     model1.operations[0].outputs = hidl_vec{4};

     // Add an incorrect fully connected operation
     AddIntOperand (model1, actValue);
     AddOutputOperand(model1, hidl_vec{1, 1});
-    model1.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
     model1.operations[1].inputs = hidl_vec{4}; // Only 1 input operand, expected 4
     model1.operations[1].outputs = hidl_vec{5};
@@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
 #endif

     // Test Broadcast on add/mul operators
-    neuralnetworks::V1_0::Model model2 = {};
+    V1_0::Model model2 = {};

     AddInputOperand (model2, hidl_vec{1, 1, 3, 4});
     AddInputOperand (model2, hidl_vec{4});
@@ -99,11 +99,11 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)

     model2.operations.resize(2);

-    model2.operations[0].type = neuralnetworks::V1_0::OperationType::ADD;
+    model2.operations[0].type = V1_0::OperationType::ADD;
     model2.operations[0].inputs = hidl_vec{0, 1, 2};
     model2.operations[0].outputs = hidl_vec{3};

-    model2.operations[1].type = neuralnetworks::V1_0::OperationType::MUL;
+    model2.operations[1].type = V1_0::OperationType::MUL;
     model2.operations[1].inputs = hidl_vec{0, 1, 2};
     model2.operations[1].outputs = hidl_vec{4};
@@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(supported[0] == true);
     BOOST_TEST(supported[1] == true);

-    neuralnetworks::V1_0::Model model3 = {};
+    V1_0::Model model3 = {};

     AddInputOperand (model3, hidl_vec{1, 1, 1, 8});
     AddIntOperand (model3, 2);
@@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     model3.operations.resize(1);

     // Add unsupported operation, should return no error but we don't support it
-    model3.operations[0].type = neuralnetworks::V1_0::OperationType::DEPTH_TO_SPACE;
+    model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
     model3.operations[0].inputs = hidl_vec{0, 1};
     model3.operations[0].outputs = hidl_vec{2};
@@ -131,14 +131,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == false);

-    neuralnetworks::V1_0::Model model4 = {};
+    V1_0::Model model4 = {};

     AddIntOperand(model4, 0);

     model4.operations.resize(1);

     // Add invalid operation
-    model4.operations[0].type = static_cast<neuralnetworks::V1_0::OperationType>(100);
+    model4.operations[0].type = static_cast<V1_0::OperationType>(100);
     model4.operations[0].outputs = hidl_vec{0};

     driver->getSupportedOperations(model4, cb);
@@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
         supported = _supported;
     };

-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};

     // Operands
     int32_t actValue = 0;
     float weightValue[] = {2, 4, 1};
     float biasValue[] = {4};

     // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
-    AddInputOperand (model, hidl_vec{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
-    AddInputOperand (model, hidl_vec{4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
+    AddInputOperand (model, hidl_vec{1, 1, 3, 4}, V1_0::OperandType::TENSOR_INT32);
+    AddInputOperand (model, hidl_vec{4}, V1_0::OperandType::TENSOR_INT32);
     AddInputOperand (model, hidl_vec{1, 1, 3, 4});
     AddOutputOperand(model, hidl_vec{1, 1, 3, 4});
-    AddOutputOperand(model, hidl_vec{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_QUANT8_ASYMM);
+    AddOutputOperand(model, hidl_vec{1, 1, 3, 4}, V1_0::OperandType::TENSOR_QUANT8_ASYMM);

     // Fully connected is supported
     AddInputOperand (model, hidl_vec{1, 3});
     AddTensorOperand(model, hidl_vec{1, 3}, weightValue);
     AddTensorOperand(model, hidl_vec{1}, biasValue);
     AddIntOperand (model, actValue);
     AddOutputOperand(model, hidl_vec{1, 1});
@@ -189,17 +189,17 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
     model.operations.resize(3);

     // Unsupported
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::HASHTABLE_LOOKUP;
+    model.operations[0].type = V1_0::OperationType::HASHTABLE_LOOKUP;
     model.operations[0].inputs = hidl_vec{0, 1, 2};
     model.operations[0].outputs = hidl_vec{3, 4};

     // Supported
-    model.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[1].inputs = hidl_vec{5, 6, 7, 8};
     model.operations[1].outputs = hidl_vec{9};

     // Unsupported
-    model.operations[2].type = neuralnetworks::V1_0::OperationType::EMBEDDING_LOOKUP;
+    model.operations[2].type = V1_0::OperationType::EMBEDDING_LOOKUP;
     model.operations[2].inputs = hidl_vec{1, 2};
     model.operations[2].outputs = hidl_vec{10};
@@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
         supported = _supported;
     };

-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};

     model.pools = hidl_vec{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
diff --git a/test/Lstm.cpp b/test/Lstm.cpp
index fe6e5d0d..f5d26b2e 100644
--- a/test/Lstm.cpp
+++ b/test/Lstm.cpp
@@ -114,7 +114,7 @@ void LstmTestImpl(hidl_vec inputDimensions,
                   std::vector outputValue)
 {
     auto driver = std::make_unique(DriverOptions(armnn::Compute::GpuAcc));
-    neuralnetworks::V1_0::Model model = {};
+    V1_0::Model model = {};

     // Inputs:
     // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
@@ -198,7 +198,7 @@ void LstmTestImpl(hidl_vec inputDimensions,

     // make the lstm operation
     model.operations.resize(1);
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::LSTM;
+    model.operations[0].type = V1_0::OperationType::LSTM;
     model.operations[0].inputs =
         hidl_vec {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
     model.operations[0].outputs = hidl_vec {23, 24, 25, 26};
diff --git a/test/Merger.cpp b/test/Merger.cpp
index a296d8d9..118e0d66 100644
--- a/test/Merger.cpp
+++ b/test/Merger.cpp
@@ -30,7 +30,7 @@ MergerTestImpl(const std::vector & inputs,
               ErrorStatus expectedExecStatus=ErrorStatus::NONE)
 {
     std::unique_ptr driver = std::make_unique(DriverOptions(computeDevice));
-    neuralnetworks::V1_0::Model model{};
+    V1_0::Model model{};

     hidl_vec modelInputIds;
     modelInputIds.resize(inputs.size()+1);
@@ -45,7 +45,7 @@ MergerTestImpl(const std::vector & inputs,

     // make the concat operation
     model.operations.resize(1);
-    model.operations[0].type = neuralnetworks::V1_0::OperationType::CONCATENATION;
+    model.operations[0].type = V1_0::OperationType::CONCATENATION;
     model.operations[0].inputs = modelInputIds;
     model.operations[0].outputs = hidl_vec{static_cast(inputs.size()+1)};
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 865c6d06..5c388cb0 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -31,9 +31,9 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
     auto driver = std::make_unique(DriverOptions(armnn::Compute::CpuRef));

     ErrorStatus error;
-    neuralnetworks::V1_0::Capabilities cap;
+    V1_0::Capabilities cap;

-    auto cb = [&](ErrorStatus status, const neuralnetworks::V1_0::Capabilities& capabilities)
+    auto cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
     {
         error = status;
         cap = capabilities;
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index 2bb81d81..6ac1ebb0 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -96,7 +96,7 @@ public:
     }

     std::string m_RequestInputsAndOutputsDumpDir;
-    neuralnetworks::V1_0::Model m_Model;
+    V1_0::Model m_Model;

 private:
     std::string m_FileName;
-- 
cgit v1.2.1
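
Note on the namespace simplification above: the patch shortens every
neuralnetworks::V1_0::... and neuralnetworks::V1_1::... qualifier to
V1_0::... and V1_1::..., which only compiles if the short names are bound
somewhere shared. The sketch below shows the usual way to do that with a
namespace alias; the toy Model struct is a placeholder (the real types come
from the Android NN HAL headers), and the alias location is an assumption
not shown in this patch. It also illustrates the preprocessor cleanup: when
testing a single macro, #ifdef NAME is equivalent to #if defined(NAME).

    #include <iostream>

    // Toy stand-in for the deeply nested HAL namespace; the real driver
    // uses the types from ::android::hardware::neuralnetworks::V1_0.
    namespace android { namespace hardware { namespace neuralnetworks { namespace V1_0 {
    struct Model { int operandCount = 0; };
    }}}}

    // Declared once in a common header, the alias lets every call site use
    // the short V1_0:: spelling seen throughout the refactored tests.
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    #ifdef ARMNN_ANDROID_NN_V1_1 // was: #if defined(ARMNN_ANDROID_NN_V1_1)
    // V1_1-only aliases and code would live here.
    #endif

    int main()
    {
        V1_0::Model model = {}; // instead of android::hardware::neuralnetworks::V1_0::Model
        std::cout << model.operandCount << std::endl;
        return 0;
    }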
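
The "template class ..." lines guarded by the new #ifdef in
ArmnnDriverImpl.cpp, ArmnnPreparedModel.cpp, ModelToINetworkConverter.cpp and
RequestThread.cpp are explicit template instantiations, emitted once per
supported HAL version so the template bodies are compiled into those
translation units. A self-contained sketch of that pattern, with hypothetical
hal_1_0/hal_1_1 policy types standing in for the driver's real HalPolicy
classes:

    #include <iostream>

    // Hypothetical per-HAL-version policy types.
    namespace hal_1_0 { struct HalPolicy { static const char* Name() { return "1.0"; } }; }
    namespace hal_1_1 { struct HalPolicy { static const char* Name() { return "1.1"; } }; }

    template <typename HalPolicy>
    class DriverImpl
    {
    public:
        void PrintVersion() const { std::cout << HalPolicy::Name() << std::endl; }
    };

    // Explicit instantiation definitions: one per HAL version, with the
    // V1_1 instantiation compiled only when the build enables it.
    template class DriverImpl<hal_1_0::HalPolicy>;

    #ifdef ARMNN_ANDROID_NN_V1_1
    template class DriverImpl<hal_1_1::HalPolicy>;
    #endif

    int main()
    {
        DriverImpl<hal_1_0::HalPolicy>().PrintVersion(); // prints "1.0"
        return 0;
    }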