From deb3bdbe028a59da0759dd7a560387d03a11d322 Mon Sep 17 00:00:00 2001
From: surmeh01
Date: Thu, 5 Jul 2018 12:06:04 +0100
Subject: Release 18.05.02

---
 test/Android.mk            |  7 +++++++
 test/Concurrent.cpp        |  4 ++--
 test/Convolution2D.cpp     |  4 ++--
 test/DriverTestHelpers.cpp | 12 ++++++------
 test/DriverTestHelpers.hpp | 14 +++++++-------
 test/FullyConnected.cpp    | 12 ++++++------
 test/GenericLayerTests.cpp | 30 +++++++++++++++---------------
 test/Merger.cpp            |  4 ++--
 test/Tests.cpp             |  4 ++--
 test/UtilsTests.cpp        |  2 +-
 10 files changed, 50 insertions(+), 43 deletions(-)

(limited to 'test')

diff --git a/test/Android.mk b/test/Android.mk
index d74afecc..97e9a903 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -61,6 +61,13 @@ LOCAL_SHARED_LIBRARIES := \
     android.hidl.memory@1.0 \
     libOpenCL

+ifeq ($(PLATFORM_VERSION),9)
+# Required to build the 1.0 version of the NN Driver on Android P and later versions,
+# as the 1.0 version of the NN API needs the 1.1 HAL headers to be included regardless.
+LOCAL_SHARED_LIBRARIES+= \
+    android.hardware.neuralnetworks@1.1
+endif
+
 LOCAL_MODULE := armnn-driver-tests

 LOCAL_MODULE_TAGS := eng optional
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 16734dc3..c2d58bde 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -22,7 +22,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
     ALOGI("ConcurrentExecute: entry");

     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    Model model = {};
+    V1_0::Model model = {};

     // add operands
     int32_t actValue = 0;
@@ -37,7 +37,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)

     // make the fully connected operation
     model.operations.resize(1);
-    model.operations[0].type = OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};

diff --git a/test/Convolution2D.cpp b/test/Convolution2D.cpp
index 90edb415..cc301bc9 100644
--- a/test/Convolution2D.cpp
+++ b/test/Convolution2D.cpp
@@ -20,7 +20,7 @@ namespace
 void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    Model model = {};
+    V1_0::Model model = {};

     uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;

@@ -39,7 +39,7 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)

     // make the convolution operation
     model.operations.resize(1);
-    model.operations[0].type = OperationType::CONV_2D;
+    model.operations[0].type = V1_0::OperationType::CONV_2D;
     model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
     model.operations[0].outputs = hidl_vec<uint32_t>{7};

diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 5b371921..d2d380a7 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -107,13 +107,13 @@ void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
     memcpy(dst, data, size * sizeof(float));
 }

-void AddOperand(Model& model, const Operand& op)
+void AddOperand(V1_0::Model& model, const Operand& op)
 {
     model.operands.resize(model.operands.size() + 1);
     model.operands[model.operands.size() - 1] = op;
 }

-void AddIntOperand(Model& model, int32_t value)
+void AddIntOperand(V1_0::Model& model, int32_t value)
 {
     DataLocation location = {};
     location.offset = model.operandValues.size();
@@ -131,7 +131,7 @@ void AddIntOperand(Model& model, int32_t value)
     AddOperand(model, op);
 }

-void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+void AddInputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions)
 {
     Operand op = {};
     op.type = OperandType::TENSOR_FLOAT32;
@@ -144,7 +144,7 @@ void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
     model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
 }

-void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+void AddOutputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions)
 {
     Operand op = {};
     op.type = OperandType::TENSOR_FLOAT32;
@@ -158,7 +158,7 @@ void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
 }


-android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                    armnn_driver::ArmnnDriver& driver,
                                                    ErrorStatus & prepareStatus,
                                                    ErrorStatus expectedStatus)
@@ -176,7 +176,7 @@ android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
     return cb->GetPreparedModel();
 }

-android::sp<IPreparedModel> PrepareModel(const Model& model,
+android::sp<IPreparedModel> PrepareModel(const V1_0::Model& model,
                                          armnn_driver::ArmnnDriver& driver)
 {
     ErrorStatus prepareStatus = ErrorStatus::NONE;
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index e90f7ecf..57541a35 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -72,9 +72,9 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request);

 void AddPoolAndSetData(uint32_t size, Request& request, const float* data);

-void AddOperand(Model& model, const Operand& op);
+void AddOperand(V1_0::Model& model, const Operand& op);

-void AddIntOperand(Model& model, int32_t value);
+void AddIntOperand(V1_0::Model& model, int32_t value);

 template<typename T>
 OperandType TypeToOperandType();
@@ -86,7 +86,7 @@ template<>
 OperandType TypeToOperandType<int32_t>();

 template<typename T>
-void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
+void AddTensorOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions, T* values)
 {
     uint32_t totalElements = 1;
     for (uint32_t dim : dimensions)
@@ -113,14 +113,14 @@ void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
     AddOperand(model, op);
 }

-void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+void AddInputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions);

-void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+void AddOutputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions);

-android::sp<IPreparedModel> PrepareModel(const Model& model,
+android::sp<IPreparedModel> PrepareModel(const V1_0::Model& model,
                                          armnn_driver::ArmnnDriver& driver);

-android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                    armnn_driver::ArmnnDriver& driver,
                                                    ErrorStatus & prepareStatus,
                                                    ErrorStatus expectedStatus=ErrorStatus::NONE);
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index ea6c8715..4feda30b 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -19,7 +19,7 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
     // but that uses slightly weird dimensions which I don't think we need to support for now

     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    Model model = {};
+    V1_0::Model model = {};

     // add operands
     int32_t actValue = 0;
@@ -34,7 +34,7 @@ BOOST_AUTO_TEST_CASE(FullyConnected)

     // make the fully connected operation
     model.operations.resize(1);
-    model.operations[0].type = OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};

@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
         sup = supported;
     };

-    Model model = {};
+    V1_0::Model model = {};

     // operands
     int32_t actValue = 0;
@@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)

     model.operations.resize(1);

-    model.operations[0].type = OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};

@@ -177,7 +177,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
         sup = supported;
     };

-    Model model = {};
+    V1_0::Model model = {};

     // operands
     int32_t actValue = 0;
@@ -200,7 +200,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)

     model.operations.resize(1);

-    model.operations[0].type = OperationType::FULLY_CONNECTED;
+    model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
     model.operations[0].outputs = hidl_vec<uint32_t>{4};

diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 5c6c041d..7116f0b0 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
         sup = supported;
     };

-    Model model1 = {};
+    V1_0::Model model1 = {};

     // add operands
     int32_t actValue = 0;
@@ -40,14 +40,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)

     // make a correct fully connected operation
     model1.operations.resize(2);
-    model1.operations[0].type = OperationType::FULLY_CONNECTED;
+    model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
     model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
     model1.operations[0].outputs = hidl_vec<uint32_t>{4};

     // make an incorrect fully connected operation
     AddIntOperand(model1, actValue);
     AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
-    model1.operations[1].type = OperationType::FULLY_CONNECTED;
+    model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
     model1.operations[1].inputs = hidl_vec<uint32_t>{4};
     model1.operations[1].outputs = hidl_vec<uint32_t>{5};

@@ -57,7 +57,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(sup[1] == false);

     // Broadcast add/mul are not supported
-    Model model2 = {};
+    V1_0::Model model2 = {};

     AddInputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
     AddInputOperand(model2, hidl_vec<uint32_t>{4});
@@ -66,11 +66,11 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)

     model2.operations.resize(2);

-    model2.operations[0].type = OperationType::ADD;
+    model2.operations[0].type = V1_0::OperationType::ADD;
     model2.operations[0].inputs = hidl_vec<uint32_t>{0,1};
     model2.operations[0].outputs = hidl_vec<uint32_t>{2};

-    model2.operations[1].type = OperationType::MUL;
+    model2.operations[1].type = V1_0::OperationType::MUL;
     model2.operations[1].inputs = hidl_vec<uint32_t>{0,1};
     model2.operations[1].outputs = hidl_vec<uint32_t>{3};

@@ -79,14 +79,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(sup[0] == false);
     BOOST_TEST(sup[1] == false);

-    Model model3 = {};
+    V1_0::Model model3 = {};

     // Add unsupported operation, should return no error but we don't support it
     AddInputOperand(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
     AddIntOperand(model3, 2);
     AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
     model3.operations.resize(1);
-    model3.operations[0].type = OperationType::DEPTH_TO_SPACE;
+    model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
     model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
     model3.operations[0].outputs = hidl_vec<uint32_t>{2};

@@ -95,10 +95,10 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
     BOOST_TEST(sup[0] == false);

     // Add invalid operation
-    Model model4 = {};
+    V1_0::Model model4 = {};
     AddIntOperand(model4, 0);
     model4.operations.resize(1);
-    model4.operations[0].type = static_cast<OperationType>(100);
+    model4.operations[0].type = static_cast<V1_0::OperationType>(100);
     model4.operations[0].outputs = hidl_vec<uint32_t>{0};

     driver->getSupportedOperations(model4, cb);
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
         sup = supported;
     };

-    Model model = {};
+    V1_0::Model model = {};

     // operands
     int32_t actValue = 0;
@@ -146,17 +146,17 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
     model.operations.resize(3);

     // unsupported
-    model.operations[0].type = OperationType::ADD;
+    model.operations[0].type = V1_0::OperationType::ADD;
     model.operations[0].inputs = hidl_vec<uint32_t>{0,1};
     model.operations[0].outputs = hidl_vec<uint32_t>{2};

     // supported
-    model.operations[1].type = OperationType::FULLY_CONNECTED;
+    model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
     model.operations[1].inputs = hidl_vec<uint32_t>{3, 4, 5, 6};
     model.operations[1].outputs = hidl_vec<uint32_t>{7};

     // unsupported
-    model.operations[2].type = OperationType::MUL;
+    model.operations[2].type = V1_0::OperationType::MUL;
     model.operations[2].inputs = hidl_vec<uint32_t>{0,1};
     model.operations[2].outputs = hidl_vec<uint32_t>{8};

@@ -184,7 +184,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
         sup = supported;
     };

-    Model model = {};
+    V1_0::Model model = {};

     model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};

diff --git a/test/Merger.cpp b/test/Merger.cpp
index 6c069a86..48253604 100644
--- a/test/Merger.cpp
+++ b/test/Merger.cpp
@@ -25,7 +25,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,
                ErrorStatus expectedExecStatus=ErrorStatus::NONE)
 {
     std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
-    Model model{};
+    V1_0::Model model{};

     hidl_vec<uint32_t> modelInputIds;
     modelInputIds.resize(inputs.size()+1);
@@ -40,7 +40,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,

     // make the concat operation
     model.operations.resize(1);
-    model.operations[0].type = OperationType::CONCATENATION;
+    model.operations[0].type = V1_0::OperationType::CONCATENATION;
     model.operations[0].inputs = modelInputIds;
     model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};

diff --git a/test/Tests.cpp b/test/Tests.cpp
index 37aece7c..3fa8e125 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -31,9 +31,9 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));

     ErrorStatus error;
-    Capabilities cap;
+    V1_0::Capabilities cap;

-    ArmnnDriver::getCapabilities_cb cb = [&](ErrorStatus status, const Capabilities& capabilities)
+    ArmnnDriver::getCapabilities_cb cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
     {
         error = status;
         cap = capabilities;
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index b429920c..e7e6cde7 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -95,7 +95,7 @@ public:
     }

     std::string m_RequestInputsAndOutputsDumpDir;
-    Model m_Model;
+    V1_0::Model m_Model;

 private:
     std::string m_FileName;
--
cgit v1.2.1