From ce3e84a8d449cbf31cee57e30f0eef6a96c0ce94 Mon Sep 17 00:00:00 2001
From: telsoa01
Date: Fri, 31 Aug 2018 09:31:35 +0100
Subject: Release 18.08

---
 test/GenericLayerTests.cpp | 222 +++++++++++++++++++++++++++------------------
 1 file changed, 133 insertions(+), 89 deletions(-)

(limited to 'test/GenericLayerTests.cpp')

diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 7116f0b0..aa91ce15 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -8,189 +8,233 @@
 BOOST_AUTO_TEST_SUITE(GenericLayerTests)
 
-using ArmnnDriver = armnn_driver::ArmnnDriver;
-using DriverOptions = armnn_driver::DriverOptions;
+using namespace android::hardware;
 using namespace driverTestHelpers;
+using namespace armnn_driver;
 
 BOOST_AUTO_TEST_CASE(GetSupportedOperations)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
-    std::vector<bool> sup;
+    ErrorStatus errorStatus;
+    std::vector<bool> supported;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
-        error = status;
-        sup = supported;
+        errorStatus = _errorStatus;
+        supported = _supported;
     };
 
-    V1_0::Model model1 = {};
+    neuralnetworks::V1_0::Model model0 = {};
 
-    // add operands
+    // Add operands
     int32_t actValue = 0;
     float weightValue[] = {2, 4, 1};
     float biasValue[] = {4};
 
-    AddInputOperand(model1, hidl_vec<uint32_t>{1, 3});
+    AddInputOperand (model0, hidl_vec<uint32_t>{1, 3});
+    AddTensorOperand(model0, hidl_vec<uint32_t>{1, 3}, weightValue);
+    AddTensorOperand(model0, hidl_vec<uint32_t>{1}, biasValue);
+    AddIntOperand   (model0, actValue);
+    AddOutputOperand(model0, hidl_vec<uint32_t>{1, 1});
+
+    model0.operations.resize(1);
+
+    // Make a correct fully connected operation
+    model0.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model0.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
+    model0.operations[0].outputs = hidl_vec<uint32_t>{4};
+
+    driver->getSupportedOperations(model0, cb);
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.size() == (size_t)1);
+    BOOST_TEST(supported[0] == true);
+
+    neuralnetworks::V1_0::Model model1 = {};
+
+    AddInputOperand (model1, hidl_vec<uint32_t>{1, 3});
     AddTensorOperand(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
     AddTensorOperand(model1, hidl_vec<uint32_t>{1}, biasValue);
-    AddIntOperand(model1, actValue);
+    AddIntOperand   (model1, actValue);
     AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
 
-    // make a correct fully connected operation
     model1.operations.resize(2);
-    model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
+
+    // Make a correct fully connected operation
+    model1.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
     model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
     model1.operations[0].outputs = hidl_vec<uint32_t>{4};
 
-    // make an incorrect fully connected operation
-    AddIntOperand(model1, actValue);
+    // Add an incorrect fully connected operation
+    AddIntOperand   (model1, actValue);
     AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
-    model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
-    model1.operations[1].inputs = hidl_vec<uint32_t>{4};
+    model1.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model1.operations[1].inputs = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
     model1.operations[1].outputs = hidl_vec<uint32_t>{5};
 
     driver->getSupportedOperations(model1, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
-    BOOST_TEST(sup[0] == true);
-    BOOST_TEST(sup[1] == false);
-    // Broadcast add/mul are not supported
-    V1_0::Model model2 = {};
-
-    AddInputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
-    AddInputOperand(model2, hidl_vec<uint32_t>{4});
+#if defined(ARMNN_ANDROID_P)
+    // In Android P, android::nn::validateModel returns INVALID_ARGUMENT, because of the wrong number of inputs for the
+    // fully connected layer (1 instead of 4)
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST(supported.empty());
+#else
+    // In Android O, android::nn::validateModel indicates that the second (wrong) fully connected layer in unsupported
+    // in the vector of flags returned by the callback
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.size() == (size_t)2);
+    BOOST_TEST(supported[0] == true);
+    BOOST_TEST(supported[1] == false);
+#endif
+
+    // Test Broadcast on add/mul operators
+    neuralnetworks::V1_0::Model model2 = {};
+
+    AddInputOperand (model2, hidl_vec<uint32_t>{1, 1, 3, 4});
+    AddInputOperand (model2, hidl_vec<uint32_t>{4});
+    AddIntOperand   (model2, actValue);
     AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
     AddOutputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
 
     model2.operations.resize(2);
 
-    model2.operations[0].type = V1_0::OperationType::ADD;
-    model2.operations[0].inputs = hidl_vec<uint32_t>{0,1};
-    model2.operations[0].outputs = hidl_vec<uint32_t>{2};
+    model2.operations[0].type = neuralnetworks::V1_0::OperationType::ADD;
+    model2.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
+    model2.operations[0].outputs = hidl_vec<uint32_t>{3};
 
-    model2.operations[1].type = V1_0::OperationType::MUL;
-    model2.operations[1].inputs = hidl_vec<uint32_t>{0,1};
-    model2.operations[1].outputs = hidl_vec<uint32_t>{3};
+    model2.operations[1].type = neuralnetworks::V1_0::OperationType::MUL;
+    model2.operations[1].inputs = hidl_vec<uint32_t>{0, 1, 2};
+    model2.operations[1].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model2, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
-    BOOST_TEST(sup[0] == false);
-    BOOST_TEST(sup[1] == false);
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.size() == (size_t)2);
+    BOOST_TEST(supported[0] == true);
+    BOOST_TEST(supported[1] == true);
 
-    V1_0::Model model3 = {};
+    neuralnetworks::V1_0::Model model3 = {};
 
-    // Add unsupported operation, should return no error but we don't support it
-    AddInputOperand(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
-    AddIntOperand(model3, 2);
+    AddInputOperand (model3, hidl_vec<uint32_t>{1, 1, 1, 8});
+    AddIntOperand   (model3, 2);
     AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
+
     model3.operations.resize(1);
-    model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
-    model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
+
+    // Add unsupported operation, should return no error but we don't support it
+    model3.operations[0].type = neuralnetworks::V1_0::OperationType::DEPTH_TO_SPACE;
+    model3.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
     model3.operations[0].outputs = hidl_vec<uint32_t>{2};
 
     driver->getSupportedOperations(model3, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
-    BOOST_TEST(sup[0] == false);
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.size() == (size_t)1);
+    BOOST_TEST(supported[0] == false);
+
+    neuralnetworks::V1_0::Model model4 = {};
 
-    // Add invalid operation
-    V1_0::Model model4 = {};
     AddIntOperand(model4, 0);
+
     model4.operations.resize(1);
-    model4.operations[0].type = static_cast<V1_0::OperationType>(100);
+
+    // Add invalid operation
+    model4.operations[0].type = static_cast<neuralnetworks::V1_0::OperationType>(100);
     model4.operations[0].outputs = hidl_vec<uint32_t>{0};
 
     driver->getSupportedOperations(model4, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST(supported.empty());
 }
 
 // The purpose of this test is to ensure that when encountering an unsupported operation
-// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
-// As per IVGCVSW-710.
+// it is skipped and getSupportedOperations() continues (rather than failing and stopping).
+// As per IVGCVSW-710.
 BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
-    std::vector<bool> sup;
+    ErrorStatus errorStatus;
+    std::vector<bool> supported;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
-        error = status;
-        sup = supported;
+        errorStatus = _errorStatus;
+        supported = _supported;
     };
 
-    V1_0::Model model = {};
+    neuralnetworks::V1_0::Model model = {};
 
-    // operands
+    // Operands
     int32_t actValue = 0;
     float weightValue[] = {2, 4, 1};
     float biasValue[] = {4};
 
-    // broadcast add is unsupported at the time of writing this test, but any unsupported layer will do
-    AddInputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
-    AddInputOperand(model, hidl_vec<uint32_t>{4});
+    // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
+    AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
+    AddInputOperand (model, hidl_vec<uint32_t>{4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
+    AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4});
     AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
+    AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_QUANT8_ASYMM);
 
-    // fully connected
-    AddInputOperand(model, hidl_vec<uint32_t>{1, 3});
+    // Fully connected is supported
+    AddInputOperand (model, hidl_vec<uint32_t>{1, 3});
     AddTensorOperand(model, hidl_vec<uint32_t>{1, 3}, weightValue);
     AddTensorOperand(model, hidl_vec<uint32_t>{1}, biasValue);
-    AddIntOperand(model, actValue);
+    AddIntOperand   (model, actValue);
    AddOutputOperand(model, hidl_vec<uint32_t>{1, 1});
 
-    // broadcast mul is unsupported
+    // EMBEDDING_LOOKUP is unsupported
     AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
 
     model.operations.resize(3);
 
-    // unsupported
-    model.operations[0].type = V1_0::OperationType::ADD;
-    model.operations[0].inputs = hidl_vec<uint32_t>{0,1};
-    model.operations[0].outputs = hidl_vec<uint32_t>{2};
+    // Unsupported
+    model.operations[0].type = neuralnetworks::V1_0::OperationType::HASHTABLE_LOOKUP;
+    model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
+    model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
 
-    // supported
-    model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
-    model.operations[1].inputs = hidl_vec<uint32_t>{3, 4, 5, 6};
-    model.operations[1].outputs = hidl_vec<uint32_t>{7};
+    // Supported
+    model.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+    model.operations[1].inputs = hidl_vec<uint32_t>{5, 6, 7, 8};
+    model.operations[1].outputs = hidl_vec<uint32_t>{9};
 
-    // unsupported
-    model.operations[2].type = V1_0::OperationType::MUL;
-    model.operations[2].inputs = hidl_vec<uint32_t>{0,1};
-    model.operations[2].outputs = hidl_vec<uint32_t>{8};
+    // Unsupported
+    model.operations[2].type = neuralnetworks::V1_0::OperationType::EMBEDDING_LOOKUP;
+    model.operations[2].inputs = hidl_vec<uint32_t>{1, 2};
+    model.operations[2].outputs = hidl_vec<uint32_t>{10};
 
-    // we are testing that the unsupported layers return false and the test continues
-    // rather than failing and stopping.
+    // We are testing that the unsupported layers return false and the test continues rather than failing and stopping
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
-    BOOST_TEST(sup[0] == false);
-    BOOST_TEST(sup[1] == true);
-    BOOST_TEST(sup[2] == false);
+    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.size() == (size_t)3);
+    BOOST_TEST(supported[0] == false);
+    BOOST_TEST(supported[1] == true);
+    BOOST_TEST(supported[2] == false);
 }
 
 // The purpose of this test is to ensure that when encountering an failure
-// during mem pool mapping we properly report an error to the framework via a callback
+// during mem pool mapping we properly report an error to the framework via a callback
 BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
 {
     auto driver = std::make_unique<ArmnnDriver>(armnn::Compute::CpuRef);
 
-    ErrorStatus error;
-    std::vector<bool> sup;
+    ErrorStatus errorStatus;
+    std::vector<bool> supported;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
-        error = status;
-        sup = supported;
+        errorStatus = _errorStatus;
+        supported = _supported;
     };
 
-    V1_0::Model model = {};
+    neuralnetworks::V1_0::Model model = {};
     model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
 
-    //memory pool mapping should fail, we should report an error
+    // Memory pool mapping should fail, we should report an error
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)error == (int)ErrorStatus::GENERAL_FAILURE);
+    BOOST_TEST((int)errorStatus != (int)ErrorStatus::NONE);
+    BOOST_TEST(supported.empty());
 }
 
 BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1