author     Matteo Martincigh <matteo.martincigh@arm.com>  2018-09-07 09:25:10 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>      2018-09-18 12:40:42 +0100
commit     8b287c23f5141102555ba869a34397ec720a3e4f (patch)
tree       4a60d9c806669aad06816414601ac532946d6878
parent     77605826a353981d41f0ee346850d411770535f8 (diff)
IVGCVSW-1806 More Android NN Driver refactoring
* Changed #if defined to #ifdef
* Simplified the Android ML namespace resolution
* Fixed the relative path in some include directives

Change-Id: I46e46faff98559c8042c1a4b8b82007f462df57d
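For context on the first bullet: `#ifdef X` and `#if defined(X)` are exactly equivalent for a single-macro test, so the change is purely stylistic; a minimal sketch (only the ARMNN_ANDROID_NN_V1_1 macro below comes from this patch, the surrounding code is illustrative):

```cpp
// Equivalent single-macro checks; this patch standardizes on the shorter form.
#ifdef ARMNN_ANDROID_NN_V1_1            // spelling after this patch
// ... V1_1-only code ...
#endif

#if defined(ARMNN_ANDROID_NN_V1_1)      // spelling before this patch
// ... V1_1-only code ...
#endif

// #if defined(...) remains necessary for compound conditions, e.g.:
#if defined(ARMNN_ANDROID_NN_V1_1) && !defined(NDEBUG)
// ... code needing both conditions ...
#endif
```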
-rw-r--r--  1.0/ArmnnDriver.hpp            4
-rw-r--r--  1.0/HalPolicy.hpp              2
-rw-r--r--  1.1/ArmnnDriver.hpp            4
-rw-r--r--  1.1/HalPolicy.hpp              2
-rw-r--r--  ArmnnDriver.hpp                4
-rw-r--r--  ArmnnDriverImpl.cpp            2
-rw-r--r--  ArmnnPreparedModel.cpp         2
-rw-r--r--  ModelToINetworkConverter.cpp   2
-rw-r--r--  RequestThread.cpp              2
-rw-r--r--  test/Concurrent.cpp            4
-rw-r--r--  test/DriverTestHelpers.cpp    12
-rw-r--r--  test/DriverTestHelpers.hpp     8
-rw-r--r--  test/GenericLayerTests.cpp    40
-rw-r--r--  test/Lstm.cpp                  4
-rw-r--r--  test/Merger.cpp                4
-rw-r--r--  test/Tests.cpp                 4
-rw-r--r--  test/UtilsTests.cpp            2
17 files changed, 51 insertions, 51 deletions
diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index a048973f..52c34c46 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -7,7 +7,7 @@
#include <HalInterfaces.h>
-#include "ArmnnDevice.hpp"
+#include "../ArmnnDevice.hpp"
#include "ArmnnDriverImpl.hpp"
#include "HalPolicy.hpp"
@@ -67,4 +67,4 @@ public:
};
} // namespace hal_1_0
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index fe41d073..484a99b2 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include "ConversionUtils.hpp"
+#include "../ConversionUtils.hpp"
#include <HalInterfaces.h>
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index ef8bca8a..2548c8ac 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -7,7 +7,7 @@
#include <HalInterfaces.h>
-#include "ArmnnDevice.hpp"
+#include "../ArmnnDevice.hpp"
#include "ArmnnDriverImpl.hpp"
#include "HalPolicy.hpp"
@@ -114,4 +114,4 @@ public:
};
} // namespace hal_1_1
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index 5efe813b..af858781 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include "ConversionUtils.hpp"
+#include "../ConversionUtils.hpp"
#include <HalInterfaces.h>
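The four hunks above all apply the third bullet of the commit message. A quoted `#include "..."` is searched relative to the including file's directory first, so headers under the versioned subdirectories need the explicit `../` prefix to reach the root headers without depending on an extra `-I` search path. A comment-only sketch of the layout, reconstructed from the paths in this diff:

```cpp
// Assumed layout, reconstructed from the file paths in this diff:
//
//   ArmnnDevice.hpp          (repository root)
//   ConversionUtils.hpp      (repository root)
//   1.0/ArmnnDriver.hpp      1.0/HalPolicy.hpp
//   1.1/ArmnnDriver.hpp      1.1/HalPolicy.hpp
//
// After this patch, the versioned headers reach the root headers via:
//   #include "../ArmnnDevice.hpp"      (in 1.x/ArmnnDriver.hpp)
//   #include "../ConversionUtils.hpp"  (in 1.x/HalPolicy.hpp)
```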
diff --git a/ArmnnDriver.hpp b/ArmnnDriver.hpp
index fd5cfad0..7c6e5d0b 100644
--- a/ArmnnDriver.hpp
+++ b/ArmnnDriver.hpp
@@ -9,7 +9,7 @@
#include <log/log.h>
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1
#include "1.1/ArmnnDriver.hpp"
@@ -29,7 +29,7 @@ public:
} // namespace armnn_driver
-#else // Fallback to ::android::hardware::neuralnetworks::V1_0.
+#else // Fallback to ::android::hardware::neuralnetworks::V1_0
#include "1.0/ArmnnDriver.hpp"
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 10da1dd3..5d213659 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -241,7 +241,7 @@ Return<DeviceStatus> ArmnnDriverImpl<HalPolicy>::getStatus()
template class ArmnnDriverImpl<hal_1_0::HalPolicy>;
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
template class ArmnnDriverImpl<hal_1_1::HalPolicy>;
#endif
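The same `#ifdef`-guarded explicit instantiation recurs in ArmnnPreparedModel.cpp, ModelToINetworkConverter.cpp, and RequestThread.cpp below. A self-contained sketch of the idiom — class and member names are illustrative stand-ins; only the macro and the hal namespaces mirror the patch:

```cpp
// Sketch of the guarded explicit-instantiation idiom used throughout this patch.
namespace hal_1_0 { struct HalPolicy {}; }
namespace hal_1_1 { struct HalPolicy {}; }

template <typename HalPolicy>
class ExampleDriverImpl  // illustrative stand-in for ArmnnDriverImpl
{
public:
    void DoWork() {}
};

// Emit the template's code once per supported HAL version in this
// translation unit; the V1_1 instantiation exists only when the build
// defines ARMNN_ANDROID_NN_V1_1.
template class ExampleDriverImpl<hal_1_0::HalPolicy>;
#ifdef ARMNN_ANDROID_NN_V1_1
template class ExampleDriverImpl<hal_1_1::HalPolicy>;
#endif
```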
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 85251ef1..d7f727f5 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -301,7 +301,7 @@ void ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
template class ArmnnPreparedModel<hal_1_0::HalPolicy>;
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
template class ArmnnPreparedModel<hal_1_1::HalPolicy>;
#endif
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 1a632805..8bf84e94 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -167,7 +167,7 @@ bool ModelToINetworkConverter<HalPolicy>::IsOperationSupported(uint32_t operatio
template class ModelToINetworkConverter<hal_1_0::HalPolicy>;
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
#endif
diff --git a/RequestThread.cpp b/RequestThread.cpp
index 0b06b51e..4731489e 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -137,7 +137,7 @@ void RequestThread<HalVersion>::Process()
template class RequestThread<hal_1_0::HalPolicy>;
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#ifdef ARMNN_ANDROID_NN_V1_1
template class RequestThread<hal_1_1::HalPolicy>;
#endif
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 92f6f8fb..55a1a395 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -24,7 +24,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
ALOGI("ConcurrentExecute: entry");
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- neuralnetworks::V1_0::Model model = {};
+ V1_0::Model model = {};
// add operands
int32_t actValue = 0;
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
// make the fully connected operation
model.operations.resize(1);
- model.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
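The shorter `V1_0::Model` spelling used in this and the remaining test files implies that a using-directive or namespace alias for `::android::hardware::neuralnetworks` is in scope; the declaration itself is not part of this diff, so the following is only a hedged sketch of the mechanism, with a stand-in type:

```cpp
#include <iostream>

// Stand-in for the HIDL-generated type; the real one is not shown in this diff.
namespace android { namespace hardware { namespace neuralnetworks { namespace V1_0 {
struct Model { int operandCount = 0; };
}}}}

// One way to enable the short spelling; whether the patch uses a
// using-directive, an alias, or an enclosing namespace is an assumption.
using namespace android::hardware::neuralnetworks;

int main()
{
    V1_0::Model model = {};  // matches the spelling used in the tests
    std::cout << model.operandCount << '\n';
    return 0;
}
```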
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index ded24592..3bc0a20a 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -109,7 +109,7 @@ void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
memcpy(dst, data, size * sizeof(float));
}
-android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus& prepareStatus,
ErrorStatus expectedStatus)
@@ -126,15 +126,15 @@ android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_0::M
return cb->GetPreparedModel();
}
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+#ifdef ARMNN_ANDROID_NN_V1_1
-android::sp<IPreparedModel> PrepareModelWithStatus(const neuralnetworks::V1_1::Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus& prepareStatus,
ErrorStatus expectedStatus)
{
android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
- driver.prepareModel_1_1(model, neuralnetworks::V1_1::ExecutionPreference::LOW_POWER, cb);
+ driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
prepareStatus = cb->GetErrorStatus();
BOOST_TEST(prepareStatus == expectedStatus);
@@ -172,12 +172,12 @@ template<>
OperandType TypeToOperandType<float>()
{
return OperandType::TENSOR_FLOAT32;
-};
+}
template<>
OperandType TypeToOperandType<int32_t>()
{
return OperandType::TENSOR_INT32;
-};
+}
} // namespace driverTestHelpers
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index ce09ee68..cce220e5 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -31,7 +31,7 @@ std::ostream& operator<<(std::ostream& os, ErrorStatus stat);
namespace driverTestHelpers
{
-std::ostream& operator<<(std::ostream& os, android::hardware::neuralnetworks::V1_0::ErrorStatus stat);
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
struct ExecutionCallback : public IExecutionCallback
{
@@ -172,14 +172,14 @@ void AddOutputOperand(HalModel& model,
model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
}
-android::sp<IPreparedModel> PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_0::Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus& prepareStatus,
ErrorStatus expectedStatus = ErrorStatus::NONE);
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
+#ifdef ARMNN_ANDROID_NN_V1_1
-android::sp<IPreparedModel> PrepareModelWithStatus(const ::android::hardware::neuralnetworks::V1_1::Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus& prepareStatus,
ErrorStatus expectedStatus = ErrorStatus::NONE);
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index c66854f4..63198b4c 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
supported = _supported;
};
- neuralnetworks::V1_0::Model model0 = {};
+ V1_0::Model model0 = {};
// Add operands
int32_t actValue = 0;
@@ -41,7 +41,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model0.operations.resize(1);
// Make a correct fully connected operation
- model0.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+ model0.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model0.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model0.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -50,7 +50,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(supported.size() == (size_t)1);
BOOST_TEST(supported[0] == true);
- neuralnetworks::V1_0::Model model1 = {};
+ V1_0::Model model1 = {};
AddInputOperand (model1, hidl_vec<uint32_t>{1, 3});
AddTensorOperand(model1, hidl_vec<uint32_t>{1, 3}, weightValue);
@@ -61,14 +61,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model1.operations.resize(2);
// Make a correct fully connected operation
- model1.operations[0].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+ model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model1.operations[0].outputs = hidl_vec<uint32_t>{4};
// Add an incorrect fully connected operation
AddIntOperand (model1, actValue);
AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
- model1.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+ model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
model1.operations[1].inputs = hidl_vec<uint32_t>{4}; // Only 1 input operand, expected 4
model1.operations[1].outputs = hidl_vec<uint32_t>{5};
@@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
#endif
// Test Broadcast on add/mul operators
- neuralnetworks::V1_0::Model model2 = {};
+ V1_0::Model model2 = {};
AddInputOperand (model2, hidl_vec<uint32_t>{1, 1, 3, 4});
AddInputOperand (model2, hidl_vec<uint32_t>{4});
@@ -99,11 +99,11 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model2.operations.resize(2);
- model2.operations[0].type = neuralnetworks::V1_0::OperationType::ADD;
+ model2.operations[0].type = V1_0::OperationType::ADD;
model2.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
model2.operations[0].outputs = hidl_vec<uint32_t>{3};
- model2.operations[1].type = neuralnetworks::V1_0::OperationType::MUL;
+ model2.operations[1].type = V1_0::OperationType::MUL;
model2.operations[1].inputs = hidl_vec<uint32_t>{0, 1, 2};
model2.operations[1].outputs = hidl_vec<uint32_t>{4};
@@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(supported[0] == true);
BOOST_TEST(supported[1] == true);
- neuralnetworks::V1_0::Model model3 = {};
+ V1_0::Model model3 = {};
AddInputOperand (model3, hidl_vec<uint32_t>{1, 1, 1, 8});
AddIntOperand (model3, 2);
@@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model3.operations.resize(1);
// Add unsupported operation, should return no error but we don't support it
- model3.operations[0].type = neuralnetworks::V1_0::OperationType::DEPTH_TO_SPACE;
+ model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
model3.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
model3.operations[0].outputs = hidl_vec<uint32_t>{2};
@@ -131,14 +131,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(supported.size() == (size_t)1);
BOOST_TEST(supported[0] == false);
- neuralnetworks::V1_0::Model model4 = {};
+ V1_0::Model model4 = {};
AddIntOperand(model4, 0);
model4.operations.resize(1);
// Add invalid operation
- model4.operations[0].type = static_cast<neuralnetworks::V1_0::OperationType>(100);
+ model4.operations[0].type = static_cast<V1_0::OperationType>(100);
model4.operations[0].outputs = hidl_vec<uint32_t>{0};
driver->getSupportedOperations(model4, cb);
@@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
supported = _supported;
};
- neuralnetworks::V1_0::Model model = {};
+ V1_0::Model model = {};
// Operands
int32_t actValue = 0;
@@ -170,11 +170,11 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
float biasValue[] = {4};
// HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do
- AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
- AddInputOperand (model, hidl_vec<uint32_t>{4}, neuralnetworks::V1_0::OperandType::TENSOR_INT32);
+ AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4}, V1_0::OperandType::TENSOR_INT32);
+ AddInputOperand (model, hidl_vec<uint32_t>{4}, V1_0::OperandType::TENSOR_INT32);
AddInputOperand (model, hidl_vec<uint32_t>{1, 1, 3, 4});
AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4});
- AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4}, neuralnetworks::V1_0::OperandType::TENSOR_QUANT8_ASYMM);
+ AddOutputOperand(model, hidl_vec<uint32_t>{1, 1, 3, 4}, V1_0::OperandType::TENSOR_QUANT8_ASYMM);
// Fully connected is supported
AddInputOperand (model, hidl_vec<uint32_t>{1, 3});
@@ -189,17 +189,17 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
model.operations.resize(3);
// Unsupported
- model.operations[0].type = neuralnetworks::V1_0::OperationType::HASHTABLE_LOOKUP;
+ model.operations[0].type = V1_0::OperationType::HASHTABLE_LOOKUP;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2};
model.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
// Supported
- model.operations[1].type = neuralnetworks::V1_0::OperationType::FULLY_CONNECTED;
+ model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[1].inputs = hidl_vec<uint32_t>{5, 6, 7, 8};
model.operations[1].outputs = hidl_vec<uint32_t>{9};
// Unsupported
- model.operations[2].type = neuralnetworks::V1_0::OperationType::EMBEDDING_LOOKUP;
+ model.operations[2].type = V1_0::OperationType::EMBEDDING_LOOKUP;
model.operations[2].inputs = hidl_vec<uint32_t>{1, 2};
model.operations[2].outputs = hidl_vec<uint32_t>{10};
@@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
supported = _supported;
};
- neuralnetworks::V1_0::Model model = {};
+ V1_0::Model model = {};
model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
diff --git a/test/Lstm.cpp b/test/Lstm.cpp
index fe6e5d0d..f5d26b2e 100644
--- a/test/Lstm.cpp
+++ b/test/Lstm.cpp
@@ -114,7 +114,7 @@ void LstmTestImpl(hidl_vec<uint32_t> inputDimensions,
std::vector<float> outputValue)
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
- neuralnetworks::V1_0::Model model = {};
+ V1_0::Model model = {};
// Inputs:
// 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
@@ -198,7 +198,7 @@ void LstmTestImpl(hidl_vec<uint32_t> inputDimensions,
// make the lstm operation
model.operations.resize(1);
- model.operations[0].type = neuralnetworks::V1_0::OperationType::LSTM;
+ model.operations[0].type = V1_0::OperationType::LSTM;
model.operations[0].inputs =
hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
diff --git a/test/Merger.cpp b/test/Merger.cpp
index a296d8d9..118e0d66 100644
--- a/test/Merger.cpp
+++ b/test/Merger.cpp
@@ -30,7 +30,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,
ErrorStatus expectedExecStatus=ErrorStatus::NONE)
{
std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
- neuralnetworks::V1_0::Model model{};
+ V1_0::Model model{};
hidl_vec<uint32_t> modelInputIds;
modelInputIds.resize(inputs.size()+1);
@@ -45,7 +45,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,
// make the concat operation
model.operations.resize(1);
- model.operations[0].type = neuralnetworks::V1_0::OperationType::CONCATENATION;
+ model.operations[0].type = V1_0::OperationType::CONCATENATION;
model.operations[0].inputs = modelInputIds;
model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 865c6d06..5c388cb0 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -31,9 +31,9 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
ErrorStatus error;
- neuralnetworks::V1_0::Capabilities cap;
+ V1_0::Capabilities cap;
- auto cb = [&](ErrorStatus status, const neuralnetworks::V1_0::Capabilities& capabilities)
+ auto cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
{
error = status;
cap = capabilities;
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index 2bb81d81..6ac1ebb0 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -96,7 +96,7 @@ public:
}
std::string m_RequestInputsAndOutputsDumpDir;
- neuralnetworks::V1_0::Model m_Model;
+ V1_0::Model m_Model;
private:
std::string m_FileName;