author     surmeh01 <surabhi.mehta@arm.com>  2018-07-05 12:06:04 +0100
committer  surmeh01 <surabhi.mehta@arm.com>  2018-07-05 12:06:04 +0100
commit     deb3bdbe028a59da0759dd7a560387d03a11d322 (patch)
tree       869b7ee10d8f1f19a0861e0b552bb453330adf0a
parent     49b9e100bfbb3b8da01472a0ff48b2bd92944e01 (diff)
download   android-nn-driver-deb3bdbe028a59da0759dd7a560387d03a11d322.tar.gz
Release 18.05.02
-rw-r--r--  Android.mk                     40
-rw-r--r--  ArmnnDriver.cpp                26
-rw-r--r--  ArmnnDriver.hpp                14
-rw-r--r--  ArmnnPreparedModel.cpp         10
-rw-r--r--  ArmnnPreparedModel.hpp         10
-rw-r--r--  ModelToINetworkConverter.cpp  112
-rw-r--r--  ModelToINetworkConverter.hpp   72
-rw-r--r--  Utils.cpp                      17
-rw-r--r--  Utils.hpp                       6
-rw-r--r--  test/Android.mk                 7
-rw-r--r--  test/Concurrent.cpp             4
-rw-r--r--  test/Convolution2D.cpp          4
-rw-r--r--  test/DriverTestHelpers.cpp     12
-rw-r--r--  test/DriverTestHelpers.hpp     14
-rw-r--r--  test/FullyConnected.cpp        12
-rw-r--r--  test/GenericLayerTests.cpp     30
-rw-r--r--  test/Merger.cpp                 4
-rw-r--r--  test/Tests.cpp                  4
-rw-r--r--  test/UtilsTests.cpp             2
19 files changed, 236 insertions(+), 164 deletions(-)
diff --git a/Android.mk b/Android.mk
index d6b013e9..e69514c2 100644
--- a/Android.mk
+++ b/Android.mk
@@ -7,6 +7,8 @@ ANDROID_NN_DRIVER_LOCAL_PATH := $(call my-dir)
LOCAL_PATH := $(ANDROID_NN_DRIVER_LOCAL_PATH)
# Configure these paths if you move the source or Khronos headers
+ARMNN_HEADER_PATH := $(LOCAL_PATH)/armnn/include
+ARMNN_UTILS_HEADER_PATH := $(LOCAL_PATH)/armnn/src/armnnUtils
OPENCL_HEADER_PATH := $(LOCAL_PATH)/clframework/include
NN_HEADER_PATH := $(LOCAL_PATH)/../../../frameworks/ml/nn/runtime/include
@@ -22,7 +24,9 @@ LOCAL_PROPRIETARY_MODULE := true
# Mark source files as dependent on Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-LOCAL_C_INCLUDES := \
+LOCAL_C_INCLUDES := \
+ $(ARMNN_HEADER_PATH) \
+ $(ARMNN_UTILS_HEADER_PATH) \
$(OPENCL_HEADER_PATH) \
$(NN_HEADER_PATH)
@@ -31,11 +35,17 @@ LOCAL_CFLAGS := \
-fexceptions \
-Werror \
-Wno-format-security
+ifeq ($(PLATFORM_VERSION),9)
+# Required to build with the changes made to the Android ML framework starting from Android P,
+# regardless of the HAL version used for the build.
+LOCAL_CFLAGS+= \
+ -DARMNN_ANDROID_P
+endif
ifeq ($(ARMNN_DRIVER_DEBUG),1)
LOCAL_CFLAGS+= -UNDEBUG
endif
-LOCAL_SRC_FILES := \
+LOCAL_SRC_FILES := \
ArmnnDriver.cpp \
ArmnnPreparedModel.cpp \
ModelToINetworkConverter.cpp \
@@ -49,9 +59,9 @@ LOCAL_STATIC_LIBRARIES := \
libboost_program_options \
libboost_system \
libboost_thread \
- armnn-arm_compute \
+ armnn-arm_compute
-LOCAL_SHARED_LIBRARIES := \
+LOCAL_SHARED_LIBRARIES := \
libbase \
libhidlbase \
libhidltransport \
@@ -62,6 +72,12 @@ LOCAL_SHARED_LIBRARIES := \
android.hidl.allocator@1.0 \
android.hidl.memory@1.0 \
libOpenCL
+ifeq ($(PLATFORM_VERSION),9)
+# Required to build the 1.0 version of the NN Driver on Android P and later versions,
+# as the 1.0 version of the NN API needs the 1.1 HAL headers to be included regardless.
+LOCAL_SHARED_LIBRARIES+= \
+ android.hardware.neuralnetworks@1.1
+endif
include $(BUILD_STATIC_LIBRARY)
@@ -80,6 +96,7 @@ LOCAL_PROPRIETARY_MODULE := true
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_C_INCLUDES := \
+ $(ARMNN_HEADER_PATH) \
$(NN_HEADER_PATH)
LOCAL_CFLAGS := \
@@ -89,7 +106,7 @@ ifeq ($(ARMNN_DRIVER_DEBUG),1)
LOCAL_CFLAGS+= -UNDEBUG
endif
-LOCAL_SRC_FILES := \
+LOCAL_SRC_FILES := \
service.cpp
LOCAL_STATIC_LIBRARIES := \
@@ -101,8 +118,13 @@ LOCAL_STATIC_LIBRARIES := \
libboost_system \
libboost_thread \
armnn-arm_compute
+ifeq ($(PLATFORM_VERSION),9)
+# Required to build the 1.0 version of the NN Driver on Android P and later versions.
+LOCAL_STATIC_LIBRARIES+= \
+ libomp
+endif
-LOCAL_SHARED_LIBRARIES := \
+LOCAL_SHARED_LIBRARIES := \
libbase \
libhidlbase \
libhidltransport \
@@ -116,6 +138,12 @@ LOCAL_SHARED_LIBRARIES := \
android.hidl.allocator@1.0 \
android.hidl.memory@1.0 \
libOpenCL
+ifeq ($(PLATFORM_VERSION),9)
+# Required to build the 1.0 version of the NN Driver on Android P and later versions,
+# as the 1.0 version of the NN API needs the 1.1 HAL headers to be included regardless.
+LOCAL_SHARED_LIBRARIES+= \
+ android.hardware.neuralnetworks@1.1
+endif
include $(BUILD_EXECUTABLE)
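The Android.mk changes above gate everything Android P-specific behind a single PLATFORM_VERSION check that adds -DARMNN_ANDROID_P to LOCAL_CFLAGS. On the C++ side that define is an ordinary preprocessor symbol, so each source file can pick the matching framework headers at compile time. A minimal, self-contained sketch of how such a build flag behaves (compile with or without -DARMNN_ANDROID_P; the constants are illustrative, not from the driver):

    #include <iostream>

    // Toggled by the build system: Android.mk adds -DARMNN_ANDROID_P when
    // PLATFORM_VERSION is 9, so the branch is chosen per build, not at runtime.
    #if defined(ARMNN_ANDROID_P)
    constexpr const char* kValidationHeader = "ValidateHal.h";      // Android P
    #else
    constexpr const char* kValidationHeader = "OperationsUtils.h";  // Android O
    #endif

    int main()
    {
        std::cout << "Validation helpers come from: " << kValidationHeader << "\n";
        return 0;
    }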
diff --git a/ArmnnDriver.cpp b/ArmnnDriver.cpp
index 92487ccd..4d58249e 100644
--- a/ArmnnDriver.cpp
+++ b/ArmnnDriver.cpp
@@ -15,6 +15,12 @@
#include "OperationsUtils.h"
+#if defined(ARMNN_ANDROID_P)
+// The headers of the ML framework have changed between Android O and Android P.
+// The validation functions have been moved into their own header, ValidateHal.h.
+#include <ValidateHal.h>
+#endif
+
#include <boost/algorithm/string/predicate.hpp>
#include <boost/program_options.hpp>
@@ -207,11 +213,11 @@ ArmnnDriver::ArmnnDriver(DriverOptions options)
}
}
-Return<void> ArmnnDriver::getCapabilities(getCapabilities_cb cb)
+Return<void> ArmnnDriver::getCapabilities(V1_0::IDevice::getCapabilities_cb cb)
{
ALOGV("ArmnnDriver::getCapabilities()");
- Capabilities capabilities;
+ V1_0::Capabilities capabilities;
if (m_Runtime)
{
capabilities.float32Performance.execTime =
@@ -241,7 +247,7 @@ Return<void> ArmnnDriver::getCapabilities(getCapabilities_cb cb)
return Void();
}
-Return<void> ArmnnDriver::getSupportedOperations(const Model& model, getSupportedOperations_cb cb)
+Return<void> ArmnnDriver::getSupportedOperations(const V1_0::Model& model, V1_0::IDevice::getSupportedOperations_cb cb)
{
ALOGV("ArmnnDriver::getSupportedOperations()");
@@ -310,7 +316,7 @@ Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
}
-Return<ErrorStatus> ArmnnDriver::prepareModel(const Model& model,
+Return<ErrorStatus> ArmnnDriver::prepareModel(const V1_0::Model& model,
const sp<IPreparedModelCallback>& cb)
{
ALOGV("ArmnnDriver::prepareModel()");
@@ -357,7 +363,8 @@ Return<ErrorStatus> ArmnnDriver::prepareModel(const Model& model,
if (modelConverter.GetConversionResult() != ConversionResult::Success)
{
- return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+ return ErrorStatus::NONE;
}
// optimize the network
@@ -370,14 +377,16 @@ Return<ErrorStatus> ArmnnDriver::prepareModel(const Model& model,
{
std::stringstream message;
message << "armnn::Exception ("<<e.what()<<") caught from optimize.";
- return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+ return ErrorStatus::NONE;
}
// Check that the optimized network is valid.
if (!optNet)
{
- return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
"ArmnnDriver::prepareModel: Invalid optimized network", cb);
+ return ErrorStatus::NONE;
}
// Export the optimized network graph to a dot file if an output dump directory
@@ -400,7 +409,8 @@ Return<ErrorStatus> ArmnnDriver::prepareModel(const Model& model,
{
std::stringstream message;
message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
- return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+ return ErrorStatus::NONE;
}
std::unique_ptr<ArmnnPreparedModel> preparedModel(new ArmnnPreparedModel(
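A notable behavioural change in this file: every failure path in prepareModel() now calls FailPrepareModel() to deliver the error through the HIDL callback and then returns ErrorStatus::NONE, instead of returning the error directly, presumably so the failure is reported exactly once, via the callback, as the Android P framework expects for asynchronous preparation. A simplified, runnable sketch of the pattern (std::function and a local ErrorStatus enum stand in for the HIDL types; this is an illustration, not the driver's code):

    #include <functional>
    #include <iostream>
    #include <string>

    // Local stand-ins for the HAL types (assumption: illustrative only).
    enum class ErrorStatus { NONE, GENERAL_FAILURE };
    using PreparedModelCallback = std::function<void(ErrorStatus)>;

    // Mirrors FailPrepareModel in the diff: log, notify the callback, return.
    ErrorStatus FailPrepareModel(ErrorStatus error, const std::string& message,
                                 const PreparedModelCallback& cb)
    {
        std::cerr << "ArmnnDriver::prepareModel: " << message << "\n";
        cb(error);                    // the real outcome travels via the callback
        return error;
    }

    // Mirrors the revised prepareModel(): on failure the callback has already
    // been notified, so the transport-level return is ErrorStatus::NONE.
    ErrorStatus PrepareModel(bool conversionSucceeded, const PreparedModelCallback& cb)
    {
        if (!conversionSucceeded)
        {
            FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
                             "ModelToINetworkConverter failed", cb);
            return ErrorStatus::NONE; // the call itself was handled successfully
        }
        cb(ErrorStatus::NONE);
        return ErrorStatus::NONE;
    }

    int main()
    {
        PrepareModel(false, [](ErrorStatus status) {
            std::cout << "callback received status "
                      << static_cast<int>(status) << "\n";
        });
        return 0;
    }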
diff --git a/ArmnnDriver.hpp b/ArmnnDriver.hpp
index 8b66e774..e8dc3bfb 100644
--- a/ArmnnDriver.hpp
+++ b/ArmnnDriver.hpp
@@ -13,6 +13,10 @@
#include <set>
#include <string>
+// For Android O, explicitly declare the V1_0 HAL namespace to shorten type declarations,
+// as the namespace is not defined in HalInterfaces.h.
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+
namespace armnn_driver
{
@@ -41,14 +45,14 @@ private:
armnn::IClTunedParameters::Mode m_ClTunedParametersMode;
};
-class ArmnnDriver : public IDevice {
+class ArmnnDriver : public V1_0::IDevice {
public:
ArmnnDriver(DriverOptions options);
virtual ~ArmnnDriver() {}
- virtual Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
- virtual Return<void> getSupportedOperations(const Model &model,
- getSupportedOperations_cb _hidl_cb) override;
- virtual Return<ErrorStatus> prepareModel(const Model &model,
+ virtual Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb _hidl_cb) override;
+ virtual Return<void> getSupportedOperations(const V1_0::Model &model,
+ V1_0::IDevice::getSupportedOperations_cb _hidl_cb) override;
+ virtual Return<ErrorStatus> prepareModel(const V1_0::Model &model,
const android::sp<IPreparedModelCallback>& callback);
virtual Return<DeviceStatus> getStatus() override;
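The namespace alias added in this header is what makes the V1_0::Model and V1_0::OperationType renames throughout the rest of the commit compile on Android O, where HalInterfaces.h does not define the alias itself. A toy, self-contained sketch of the technique (the nested namespaces here are stand-ins for the generated HAL tree, not the real headers):

    #include <string>

    // Toy versioned namespaces (assumption: illustrative stand-ins for the
    // android::hardware::neuralnetworks tree generated from the HAL files).
    namespace android { namespace hardware { namespace neuralnetworks {
    namespace V1_0 { struct Model { std::string operations; }; }
    }}}

    // The alias from ArmnnDriver.hpp: every file that includes this header can
    // now write V1_0::Model instead of the fully qualified name.
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    int main()
    {
        V1_0::Model model{"FULLY_CONNECTED"};
        return model.operations.empty() ? 1 : 0;
    }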
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 1bd72199..3aad955b 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -12,6 +12,12 @@
#include <log/log.h>
#include <OperationsUtils.h>
+#if defined(ARMNN_ANDROID_P)
+// The headers of the ML framework have changed between Android O and Android P.
+// The validation functions have been moved into their own header, ValidateHal.h.
+#include <ValidateHal.h>
+#endif
+
#include <cassert>
#include <cinttypes>
@@ -101,7 +107,7 @@ void ArmnnPreparedModel::DumpTensorsIfRequired(char const* tensorNamePrefix,
ArmnnPreparedModel::ArmnnPreparedModel(armnn::NetworkId networkId,
armnn::IRuntime* runtime,
- const Model& model,
+ const V1_0::Model& model,
const std::string& requestInputsAndOutputsDumpDir)
: m_NetworkId(networkId)
, m_Runtime(runtime)
@@ -269,7 +275,7 @@ void ArmnnPreparedModel::ExecuteWithDummyInputs()
}
}
-AndroidNnCpuExecutorPreparedModel::AndroidNnCpuExecutorPreparedModel(const Model& model,
+AndroidNnCpuExecutorPreparedModel::AndroidNnCpuExecutorPreparedModel(const V1_0::Model& model,
const std::string& requestInputsAndOutputsDumpDir)
: m_Model(model)
, m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index f61d56ce..f7644b95 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -11,6 +11,8 @@
#include "NeuralNetworks.h"
#include <armnn/ArmNN.hpp>
+#include "ArmnnDriver.hpp"
+
#include <string>
#include <vector>
@@ -22,7 +24,7 @@ class ArmnnPreparedModel : public IPreparedModel
public:
ArmnnPreparedModel(armnn::NetworkId networkId,
armnn::IRuntime* runtime,
- const Model& model,
+ const V1_0::Model& model,
const std::string& requestInputsAndOutputsDumpDir);
virtual ~ArmnnPreparedModel();
@@ -46,7 +48,7 @@ private:
armnn::NetworkId m_NetworkId;
armnn::IRuntime* m_Runtime;
- Model m_Model;
+ V1_0::Model m_Model;
// There must be a single RequestThread for all ArmnnPreparedModel objects to ensure serial execution of workloads
// It is specific to this class, so it is declared as static here
static RequestThread m_RequestThread;
@@ -58,7 +60,7 @@ class AndroidNnCpuExecutorPreparedModel : public IPreparedModel
{
public:
- AndroidNnCpuExecutorPreparedModel(const Model& model, const std::string& requestInputsAndOutputsDumpDir);
+ AndroidNnCpuExecutorPreparedModel(const V1_0::Model& model, const std::string& requestInputsAndOutputsDumpDir);
virtual ~AndroidNnCpuExecutorPreparedModel() { }
bool Initialize();
@@ -74,7 +76,7 @@ private:
const hidl_vec<RequestArgument>& requestArgs,
const std::vector<android::nn::RunTimePoolInfo>& requestPoolInfos);
- Model m_Model;
+ V1_0::Model m_Model;
std::vector<android::nn::RunTimePoolInfo> m_ModelPoolInfos;
const std::string& m_RequestInputsAndOutputsDumpDir;
uint32_t m_RequestCount;
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index bd2443e2..fe4e8ac1 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -115,7 +115,7 @@ void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& out
outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
-bool ValidateBroadcast(const Model& model, const Operation& operation, uint32_t numInputs)
+bool ValidateBroadcast(const V1_0::Model& model, const V1_0::Operation& operation, uint32_t numInputs)
{
assert(operation.inputs.size() > 0); // This should have been validated by the caller
// validateModel() has been called already so we know the operation.inputs indexes are valid within model.operands.
@@ -334,7 +334,7 @@ private:
std::vector<uint8_t> m_SwizzledTensorData;
};
-ModelToINetworkConverter::ModelToINetworkConverter(armnn::Compute compute, const Model& model,
+ModelToINetworkConverter::ModelToINetworkConverter(armnn::Compute compute, const V1_0::Model& model,
const std::set<unsigned int>& forcedUnsupportedOperations)
: m_Compute(compute)
, m_Model(model)
@@ -471,37 +471,37 @@ void ModelToINetworkConverter::Convert()
}
}
-bool ModelToINetworkConverter::ConvertOperation(const Operation& operation)
+bool ModelToINetworkConverter::ConvertOperation(const V1_0::Operation& operation)
{
switch (operation.type)
{
- case OperationType::ADD: return ConvertAdd(operation);
- case OperationType::AVERAGE_POOL_2D: return ConvertAveragePool2d(operation);
- case OperationType::CONCATENATION: return ConvertConcatenation(operation);
- case OperationType::CONV_2D: return ConvertConv2d(operation);
- case OperationType::DEPTHWISE_CONV_2D: return ConvertDepthwiseConv2d(operation);
- case OperationType::FLOOR: return ConvertFloor(operation);
- case OperationType::FULLY_CONNECTED: return ConvertFullyConnected(operation);
- case OperationType::LOCAL_RESPONSE_NORMALIZATION: return ConvertLocalResponseNormalization(operation);
- case OperationType::LOGISTIC: return ConvertLogistic(operation);
- case OperationType::L2_NORMALIZATION: return ConvertL2Normalization(operation);
- case OperationType::L2_POOL_2D: return ConvertL2Pool2d(operation);
- case OperationType::MAX_POOL_2D: return ConvertMaxPool2d(operation);
- case OperationType::MUL: return ConvertMul(operation);
- case OperationType::RELU: return ConvertReLu(operation);
- case OperationType::RELU1: return ConvertReLu1(operation);
- case OperationType::RELU6: return ConvertReLu6(operation);
- case OperationType::SOFTMAX: return ConvertSoftmax(operation);
- case OperationType::TANH: return ConvertTanH(operation);
- case OperationType::RESHAPE: return ConvertReshape(operation);
- case OperationType::RESIZE_BILINEAR: return ConvertResizeBilinear(operation);
+ case V1_0::OperationType::ADD: return ConvertAdd(operation);
+ case V1_0::OperationType::AVERAGE_POOL_2D: return ConvertAveragePool2d(operation);
+ case V1_0::OperationType::CONCATENATION: return ConvertConcatenation(operation);
+ case V1_0::OperationType::CONV_2D: return ConvertConv2d(operation);
+ case V1_0::OperationType::DEPTHWISE_CONV_2D: return ConvertDepthwiseConv2d(operation);
+ case V1_0::OperationType::FLOOR: return ConvertFloor(operation);
+ case V1_0::OperationType::FULLY_CONNECTED: return ConvertFullyConnected(operation);
+ case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION: return ConvertLocalResponseNormalization(operation);
+ case V1_0::OperationType::LOGISTIC: return ConvertLogistic(operation);
+ case V1_0::OperationType::L2_NORMALIZATION: return ConvertL2Normalization(operation);
+ case V1_0::OperationType::L2_POOL_2D: return ConvertL2Pool2d(operation);
+ case V1_0::OperationType::MAX_POOL_2D: return ConvertMaxPool2d(operation);
+ case V1_0::OperationType::MUL: return ConvertMul(operation);
+ case V1_0::OperationType::RELU: return ConvertReLu(operation);
+ case V1_0::OperationType::RELU1: return ConvertReLu1(operation);
+ case V1_0::OperationType::RELU6: return ConvertReLu6(operation);
+ case V1_0::OperationType::SOFTMAX: return ConvertSoftmax(operation);
+ case V1_0::OperationType::TANH: return ConvertTanH(operation);
+ case V1_0::OperationType::RESHAPE: return ConvertReshape(operation);
+ case V1_0::OperationType::RESIZE_BILINEAR: return ConvertResizeBilinear(operation);
default: return Fail("%s: Operation type %s not supported in ArmnnDriver",
__func__, toString(operation.type).c_str());
}
}
-bool ModelToINetworkConverter::ConvertAdd(const Operation& operation)
+bool ModelToINetworkConverter::ConvertAdd(const V1_0::Operation& operation)
{
LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
@@ -594,12 +594,12 @@ bool ModelToINetworkConverter::ConvertAdd(const Operation& operation)
}
}
-bool ModelToINetworkConverter::ConvertAveragePool2d(const Operation& operation)
+bool ModelToINetworkConverter::ConvertAveragePool2d(const V1_0::Operation& operation)
{
return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average);
}
-bool ModelToINetworkConverter::ConvertConcatenation(const Operation& operation)
+bool ModelToINetworkConverter::ConvertConcatenation(const V1_0::Operation& operation)
{
// The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
if (operation.inputs.size() <= 1)
@@ -758,7 +758,7 @@ bool ModelToINetworkConverter::ConvertConcatenation(const Operation& operation)
return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
-bool ModelToINetworkConverter::ConvertConv2d(const Operation& operation)
+bool ModelToINetworkConverter::ConvertConv2d(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -860,7 +860,7 @@ bool ModelToINetworkConverter::ConvertConv2d(const Operation& operation)
}
}
-bool ModelToINetworkConverter::ConvertDepthwiseConv2d(const Operation& operation)
+bool ModelToINetworkConverter::ConvertDepthwiseConv2d(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -979,7 +979,7 @@ bool ModelToINetworkConverter::ConvertDepthwiseConv2d(const Operation& operation
}
}
-bool ModelToINetworkConverter::ConvertFloor(const Operation& operation)
+bool ModelToINetworkConverter::ConvertFloor(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1009,7 +1009,7 @@ bool ModelToINetworkConverter::ConvertFloor(const Operation& operation)
return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
-bool ModelToINetworkConverter::ConvertFullyConnected(const Operation& operation)
+bool ModelToINetworkConverter::ConvertFullyConnected(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1100,7 +1100,7 @@ bool ModelToINetworkConverter::ConvertFullyConnected(const Operation& operation)
}
}
-bool ModelToINetworkConverter::ConvertLocalResponseNormalization(const Operation& operation)
+bool ModelToINetworkConverter::ConvertLocalResponseNormalization(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1158,7 +1158,7 @@ bool ModelToINetworkConverter::ConvertLocalResponseNormalization(const Operation
return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}
-bool ModelToINetworkConverter::ConvertLogistic(const Operation& operation)
+bool ModelToINetworkConverter::ConvertLogistic(const V1_0::Operation& operation)
{
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::Sigmoid;
@@ -1166,7 +1166,7 @@ bool ModelToINetworkConverter::ConvertLogistic(const Operation& operation)
return ConvertToActivation(operation, __func__, desc);
}
-bool ModelToINetworkConverter::ConvertL2Normalization(const Operation& operation)
+bool ModelToINetworkConverter::ConvertL2Normalization(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1203,17 +1203,17 @@ bool ModelToINetworkConverter::ConvertL2Normalization(const Operation& operation
return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}
-bool ModelToINetworkConverter::ConvertL2Pool2d(const Operation& operation)
+bool ModelToINetworkConverter::ConvertL2Pool2d(const V1_0::Operation& operation)
{
return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2);
}
-bool ModelToINetworkConverter::ConvertMaxPool2d(const Operation& operation)
+bool ModelToINetworkConverter::ConvertMaxPool2d(const V1_0::Operation& operation)
{
return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max);
}
-bool ModelToINetworkConverter::ConvertMul(const Operation& operation)
+bool ModelToINetworkConverter::ConvertMul(const V1_0::Operation& operation)
{
LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
@@ -1268,7 +1268,7 @@ bool ModelToINetworkConverter::ConvertMul(const Operation& operation)
}
}
-bool ModelToINetworkConverter::ConvertReLu(const Operation& operation)
+bool ModelToINetworkConverter::ConvertReLu(const V1_0::Operation& operation)
{
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::ReLu;
@@ -1276,7 +1276,7 @@ bool ModelToINetworkConverter::ConvertReLu(const Operation& operation)
return ConvertToActivation(operation, __func__, desc);
}
-bool ModelToINetworkConverter::ConvertReLu1(const Operation& operation)
+bool ModelToINetworkConverter::ConvertReLu1(const V1_0::Operation& operation)
{
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::BoundedReLu;
@@ -1286,7 +1286,7 @@ bool ModelToINetworkConverter::ConvertReLu1(const Operation& operation)
return ConvertToActivation(operation, __func__, desc);
}
-bool ModelToINetworkConverter::ConvertReLu6(const Operation& operation)
+bool ModelToINetworkConverter::ConvertReLu6(const V1_0::Operation& operation)
{
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::BoundedReLu;
@@ -1295,7 +1295,7 @@ bool ModelToINetworkConverter::ConvertReLu6(const Operation& operation)
return ConvertToActivation(operation, __func__, desc);
}
-bool ModelToINetworkConverter::ConvertSoftmax(const Operation& operation)
+bool ModelToINetworkConverter::ConvertSoftmax(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1325,7 +1325,7 @@ bool ModelToINetworkConverter::ConvertSoftmax(const Operation& operation)
return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
-bool ModelToINetworkConverter::ConvertTanH(const Operation& operation)
+bool ModelToINetworkConverter::ConvertTanH(const V1_0::Operation& operation)
{
armnn::ActivationDescriptor desc;
desc.m_Function = armnn::ActivationFunction::TanH;
@@ -1335,7 +1335,7 @@ bool ModelToINetworkConverter::ConvertTanH(const Operation& operation)
return ConvertToActivation(operation, __func__, desc);
}
-bool ModelToINetworkConverter::ConvertReshape(const Operation& operation)
+bool ModelToINetworkConverter::ConvertReshape(const V1_0::Operation& operation)
{
const Operand* inputOperand = GetInputOperand(operation, 0);
const Operand* requestedShapeOperand = GetInputOperand(operation, 1);
@@ -1403,7 +1403,7 @@ bool ModelToINetworkConverter::ConvertReshape(const Operation& operation)
return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
-bool ModelToINetworkConverter::ConvertResizeBilinear(const Operation& operation)
+bool ModelToINetworkConverter::ConvertResizeBilinear(const V1_0::Operation& operation)
{
LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
if (!input.IsValid())
@@ -1449,7 +1449,7 @@ bool ModelToINetworkConverter::ConvertResizeBilinear(const Operation& operation)
}
-bool ModelToINetworkConverter::ConvertToActivation(const Operation& operation,
+bool ModelToINetworkConverter::ConvertToActivation(const V1_0::Operation& operation,
const char* operationName,
const armnn::ActivationDescriptor& activationDesc)
{
@@ -1475,7 +1475,7 @@ bool ModelToINetworkConverter::ConvertToActivation(const Operation& operation,
return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
-bool ModelToINetworkConverter::ConvertPooling2d(const Operation& operation,
+bool ModelToINetworkConverter::ConvertPooling2d(const V1_0::Operation& operation,
const char* operationName,
armnn::PoolingAlgorithm poolType)
{
@@ -1625,7 +1625,7 @@ const void* ModelToINetworkConverter::GetOperandValueReadOnlyAddress(const Opera
return valueStart;
}
-const Operand* ModelToINetworkConverter::GetInputOperand(const Operation& operation, uint32_t inputIndex) const
+const Operand* ModelToINetworkConverter::GetInputOperand(const V1_0::Operation& operation, uint32_t inputIndex) const
{
if (inputIndex >= operation.inputs.size())
{
@@ -1637,7 +1637,7 @@ const Operand* ModelToINetworkConverter::GetInputOperand(const Operation& operat
return &m_Model.operands[operation.inputs[inputIndex]];
}
-const Operand* ModelToINetworkConverter::GetOutputOperand(const Operation& operation, uint32_t outputIndex) const
+const Operand* ModelToINetworkConverter::GetOutputOperand(const V1_0::Operation& operation, uint32_t outputIndex) const
{
if (outputIndex >= operation.outputs.size())
{
@@ -1650,7 +1650,7 @@ const Operand* ModelToINetworkConverter::GetOutputOperand(const Operation& opera
}
template<typename T>
-bool ModelToINetworkConverter::GetInputScalar(const Operation& operation, uint32_t inputIndex,
+bool ModelToINetworkConverter::GetInputScalar(const V1_0::Operation& operation, uint32_t inputIndex,
OperandType type, T& outValue) const
{
const Operand* operand = GetInputOperand(operation, inputIndex);
@@ -1681,17 +1681,19 @@ bool ModelToINetworkConverter::GetInputScalar(const Operation& operation, uint32
return true;
}
-bool ModelToINetworkConverter::GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const
+bool ModelToINetworkConverter::GetInputInt32(const V1_0::Operation& operation,
+ uint32_t inputIndex, int32_t& outValue) const
{
return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
}
-bool ModelToINetworkConverter::GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const
+bool ModelToINetworkConverter::GetInputFloat32(const V1_0::Operation& operation,
+ uint32_t inputIndex, float& outValue) const
{
return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
}
-bool ModelToINetworkConverter::GetInputActivationFunction(const Operation& operation,
+bool ModelToINetworkConverter::GetInputActivationFunction(const V1_0::Operation& operation,
uint32_t inputIndex,
ActivationFn& outActivationFunction) const
{
@@ -1705,7 +1707,7 @@ bool ModelToINetworkConverter::GetInputActivationFunction(const Operation& opera
return true;
}
-bool ModelToINetworkConverter::GetInputPaddingScheme(const Operation& operation,
+bool ModelToINetworkConverter::GetInputPaddingScheme(const V1_0::Operation& operation,
uint32_t inputIndex,
android::nn::PaddingScheme& outPaddingScheme) const
{
@@ -1720,7 +1722,7 @@ bool ModelToINetworkConverter::GetInputPaddingScheme(const Operation& operation,
}
LayerInputHandle ModelToINetworkConverter::ConvertToLayerInputHandle(
- const Operation& operation,
+ const V1_0::Operation& operation,
uint32_t inputIndex)
{
const Operand* operand = GetInputOperand(operation, inputIndex);
@@ -1789,7 +1791,7 @@ LayerInputHandle ModelToINetworkConverter::ConvertToLayerInputHandle(
}
}
-ConstTensorPin ModelToINetworkConverter::ConvertOperationInputToConstTensorPin(const Operation& operation,
+ConstTensorPin ModelToINetworkConverter::ConvertOperationInputToConstTensorPin(const V1_0::Operation& operation,
uint32_t inputIndex, const armnn::PermutationVector& dimensionMappings,
const armnn::TensorShape* overrideTensorShape)
{
@@ -1931,7 +1933,7 @@ armnn::IConnectableLayer* ModelToINetworkConverter::ProcessActivation(const armn
return activationLayer;
}
-bool ModelToINetworkConverter::SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
+bool ModelToINetworkConverter::SetupAndTrackLayerOutputSlot(const V1_0::Operation& operation, uint32_t outputIndex,
armnn::IConnectableLayer& layer)
{
const Operand* outputOperand = GetOutputOperand(operation, outputIndex);
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 7ced514b..864a2fcc 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -9,6 +9,8 @@
#include "NeuralNetworks.h"
#include "ActivationFunctor.h"
+#include "ArmnnDriver.hpp"
+
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <CpuExecutor.h>
@@ -37,7 +39,7 @@ enum class ConversionResult
class ModelToINetworkConverter
{
public:
- ModelToINetworkConverter(armnn::Compute compute, const Model& model,
+ ModelToINetworkConverter(armnn::Compute compute, const V1_0::Model& model,
const std::set<unsigned int>& forcedUnsupportedOperations);
ConversionResult GetConversionResult() const { return m_ConversionResult; }
@@ -50,76 +52,76 @@ public:
private:
void Convert();
- bool ConvertOperation(const Operation& operation);
+ bool ConvertOperation(const V1_0::Operation& operation);
- bool ConvertAdd(const Operation& operation);
+ bool ConvertAdd(const V1_0::Operation& operation);
- bool ConvertAveragePool2d(const Operation& operation);
+ bool ConvertAveragePool2d(const V1_0::Operation& operation);
- bool ConvertConcatenation(const Operation& operation);
+ bool ConvertConcatenation(const V1_0::Operation& operation);
- bool ConvertConv2d(const Operation& operation);
+ bool ConvertConv2d(const V1_0::Operation& operation);
- bool ConvertDepthwiseConv2d(const Operation& operation);
+ bool ConvertDepthwiseConv2d(const V1_0::Operation& operation);
- bool ConvertFloor(const Operation& operation);
+ bool ConvertFloor(const V1_0::Operation& operation);
- bool ConvertFullyConnected(const Operation& operation);
+ bool ConvertFullyConnected(const V1_0::Operation& operation);
- bool ConvertLogistic(const Operation& operation);
+ bool ConvertLogistic(const V1_0::Operation& operation);
- bool ConvertLocalResponseNormalization(const Operation& operation);
+ bool ConvertLocalResponseNormalization(const V1_0::Operation& operation);
- bool ConvertL2Normalization(const Operation& operation);
+ bool ConvertL2Normalization(const V1_0::Operation& operation);
- bool ConvertL2Pool2d(const Operation& operation);
+ bool ConvertL2Pool2d(const V1_0::Operation& operation);
- bool ConvertMaxPool2d(const Operation& operation);
+ bool ConvertMaxPool2d(const V1_0::Operation& operation);
- bool ConvertMul(const Operation& operation);
+ bool ConvertMul(const V1_0::Operation& operation);
- bool ConvertReLu(const Operation& operation);
+ bool ConvertReLu(const V1_0::Operation& operation);
- bool ConvertReLu1(const Operation& operation);
+ bool ConvertReLu1(const V1_0::Operation& operation);
- bool ConvertReLu6(const Operation& operation);
+ bool ConvertReLu6(const V1_0::Operation& operation);
- bool ConvertSoftmax(const Operation& operation);
+ bool ConvertSoftmax(const V1_0::Operation& operation);
- bool ConvertTanH(const Operation& operation);
+ bool ConvertTanH(const V1_0::Operation& operation);
- bool ConvertReshape(const Operation& operation);
+ bool ConvertReshape(const V1_0::Operation& operation);
- bool ConvertResizeBilinear(const Operation& operation);
+ bool ConvertResizeBilinear(const V1_0::Operation& operation);
- bool ConvertToActivation(const Operation& operation, const char* operationName,
+ bool ConvertToActivation(const V1_0::Operation& operation, const char* operationName,
const armnn::ActivationDescriptor& activationDesc);
- bool ConvertPooling2d(const Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
+ bool ConvertPooling2d(const V1_0::Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
- const Operand* GetInputOperand(const Operation& operation, uint32_t inputIndex) const;
+ const Operand* GetInputOperand(const V1_0::Operation& operation, uint32_t inputIndex) const;
- const Operand* GetOutputOperand(const Operation& operation, uint32_t outputIndex) const;
+ const Operand* GetOutputOperand(const V1_0::Operation& operation, uint32_t outputIndex) const;
template<typename T>
- bool GetInputScalar(const Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
+ bool GetInputScalar(const V1_0::Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
- bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
+ bool GetInputInt32(const V1_0::Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
- bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const;
+ bool GetInputFloat32(const V1_0::Operation& operation, uint32_t inputIndex, float& outValue) const;
- bool GetInputActivationFunction(const Operation& operation, uint32_t inputIndex,
+ bool GetInputActivationFunction(const V1_0::Operation& operation, uint32_t inputIndex,
ActivationFn& outActivationFunction) const;
- bool GetInputPaddingScheme(const Operation& operation, uint32_t inputIndex,
+ bool GetInputPaddingScheme(const V1_0::Operation& operation, uint32_t inputIndex,
android::nn::PaddingScheme& outPaddingScheme) const;
- LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, uint32_t inputIndex);
+ LayerInputHandle ConvertToLayerInputHandle(const V1_0::Operation& operation, uint32_t inputIndex);
- ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
+ ConstTensorPin ConvertOperationInputToConstTensorPin(const V1_0::Operation& operation, uint32_t inputIndex,
const armnn::PermutationVector& dimensionMappings = g_DontPermute,
const armnn::TensorShape* overrideTensorShape = nullptr);
@@ -134,13 +136,13 @@ private:
armnn::IConnectableLayer* prevLayer);
- bool SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
+ bool SetupAndTrackLayerOutputSlot(const V1_0::Operation& operation, uint32_t outputIndex,
armnn::IConnectableLayer& layer);
// Input data
armnn::Compute m_Compute;
- const Model& m_Model;
+ const V1_0::Model& m_Model;
const std::set<unsigned int>& m_ForcedUnsupportedOperations;
// Output data
diff --git a/Utils.cpp b/Utils.cpp
index 01c2719b..99912201 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -63,8 +63,17 @@ void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::Ru
// find the location within the pool
assert(location.poolIndex < memPools.size());
- uint8_t* memory =
- static_cast<uint8_t*>(static_cast<void*>(memPools[location.poolIndex].buffer)) + location.offset;
+ const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
+
+ // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
+ // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
+#if defined(ARMNN_ANDROID_P) // Use the new Android P implementation.
+ uint8_t* memPoolBuffer = memPool.getBuffer();
+#else // Fallback to the old Android O implementation.
+ uint8_t* memPoolBuffer = memPool.buffer;
+#endif
+
+ uint8_t* memory = memPoolBuffer + location.offset;
return memory;
}
@@ -102,7 +111,7 @@ std::string GetOperandSummary(const Operand& operand)
toString(operand.type);
}
-std::string GetModelSummary(const Model& model)
+std::string GetModelSummary(const V1_0::Model& model)
{
std::stringstream result;
@@ -273,7 +282,7 @@ void DumpTensor(const std::string& dumpDir,
void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
const std::string& dumpDir,
- const Model& model)
+ const V1_0::Model& model)
{
// The dump directory must exist in advance.
if (dumpDir.empty())
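The GetMemoryFromPool() change above is the one place where the driver copes with an incompatible framework type rather than a renamed one: android::nn::RunTimePoolInfo made its buffer member private in Android P and exposed getBuffer() instead. A self-contained sketch of the shim (both pool types are toy stand-ins defined locally; compile with -DARMNN_ANDROID_P to exercise the P path):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Toy stand-ins for the two framework generations (illustrative only).
    struct RunTimePoolInfoO { uint8_t* buffer = nullptr; }; // Android O: public field
    class RunTimePoolInfoP                                  // Android P: accessor
    {
    public:
        explicit RunTimePoolInfoP(uint8_t* buf) : m_Buffer(buf) {}
        uint8_t* getBuffer() const { return m_Buffer; }
    private:
        uint8_t* m_Buffer;
    };

    #if defined(ARMNN_ANDROID_P)
    using RunTimePoolInfo = RunTimePoolInfoP;
    #else
    using RunTimePoolInfo = RunTimePoolInfoO;
    #endif

    // Mirrors the shim in GetMemoryFromPool(): one #if selects the access path
    // that matches the headers the driver is being compiled against.
    uint8_t* GetPoolBuffer(const RunTimePoolInfo& memPool, size_t offset)
    {
    #if defined(ARMNN_ANDROID_P)
        uint8_t* memPoolBuffer = memPool.getBuffer();
    #else
        uint8_t* memPoolBuffer = memPool.buffer;
    #endif
        return memPoolBuffer + offset;
    }

    int main()
    {
        static uint8_t pool[16] = {};
    #if defined(ARMNN_ANDROID_P)
        RunTimePoolInfo info(pool);
    #else
        RunTimePoolInfo info{pool};
    #endif
        std::printf("buffer at offset 4: %p\n",
                    static_cast<void*>(GetPoolBuffer(info, 4)));
        return 0;
    }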
diff --git a/Utils.hpp b/Utils.hpp
index e6b56be0..4b5066ee 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -10,6 +10,8 @@
#include <armnn/ArmNN.hpp>
#include <CpuExecutor.h>
+#include "ArmnnDriver.hpp"
+
#include <vector>
#include <string>
@@ -41,7 +43,7 @@ void* GetMemoryFromPool(DataLocation location,
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);
std::string GetOperandSummary(const Operand& operand);
-std::string GetModelSummary(const Model& model);
+std::string GetModelSummary(const V1_0::Model& model);
void DumpTensor(const std::string& dumpDir,
const std::string& requestName,
@@ -50,5 +52,5 @@ void DumpTensor(const std::string& dumpDir,
void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
const std::string& dumpDir,
- const Model& model);
+ const V1_0::Model& model);
}
diff --git a/test/Android.mk b/test/Android.mk
index d74afecc..97e9a903 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -61,6 +61,13 @@ LOCAL_SHARED_LIBRARIES := \
android.hidl.memory@1.0 \
libOpenCL
+ifeq ($(PLATFORM_VERSION),9)
+# Required to build the 1.0 version of the NN Driver on Android P and later versions,
+# as the 1.0 version of the NN API needs the 1.1 HAL headers to be included regardless.
+LOCAL_SHARED_LIBRARIES+= \
+ android.hardware.neuralnetworks@1.1
+endif
+
LOCAL_MODULE := armnn-driver-tests
LOCAL_MODULE_TAGS := eng optional
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 16734dc3..c2d58bde 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -22,7 +22,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
ALOGI("ConcurrentExecute: entry");
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
+ V1_0::Model model = {};
// add operands
int32_t actValue = 0;
@@ -37,7 +37,7 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute)
// make the fully connected operation
model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
diff --git a/test/Convolution2D.cpp b/test/Convolution2D.cpp
index 90edb415..cc301bc9 100644
--- a/test/Convolution2D.cpp
+++ b/test/Convolution2D.cpp
@@ -20,7 +20,7 @@ namespace
void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
{
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
+ V1_0::Model model = {};
uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1;
@@ -39,7 +39,7 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme)
// make the convolution operation
model.operations.resize(1);
- model.operations[0].type = OperationType::CONV_2D;
+ model.operations[0].type = V1_0::OperationType::CONV_2D;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
model.operations[0].outputs = hidl_vec<uint32_t>{7};
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 5b371921..d2d380a7 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -107,13 +107,13 @@ void AddPoolAndSetData(uint32_t size, Request& request, const float* data)
memcpy(dst, data, size * sizeof(float));
}
-void AddOperand(Model& model, const Operand& op)
+void AddOperand(V1_0::Model& model, const Operand& op)
{
model.operands.resize(model.operands.size() + 1);
model.operands[model.operands.size() - 1] = op;
}
-void AddIntOperand(Model& model, int32_t value)
+void AddIntOperand(V1_0::Model& model, int32_t value)
{
DataLocation location = {};
location.offset = model.operandValues.size();
@@ -131,7 +131,7 @@ void AddIntOperand(Model& model, int32_t value)
AddOperand(model, op);
}
-void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+void AddInputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions)
{
Operand op = {};
op.type = OperandType::TENSOR_FLOAT32;
@@ -144,7 +144,7 @@ void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions)
model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1;
}
-void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
+void AddOutputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions)
{
Operand op = {};
op.type = OperandType::TENSOR_FLOAT32;
@@ -158,7 +158,7 @@ void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions)
}
-android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus & prepareStatus,
ErrorStatus expectedStatus)
@@ -176,7 +176,7 @@ android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
return cb->GetPreparedModel();
}
-android::sp<IPreparedModel> PrepareModel(const Model& model,
+android::sp<IPreparedModel> PrepareModel(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver)
{
ErrorStatus prepareStatus = ErrorStatus::NONE;
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index e90f7ecf..57541a35 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -72,9 +72,9 @@ android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request);
void AddPoolAndSetData(uint32_t size, Request& request, const float* data);
-void AddOperand(Model& model, const Operand& op);
+void AddOperand(V1_0::Model& model, const Operand& op);
-void AddIntOperand(Model& model, int32_t value);
+void AddIntOperand(V1_0::Model& model, int32_t value);
template<typename T>
OperandType TypeToOperandType();
@@ -86,7 +86,7 @@ template<>
OperandType TypeToOperandType<int32_t>();
template<typename T>
-void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
+void AddTensorOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions, T* values)
{
uint32_t totalElements = 1;
for (uint32_t dim : dimensions)
@@ -113,14 +113,14 @@ void AddTensorOperand(Model& model, hidl_vec<uint32_t> dimensions, T* values)
AddOperand(model, op);
}
-void AddInputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+void AddInputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions);
-void AddOutputOperand(Model& model, hidl_vec<uint32_t> dimensions);
+void AddOutputOperand(V1_0::Model& model, hidl_vec<uint32_t> dimensions);
-android::sp<IPreparedModel> PrepareModel(const Model& model,
+android::sp<IPreparedModel> PrepareModel(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver);
-android::sp<IPreparedModel> PrepareModelWithStatus(const Model& model,
+android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
armnn_driver::ArmnnDriver& driver,
ErrorStatus & prepareStatus,
ErrorStatus expectedStatus=ErrorStatus::NONE);
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index ea6c8715..4feda30b 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -19,7 +19,7 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
// but that uses slightly weird dimensions which I don't think we need to support for now
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model = {};
+ V1_0::Model model = {};
// add operands
int32_t actValue = 0;
@@ -34,7 +34,7 @@ BOOST_AUTO_TEST_CASE(FullyConnected)
// make the fully connected operation
model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
sup = supported;
};
- Model model = {};
+ V1_0::Model model = {};
// operands
int32_t actValue = 0;
@@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
@@ -177,7 +177,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
sup = supported;
};
- Model model = {};
+ V1_0::Model model = {};
// operands
int32_t actValue = 0;
@@ -200,7 +200,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
model.operations.resize(1);
- model.operations[0].type = OperationType::FULLY_CONNECTED;
+ model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[0].inputs = hidl_vec<uint32_t>{0,1,2,3};
model.operations[0].outputs = hidl_vec<uint32_t>{4};
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 5c6c041d..7116f0b0 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
sup = supported;
};
- Model model1 = {};
+ V1_0::Model model1 = {};
// add operands
int32_t actValue = 0;
@@ -40,14 +40,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
// make a correct fully connected operation
model1.operations.resize(2);
- model1.operations[0].type = OperationType::FULLY_CONNECTED;
+ model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED;
model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3};
model1.operations[0].outputs = hidl_vec<uint32_t>{4};
// make an incorrect fully connected operation
AddIntOperand(model1, actValue);
AddOutputOperand(model1, hidl_vec<uint32_t>{1, 1});
- model1.operations[1].type = OperationType::FULLY_CONNECTED;
+ model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
model1.operations[1].inputs = hidl_vec<uint32_t>{4};
model1.operations[1].outputs = hidl_vec<uint32_t>{5};
@@ -57,7 +57,7 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(sup[1] == false);
// Broadcast add/mul are not supported
- Model model2 = {};
+ V1_0::Model model2 = {};
AddInputOperand(model2, hidl_vec<uint32_t>{1, 1, 3, 4});
AddInputOperand(model2, hidl_vec<uint32_t>{4});
@@ -66,11 +66,11 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
model2.operations.resize(2);
- model2.operations[0].type = OperationType::ADD;
+ model2.operations[0].type = V1_0::OperationType::ADD;
model2.operations[0].inputs = hidl_vec<uint32_t>{0,1};
model2.operations[0].outputs = hidl_vec<uint32_t>{2};
- model2.operations[1].type = OperationType::MUL;
+ model2.operations[1].type = V1_0::OperationType::MUL;
model2.operations[1].inputs = hidl_vec<uint32_t>{0,1};
model2.operations[1].outputs = hidl_vec<uint32_t>{3};
@@ -79,14 +79,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(sup[0] == false);
BOOST_TEST(sup[1] == false);
- Model model3 = {};
+ V1_0::Model model3 = {};
// Add unsupported operation, should return no error but we don't support it
AddInputOperand(model3, hidl_vec<uint32_t>{1, 1, 1, 8});
AddIntOperand(model3, 2);
AddOutputOperand(model3, hidl_vec<uint32_t>{1, 2, 2, 2});
model3.operations.resize(1);
- model3.operations[0].type = OperationType::DEPTH_TO_SPACE;
+ model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE;
model1.operations[0].inputs = hidl_vec<uint32_t>{0, 1};
model3.operations[0].outputs = hidl_vec<uint32_t>{2};
@@ -95,10 +95,10 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations)
BOOST_TEST(sup[0] == false);
// Add invalid operation
- Model model4 = {};
+ V1_0::Model model4 = {};
AddIntOperand(model4, 0);
model4.operations.resize(1);
- model4.operations[0].type = static_cast<OperationType>(100);
+ model4.operations[0].type = static_cast<V1_0::OperationType>(100);
model4.operations[0].outputs = hidl_vec<uint32_t>{0};
driver->getSupportedOperations(model4, cb);
@@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
sup = supported;
};
- Model model = {};
+ V1_0::Model model = {};
// operands
int32_t actValue = 0;
@@ -146,17 +146,17 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
model.operations.resize(3);
// unsupported
- model.operations[0].type = OperationType::ADD;
+ model.operations[0].type = V1_0::OperationType::ADD;
model.operations[0].inputs = hidl_vec<uint32_t>{0,1};
model.operations[0].outputs = hidl_vec<uint32_t>{2};
// supported
- model.operations[1].type = OperationType::FULLY_CONNECTED;
+ model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED;
model.operations[1].inputs = hidl_vec<uint32_t>{3, 4, 5, 6};
model.operations[1].outputs = hidl_vec<uint32_t>{7};
// unsupported
- model.operations[2].type = OperationType::MUL;
+ model.operations[2].type = V1_0::OperationType::MUL;
model.operations[2].inputs = hidl_vec<uint32_t>{0,1};
model.operations[2].outputs = hidl_vec<uint32_t>{8};
@@ -184,7 +184,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
sup = supported;
};
- Model model = {};
+ V1_0::Model model = {};
model.pools = hidl_vec<hidl_memory>{hidl_memory("Unsuported hidl memory type", nullptr, 0)};
diff --git a/test/Merger.cpp b/test/Merger.cpp
index 6c069a86..48253604 100644
--- a/test/Merger.cpp
+++ b/test/Merger.cpp
@@ -25,7 +25,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,
ErrorStatus expectedExecStatus=ErrorStatus::NONE)
{
std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
- Model model{};
+ V1_0::Model model{};
hidl_vec<uint32_t> modelInputIds;
modelInputIds.resize(inputs.size()+1);
@@ -40,7 +40,7 @@ MergerTestImpl(const std::vector<const TestTensor*> & inputs,
// make the concat operation
model.operations.resize(1);
- model.operations[0].type = OperationType::CONCATENATION;
+ model.operations[0].type = V1_0::OperationType::CONCATENATION;
model.operations[0].inputs = modelInputIds;
model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 37aece7c..3fa8e125 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -31,9 +31,9 @@ BOOST_AUTO_TEST_CASE(TestCapabilities)
auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
ErrorStatus error;
- Capabilities cap;
+ V1_0::Capabilities cap;
- ArmnnDriver::getCapabilities_cb cb = [&](ErrorStatus status, const Capabilities& capabilities)
+ ArmnnDriver::getCapabilities_cb cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
{
error = status;
cap = capabilities;
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index b429920c..e7e6cde7 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -95,7 +95,7 @@ public:
}
std::string m_RequestInputsAndOutputsDumpDir;
- Model m_Model;
+ V1_0::Model m_Model;
private:
std::string m_FileName;