author    Matteo Martincigh <matteo.martincigh@arm.com>  2018-09-03 13:50:50 +0100
committer Matthew Bentham <matthew.bentham@arm.com>      2018-09-18 12:40:38 +0100
commit    e48bdff741568236d3c0747ad3d18a8eba5b36dd (patch)
tree      77aabce6f75d86d3f2f3924f342292ae5a7267e7
parent    a15dc11fd7bf3ad49e752ec75157b731287fe46d (diff)
IVGCVSW-1806 Refactored Android-NN-Driver, added common "getCapabilities", "getSupportedOperations" and "prepareModel" implementations

* Added common base ArmnnDriverImpl class
* Added common template implementation of the driver's "getCapabilities", "getSupportedOperations" and "prepareModel" methods
* Refactored ArmnnPreparedModel and RequestThread to support HAL v1.1 models
* Moved "getStatus" to the common base class, as it is shared by both HAL implementations
* Refactored the code where necessary

Change-Id: I747334730026d63b4002662523fb93608f67c899
-rw-r--r--  1.0/ArmnnDriver.hpp            26
-rw-r--r--  1.0/ArmnnDriverImpl.hpp        41
-rw-r--r--  1.1/ArmnnDriver.hpp            63
-rw-r--r--  1.1/ArmnnDriverImpl.cpp       151
-rw-r--r--  1.1/ArmnnDriverImpl.hpp        39
-rw-r--r--  Android.mk                      5
-rw-r--r--  ArmnnDevice.cpp                 2
-rw-r--r--  ArmnnDriverImpl.cpp (renamed from 1.0/ArmnnDriverImpl.cpp)  105
-rw-r--r--  ArmnnDriverImpl.hpp            62
-rw-r--r--  ArmnnPreparedModel.cpp         49
-rw-r--r--  ArmnnPreparedModel.hpp         21
-rw-r--r--  ModelToINetworkConverter.cpp    5
-rw-r--r--  ModelToINetworkConverter.hpp   17
-rw-r--r--  RequestThread.cpp              32
-rw-r--r--  RequestThread.hpp               9
-rw-r--r--  Utils.cpp                      44
-rw-r--r--  Utils.hpp                      49
17 files changed, 303 insertions, 417 deletions
diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 83484ca9..18e25968 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -7,8 +7,8 @@
#include <HalInterfaces.h>
-#include "ArmnnDriverImpl.hpp"
#include "ArmnnDevice.hpp"
+#include "../ArmnnDriverImpl.hpp"
#include <log/log.h>
@@ -29,36 +29,44 @@ public:
public:
Return<void> getCapabilities(
- ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+ ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
{
ALOGV("V1_0::ArmnnDriver::getCapabilities()");
- return ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getCapabilities(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations(
const ::android::hardware::neuralnetworks::V1_0::Model& model,
- ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+ ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
{
ALOGV("V1_0::ArmnnDriver::getSupportedOperations()");
- return ArmnnDriverImpl::getSupportedOperations(m_Runtime, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getSupportedOperations(m_Runtime,
+ m_Options,
+ model,
+ cb);
}
Return<ErrorStatus> prepareModel(
const ::android::hardware::neuralnetworks::V1_0::Model& model,
- const android::sp<IPreparedModelCallback>& cb)
+ const android::sp<IPreparedModelCallback>& cb) override
{
ALOGV("V1_0::ArmnnDriver::prepareModel()");
- return ArmnnDriverImpl::prepareModel(m_Runtime, m_ClTunedParameters, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::prepareModel(m_Runtime,
+ m_ClTunedParameters,
+ m_Options,
+ model,
+ cb);
}
- Return<DeviceStatus> getStatus()
+ Return<DeviceStatus> getStatus() override
{
ALOGV("V1_0::ArmnnDriver::getStatus()");
- return ArmnnDriverImpl::getStatus();
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getStatus();
}
};
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
deleted file mode 100644
index 2628682d..00000000
--- a/1.0/ArmnnDriverImpl.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#pragma once
-
-#include <HalInterfaces.h>
-
-#include "DriverOptions.hpp"
-
-#include <armnn/ArmNN.hpp>
-
-namespace armnn_driver
-{
-namespace V1_0
-{
-
-class ArmnnDriverImpl
-{
-public:
- static Return<void> getCapabilities(
- const armnn::IRuntimePtr& runtime,
- ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb);
- static Return<void> getSupportedOperations(
- const armnn::IRuntimePtr& runtime,
- const DriverOptions& options,
- const ::android::hardware::neuralnetworks::V1_0::Model& model,
- ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb);
- static Return<ErrorStatus> prepareModel(
- const armnn::IRuntimePtr& runtime,
- const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
- const DriverOptions& options,
- const ::android::hardware::neuralnetworks::V1_0::Model& model,
- const android::sp<IPreparedModelCallback>& cb,
- bool float32ToFloat16 = false);
- static Return<DeviceStatus> getStatus();
-};
-
-} // namespace armnn_driver::V1_0
-} // namespace armnn_driver
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index 6bd8e03c..f55aad4b 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -8,13 +8,14 @@
#include <HalInterfaces.h>
#include "ArmnnDevice.hpp"
-#include "1.0/ArmnnDriverImpl.hpp"
-#include "1.1/ArmnnDriverImpl.hpp"
+#include "../ArmnnDriverImpl.hpp"
#include <log/log.h>
-namespace armnn_driver {
-namespace V1_1 {
+namespace armnn_driver
+{
+namespace V1_1
+{
class ArmnnDriver : public ArmnnDevice, public ::android::hardware::neuralnetworks::V1_1::IDevice
{
@@ -28,74 +29,88 @@ public:
public:
Return<void> getCapabilities(
- ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+ ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb cb) override
{
ALOGV("V1_1::ArmnnDriver::getCapabilities()");
- return V1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getCapabilities(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations(
const ::android::hardware::neuralnetworks::V1_0::Model& model,
- ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+ ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb) override
{
ALOGV("V1_1::ArmnnDriver::getSupportedOperations()");
- return V1_0::ArmnnDriverImpl::getSupportedOperations(m_Runtime, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::getSupportedOperations(m_Runtime,
+ m_Options,
+ model,
+ cb);
}
Return<ErrorStatus> prepareModel(
const ::android::hardware::neuralnetworks::V1_0::Model& model,
- const android::sp<IPreparedModelCallback>& cb)
+ const android::sp<IPreparedModelCallback>& cb) override
{
ALOGV("V1_1::ArmnnDriver::prepareModel()");
- return V1_0::ArmnnDriverImpl::prepareModel(m_Runtime, m_ClTunedParameters, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_0>::prepareModel(m_Runtime,
+ m_ClTunedParameters,
+ m_Options,
+ model,
+ cb);
}
Return<void> getCapabilities_1_1(
- ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
+ ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb) override
{
ALOGV("V1_1::ArmnnDriver::getCapabilities_1_1()");
- return V1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getCapabilities(m_Runtime,
+ cb);
}
Return<void> getSupportedOperations_1_1(
const ::android::hardware::neuralnetworks::V1_1::Model& model,
- ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb)
+ ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
{
ALOGV("V1_1::ArmnnDriver::getSupportedOperations_1_1()");
- return V1_1::ArmnnDriverImpl::getSupportedOperations_1_1(m_Runtime, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getSupportedOperations(m_Runtime,
+ m_Options,
+ model,
+ cb);
}
Return<ErrorStatus> prepareModel_1_1(
const ::android::hardware::neuralnetworks::V1_1::Model& model,
::android::hardware::neuralnetworks::V1_1::ExecutionPreference preference,
- const android::sp<IPreparedModelCallback>& cb)
+ const android::sp<IPreparedModelCallback>& cb) override
{
- using namespace ::android::hardware::neuralnetworks::V1_0;
-
ALOGV("V1_1::ArmnnDriver::prepareModel_1_1()");
- if(!(preference == ExecutionPreference::LOW_POWER ||
- preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
- preference == ExecutionPreference::SUSTAINED_SPEED))
+ if (!(preference == ExecutionPreference::LOW_POWER ||
+ preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == ExecutionPreference::SUSTAINED_SPEED))
{
- ALOGV("V1_1::ArmnnDriver::prepareModel_1_1(): Invalid execution preference");
+ ALOGV("V1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
return ErrorStatus::INVALID_ARGUMENT;
}
- return V1_1::ArmnnDriverImpl::prepareModel_1_1(m_Runtime, m_ClTunedParameters, m_Options, model, cb);
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::prepareModel(m_Runtime,
+ m_ClTunedParameters,
+ m_Options,
+ model,
+ cb);
}
- Return<DeviceStatus> getStatus()
+ Return<DeviceStatus> getStatus() override
{
ALOGV("V1_1::ArmnnDriver::getStatus()");
- return V1_0::ArmnnDriverImpl::getStatus();
+ return armnn_driver::ArmnnDriverImpl<HalVersion_1_1>::getStatus();
}
};
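
Note how the V1_1 driver above serves both interface generations: the legacy V1_0 entry points forward to ArmnnDriverImpl<HalVersion_1_0>, while the new *_1_1 entry points (after validating the ExecutionPreference) forward to ArmnnDriverImpl<HalVersion_1_1>. A sketch of that dual dispatch, with hypothetical names:

    #include <iostream>

    struct Hal10 { static constexpr const char* name = "V1_0"; };
    struct Hal11 { static constexpr const char* name = "V1_1"; };

    template <typename HalVersion>
    struct Impl
    {
        static void getCapabilities() { std::cout << HalVersion::name << " capabilities\n"; }
    };

    struct DriverV1_1
    {
        // Legacy entry point, still answered by the V1_0 instantiation...
        void getCapabilities()     { Impl<Hal10>::getCapabilities(); }
        // ...while the _1_1 entry point dispatches to the V1_1 instantiation.
        void getCapabilities_1_1() { Impl<Hal11>::getCapabilities(); }
    };

    int main()
    {
        DriverV1_1 driver;
        driver.getCapabilities();
        driver.getCapabilities_1_1();
    }
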
diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
deleted file mode 100644
index a5e32766..00000000
--- a/1.1/ArmnnDriverImpl.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#include "ArmnnDriverImpl.hpp"
-#include "../1.0/ArmnnDriverImpl.hpp"
-
-#include <OperationsUtils.h>
-
-#include <log/log.h>
-#include <boost/assert.hpp>
-
-#include <ValidateHal.h>
-
-using namespace std;
-using namespace android;
-using namespace android::nn;
-using namespace android::hardware;
-
-namespace
-{
-
-void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
- ErrorStatus errorStatus,
- const sp<IPreparedModel>& preparedModelPtr)
-{
- Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
- // This check is required, if the callback fails and it isn't checked it will bring down the service
- if (!returned.isOk())
- {
- ALOGE("V1_1::ArmnnDriverImpl::prepareModel_1_1: hidl callback failed to return properly: %s ",
- returned.description().c_str());
- }
-}
-
-Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
- const string& message,
- const sp<IPreparedModelCallback>& callback)
-{
- ALOGW("V1_1::ArmnnDriverImpl::prepareModel_1_1: %s", message.c_str());
- NotifyCallbackAndCheck(callback, error, nullptr);
- return error;
-}
-
-} // namespace
-
-namespace armnn_driver
-{
-namespace V1_1
-{
-
-Return<void> ArmnnDriverImpl::getCapabilities_1_1(
- const armnn::IRuntimePtr& runtime,
- neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb)
-{
- ALOGV("V1_1::ArmnnDriverImpl::getCapabilities_1_1()");
-
- neuralnetworks::V1_0::IDevice::getCapabilities_cb cb_1_0 =
- [&](ErrorStatus status, const neuralnetworks::V1_0::Capabilities& capabilities)
- {
- BOOST_ASSERT_MSG(compliantWithV1_1(capabilities),
- "V1_1::ArmnnDriverImpl: V1_0::Capabilities not compliant with V1_1::Capabilities");
-
- cb(status, convertToV1_1(capabilities));
- };
-
- V1_0::ArmnnDriverImpl::getCapabilities(runtime, cb_1_0);
-
- return Void();
-}
-
-Return<void> ArmnnDriverImpl::getSupportedOperations_1_1(
- const armnn::IRuntimePtr& runtime,
- const DriverOptions& options,
- const neuralnetworks::V1_1::Model& model,
- neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb)
-{
- ALOGV("V1_1::ArmnnDriverImpl::getSupportedOperations_1_1()");
-
- if(compliantWithV1_0(model))
- {
- V1_0::ArmnnDriverImpl::getSupportedOperations(runtime, options, convertToV1_0(model), cb);
- }
- else
- {
- std::vector<bool> result;
-
- if (!runtime)
- {
- ALOGW("V1_1::ArmnnDriverImpl::getSupportedOperations_1_1: Device unavailable");
- cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
- return Void();
- }
-
- if (!android::nn::validateModel(model))
- {
- ALOGW("V1_1::ArmnnDriverImpl::getSupportedOperations_1_1: Invalid model passed as input");
- cb(ErrorStatus::INVALID_ARGUMENT, result);
- return Void();
- }
-
- result.assign(model.operations.size(), false);
- cb(ErrorStatus::NONE, result);
- }
-
- return Void();
-}
-
-Return<ErrorStatus> ArmnnDriverImpl::prepareModel_1_1(
- const armnn::IRuntimePtr& runtime,
- const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
- const DriverOptions& options,
- const neuralnetworks::V1_1::Model& model,
- const sp<IPreparedModelCallback>& cb)
-{
- ALOGV("V1_1::ArmnnDriverImpl::prepareModel_1_1()");
-
- if(compliantWithV1_0(model))
- {
- return V1_0::ArmnnDriverImpl::prepareModel(runtime, clTunedParameters, options, convertToV1_0(model), cb,
- model.relaxComputationFloat32toFloat16 && options.GetFp16Enabled());
- }
- else
- {
- if (cb.get() == nullptr)
- {
- ALOGW("V1_1::ArmnnDriverImpl::prepareModel_1_1: Invalid callback passed to prepareModel");
- return ErrorStatus::INVALID_ARGUMENT;
- }
-
- if (!runtime)
- {
- return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
- "V1_1::ArmnnDriverImpl::prepareModel_1_1: Device unavailable", cb);
- }
-
- if (!android::nn::validateModel(model))
- {
- return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
- "V1_1::ArmnnDriverImpl::prepareModel_1_1: Invalid model passed as input", cb);
- }
-
- FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- "V1_1::ArmnnDriverImpl::prepareModel_1_1: Unsupported model", cb);
- return ErrorStatus::NONE;
- }
-}
-
-} // armnn_driver::namespace V1_1
-} // namespace armnn_driver
diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
deleted file mode 100644
index 307d96bf..00000000
--- a/1.1/ArmnnDriverImpl.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// See LICENSE file in the project root for full license information.
-//
-
-#pragma once
-
-#include <HalInterfaces.h>
-
-#include "DriverOptions.hpp"
-
-#include <armnn/ArmNN.hpp>
-
-namespace armnn_driver
-{
-namespace V1_1
-{
-
-class ArmnnDriverImpl
-{
-public:
- static Return<void> getCapabilities_1_1(
- const armnn::IRuntimePtr& runtime,
- ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb cb);
- static Return<void> getSupportedOperations_1_1(
- const armnn::IRuntimePtr& runtime,
- const DriverOptions& options,
- const ::android::hardware::neuralnetworks::V1_1::Model& model,
- ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb cb);
- static Return<ErrorStatus> prepareModel_1_1(
- const armnn::IRuntimePtr& runtime,
- const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
- const DriverOptions& options,
- const ::android::hardware::neuralnetworks::V1_1::Model& model,
- const android::sp<IPreparedModelCallback>& cb);
-};
-
-} // namespace armnn_driver::V1_1
-} // namespace armnn_driver
diff --git a/Android.mk b/Android.mk
index 960a2f6e..38450705 100644
--- a/Android.mk
+++ b/Android.mk
@@ -47,7 +47,7 @@ LOCAL_CFLAGS+= \
endif # ARMNN_DRIVER_DEBUG == 1
LOCAL_SRC_FILES := \
- 1.0/ArmnnDriverImpl.cpp \
+ ArmnnDriverImpl.cpp \
DriverOptions.cpp \
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
@@ -118,8 +118,7 @@ LOCAL_CFLAGS+= \
endif # ARMNN_DRIVER_DEBUG == 1
LOCAL_SRC_FILES := \
- 1.0/ArmnnDriverImpl.cpp \
- 1.1/ArmnnDriverImpl.cpp \
+ ArmnnDriverImpl.cpp \
DriverOptions.cpp \
ArmnnDevice.cpp \
ArmnnPreparedModel.cpp \
diff --git a/ArmnnDevice.cpp b/ArmnnDevice.cpp
index 3e0b0da2..81e8eaca 100644
--- a/ArmnnDevice.cpp
+++ b/ArmnnDevice.cpp
@@ -3,6 +3,8 @@
// See LICENSE file in the project root for full license information.
//
+#define LOG_TAG "ArmnnDriver"
+
#include "ArmnnDevice.hpp"
#include <OperationsUtils.h>
diff --git a/1.0/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 21a4f2e3..0298f3b7 100644
--- a/1.0/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -3,6 +3,8 @@
// See LICENSE file in the project root for full license information.
//
+#define LOG_TAG "ArmnnDriver"
+
#include "ArmnnDriverImpl.hpp"
#include "ModelToINetworkConverter.hpp"
#include "ArmnnPreparedModel.hpp"
@@ -37,7 +39,7 @@ void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
// This check is required, if the callback fails and it isn't checked it will bring down the service
if (!returned.isOk())
{
- ALOGE("V1_0::ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
+ ALOGE("ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
returned.description().c_str());
}
}
@@ -46,7 +48,7 @@ Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
const string& message,
const sp<IPreparedModelCallback>& callback)
{
- ALOGW("V1_0::ArmnnDriverImpl::prepareModel: %s", message.c_str());
+ ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
NotifyCallbackAndCheck(callback, error, nullptr);
return error;
}
@@ -55,16 +57,15 @@ Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
namespace armnn_driver
{
-namespace V1_0
-{
-Return<void> ArmnnDriverImpl::getCapabilities(
+template <typename HalVersion>
+Return<void> ArmnnDriverImpl<HalVersion>::getCapabilities(
const armnn::IRuntimePtr& runtime,
- neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
+ HalGetCapabilities_cb cb)
{
- ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");
+ ALOGV("ArmnnDriverImpl::getCapabilities()");
- neuralnetworks::V1_0::Capabilities capabilities;
+ HalCapabilities capabilities;
if (runtime)
{
capabilities.float32Performance.execTime =
@@ -94,13 +95,14 @@ Return<void> ArmnnDriverImpl::getCapabilities(
return Void();
}
-Return<void> ArmnnDriverImpl::getSupportedOperations(
+template <typename HalVersion>
+Return<void> ArmnnDriverImpl<HalVersion>::getSupportedOperations(
const armnn::IRuntimePtr& runtime,
const DriverOptions& options,
- const neuralnetworks::V1_0::Model& model,
- neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
+ const HalModel& model,
+ HalGetSupportedOperations_cb cb)
{
- ALOGV("V1_0::ArmnnDriverImpl::getSupportedOperations()");
+ ALOGV("ArmnnDriverImpl::getSupportedOperations()");
vector<bool> result;
@@ -110,7 +112,7 @@ Return<void> ArmnnDriverImpl::getSupportedOperations(
return Void();
}
- // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway
+ // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway.
if (!android::nn::validateModel(model))
{
cb(ErrorStatus::INVALID_ARGUMENT, result);
@@ -118,18 +120,19 @@ Return<void> ArmnnDriverImpl::getSupportedOperations(
}
// Attempt to convert the model to an ArmNN input network (INetwork).
- armnn_driver::ModelToINetworkConverter<HalVersion_1_0> modelConverter(options.GetComputeDevice(),
- model, options.GetForcedUnsupportedOperations());
+ ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+ model,
+ options.GetForcedUnsupportedOperations());
if (modelConverter.GetConversionResult() != ConversionResult::Success
- && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
+ && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
{
cb(ErrorStatus::GENERAL_FAILURE, result);
return Void();
}
// Check each operation if it was converted successfully and copy the flags
- // into the result (vector<bool>) that we need to return to Android
+ // into the result (vector<bool>) that we need to return to Android.
result.reserve(model.operations.size());
for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); operationIdx++)
{
@@ -141,48 +144,51 @@ Return<void> ArmnnDriverImpl::getSupportedOperations(
return Void();
}
-Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
+template <typename HalVersion>
+Return<ErrorStatus> ArmnnDriverImpl<HalVersion>::prepareModel(
const armnn::IRuntimePtr& runtime,
const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
const DriverOptions& options,
- const neuralnetworks::V1_0::Model& model,
+ const HalModel& model,
const sp<IPreparedModelCallback>& cb,
- bool float32ToFloat16)
+ bool float32ToFloat16)
{
- ALOGV("V1_0::ArmnnDriverImpl::prepareModel()");
+ ALOGV("ArmnnDriverImpl::prepareModel()");
if (cb.get() == nullptr)
{
- ALOGW("V1_0::ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
+ ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
return ErrorStatus::INVALID_ARGUMENT;
}
if (!runtime)
{
return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
- "V1_0::ArmnnDriverImpl::prepareModel: Device unavailable", cb);
+ "ArmnnDriverImpl::prepareModel: Device unavailable", cb);
}
if (!android::nn::validateModel(model))
{
return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
- "V1_0::ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
+ "ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
}
// Deliberately ignore any unsupported operations requested by the options -
// at this point we're being asked to prepare a model that we've already declared support for
// and the operation indices may be different to those in getSupportedOperations anyway.
set<unsigned int> unsupportedOperations;
- armnn_driver::ModelToINetworkConverter<HalVersion_1_0> modelConverter(options.GetComputeDevice(), model,
- unsupportedOperations);
+ ModelToINetworkConverter<HalVersion> modelConverter(options.GetComputeDevice(),
+ model,
+ unsupportedOperations);
if (modelConverter.GetConversionResult() != ConversionResult::Success)
{
- FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+ FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
+ "ArmnnDriverImpl::prepareModel: ModelToINetworkConverter failed", cb);
return ErrorStatus::NONE;
}
- // optimize the network
+ // Optimize the network
armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
armnn::OptimizerOptions OptOptions;
OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
@@ -197,7 +203,7 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
catch (armnn::Exception &e)
{
stringstream message;
- message << "armnn::Exception (" << e.what() << ") caught from optimize.";
+ message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what() << ") caught from optimize.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
@@ -206,41 +212,39 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
if (!optNet)
{
FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- "V1_0::ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
+ "ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
return ErrorStatus::NONE;
}
// Export the optimized network graph to a dot file if an output dump directory
// has been specified in the drivers' arguments.
- ExportNetworkGraphToDotFile(*optNet,
- options.GetRequestInputsAndOutputsDumpDir(),
- model);
+ ExportNetworkGraphToDotFile<HalModel>(*optNet, options.GetRequestInputsAndOutputsDumpDir(), model);
- // load it into the runtime
+ // Load it into the runtime.
armnn::NetworkId netId = 0;
try
{
if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
{
return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
- "V1_0::ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
+ "ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
}
}
catch (armnn::Exception& e)
{
stringstream message;
- message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
+ message << "ArmnnDriverImpl::prepareModel: armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
return ErrorStatus::NONE;
}
- unique_ptr<ArmnnPreparedModel> preparedModel(new ArmnnPreparedModel(
- netId,
- runtime.get(),
- model,
- options.GetRequestInputsAndOutputsDumpDir(),
- options.IsGpuProfilingEnabled()
- ));
+ unique_ptr<ArmnnPreparedModel<HalVersion>> preparedModel(
+ new ArmnnPreparedModel<HalVersion>(
+ netId,
+ runtime.get(),
+ model,
+ options.GetRequestInputsAndOutputsDumpDir(),
+ options.IsGpuProfilingEnabled()));
// Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
// this is enabled) before the first 'real' inference which removes the overhead of the first inference.
@@ -256,7 +260,7 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
}
catch (const armnn::Exception& error)
{
- ALOGE("V1_0::ArmnnDriverImpl: Failed to save CL tuned parameters file '%s': %s",
+ ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
options.GetClTunedParametersFile().c_str(), error.what());
}
}
@@ -266,12 +270,19 @@ Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
return ErrorStatus::NONE;
}
-Return<DeviceStatus> ArmnnDriverImpl::getStatus()
+template <typename HalVersion>
+Return<DeviceStatus> ArmnnDriverImpl<HalVersion>::getStatus()
{
- ALOGV("V1_0::ArmnnDriverImpl::getStatus()");
+ ALOGV("ArmnnDriver::getStatus()");
return DeviceStatus::AVAILABLE;
}
-} // armnn_driver::namespace V1_0
+// Class template specializations
+template class ArmnnDriverImpl<HalVersion_1_0>;
+
+#ifdef ARMNN_ANDROID_NN_V1_1
+template class ArmnnDriverImpl<HalVersion_1_1>;
+#endif
+
} // namespace armnn_driver
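
One consequence of keeping the template member definitions in ArmnnDriverImpl.cpp (rather than the header) is the block of explicit instantiations at the end of the file: each supported HalVersion must be instantiated there, or other translation units would hit undefined references at link time. The same technique in miniature, with hypothetical names:

    // widget.hpp: declaration only.
    template <typename T>
    struct Widget
    {
        static T Twice(T value);
    };

    // widget.cpp: the definition is hidden from other translation units...
    template <typename T>
    T Widget<T>::Twice(T value)
    {
        return value + value;
    }

    // ...so the code for each supported T must be emitted here explicitly.
    template struct Widget<int>;
    template struct Widget<double>;
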
diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp
new file mode 100644
index 00000000..c0600977
--- /dev/null
+++ b/ArmnnDriverImpl.hpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+
+struct HalVersion_1_0
+{
+ using Model = ::android::hardware::neuralnetworks::V1_0::Model;
+ using Capabilities = ::android::hardware::neuralnetworks::V1_0::Capabilities;
+ using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getCapabilities_cb;
+ using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_0::IDevice::getSupportedOperations_cb;
+};
+
+#if defined(ARMNN_ANDROID_NN_V1_1)
+struct HalVersion_1_1
+{
+ using Model = ::android::hardware::neuralnetworks::V1_1::Model;
+ using Capabilities = ::android::hardware::neuralnetworks::V1_1::Capabilities;
+ using getCapabilities_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getCapabilities_1_1_cb;
+ using getSupportedOperations_cb = ::android::hardware::neuralnetworks::V1_1::IDevice::getSupportedOperations_1_1_cb;
+};
+#endif
+
+template <typename HalVersion>
+class ArmnnDriverImpl
+{
+public:
+ using HalModel = typename HalVersion::Model;
+ using HalCapabilities = typename HalVersion::Capabilities;
+ using HalGetCapabilities_cb = typename HalVersion::getCapabilities_cb;
+ using HalGetSupportedOperations_cb = typename HalVersion::getSupportedOperations_cb;
+
+ static Return<void> getCapabilities(
+ const armnn::IRuntimePtr& runtime,
+ HalGetCapabilities_cb cb);
+ static Return<void> getSupportedOperations(
+ const armnn::IRuntimePtr& runtime,
+ const DriverOptions& options,
+ const HalModel& model,
+ HalGetSupportedOperations_cb);
+ static Return<ErrorStatus> prepareModel(
+ const armnn::IRuntimePtr& runtime,
+ const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+ const DriverOptions& options,
+ const HalModel& model,
+ const android::sp<IPreparedModelCallback>& cb,
+ bool float32ToFloat16 = false);
+ static Return<DeviceStatus> getStatus();
+};
+
+} // namespace armnn_driver
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index d338fdc8..7cbbcbcb 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -81,18 +81,20 @@ inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t ind
return tensorNamePrefix + std::to_string(index);
}
-}
+} // anonymous namespace
using namespace android::hardware;
namespace armnn_driver
{
-RequestThread ArmnnPreparedModel::m_RequestThread;
+template<typename HalVersion>
+RequestThread<HalVersion> ArmnnPreparedModel<HalVersion>::m_RequestThread;
+template<typename HalVersion>
template <typename TensorBindingCollection>
-void ArmnnPreparedModel::DumpTensorsIfRequired(char const* tensorNamePrefix,
- const TensorBindingCollection& tensorBindings)
+void ArmnnPreparedModel<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
+ const TensorBindingCollection& tensorBindings)
{
if (!m_RequestInputsAndOutputsDumpDir.empty())
{
@@ -107,11 +109,12 @@ void ArmnnPreparedModel::DumpTensorsIfRequired(char const* tensorNamePrefix,
}
}
-ArmnnPreparedModel::ArmnnPreparedModel(armnn::NetworkId networkId,
- armnn::IRuntime* runtime,
- const neuralnetworks::V1_0::Model& model,
- const std::string& requestInputsAndOutputsDumpDir,
- const bool gpuProfilingEnabled)
+template<typename HalVersion>
+ArmnnPreparedModel<HalVersion>::ArmnnPreparedModel(armnn::NetworkId networkId,
+ armnn::IRuntime* runtime,
+ const HalModel& model,
+ const std::string& requestInputsAndOutputsDumpDir,
+ const bool gpuProfilingEnabled)
: m_NetworkId(networkId)
, m_Runtime(runtime)
, m_Model(model)
@@ -123,7 +126,8 @@ ArmnnPreparedModel::ArmnnPreparedModel(armnn::NetworkId networkId,
m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
}
-ArmnnPreparedModel::~ArmnnPreparedModel()
+template<typename HalVersion>
+ArmnnPreparedModel<HalVersion>::~ArmnnPreparedModel()
{
// Get a hold of the profiler used by this model.
std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
@@ -135,8 +139,9 @@ ArmnnPreparedModel::~ArmnnPreparedModel()
DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId, profiler.get());
}
-Return<ErrorStatus> ArmnnPreparedModel::execute(const Request& request,
- const ::android::sp<IExecutionCallback>& callback)
+template<typename HalVersion>
+Return<ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(const Request& request,
+ const ::android::sp<IExecutionCallback>& callback)
{
ALOGV("ArmnnPreparedModel::execute(): %s", GetModelSummary(m_Model).c_str());
m_RequestCount++;
@@ -220,10 +225,12 @@ Return<ErrorStatus> ArmnnPreparedModel::execute(const Request& request,
return ErrorStatus::NONE; // successfully queued
}
-void ArmnnPreparedModel::ExecuteGraph(std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
- std::shared_ptr<armnn::InputTensors>& pInputTensors,
- std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
- const ::android::sp<IExecutionCallback>& callback)
+template<typename HalVersion>
+void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
+ std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
+ std::shared_ptr<armnn::InputTensors>& pInputTensors,
+ std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
+ const ::android::sp<IExecutionCallback>& callback)
{
ALOGV("ArmnnPreparedModel::ExecuteGraph(...)");
@@ -254,7 +261,8 @@ void ArmnnPreparedModel::ExecuteGraph(std::shared_ptr<std::vector<::android::nn:
NotifyCallbackAndCheck(callback, ErrorStatus::NONE, "ExecuteGraph");
}
-void ArmnnPreparedModel::ExecuteWithDummyInputs()
+template<typename HalVersion>
+void ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs()
{
std::vector<std::vector<char>> storage;
armnn::InputTensors inputTensors;
@@ -287,4 +295,11 @@ void ArmnnPreparedModel::ExecuteWithDummyInputs()
}
}
+// Class template specializations
+template class ArmnnPreparedModel<HalVersion_1_0>;
+
+#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1.
+template class ArmnnPreparedModel<HalVersion_1_1>;
+#endif
+
} // namespace armnn_driver
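
A subtle point in the templated ArmnnPreparedModel: m_RequestThread is a static data member of a class template, and such statics exist once per instantiation. After this change there is therefore one RequestThread per HAL version rather than one global thread, so workloads are serialised per ArmnnPreparedModel<HalVersion> instantiation. A small sketch demonstrating the per-instantiation behaviour:

    #include <iostream>

    template <typename Tag>
    struct Counter
    {
        static int count; // one copy per instantiation of Counter
    };

    template <typename Tag>
    int Counter<Tag>::count = 0;

    struct V1_0 {};
    struct V1_1 {};

    int main()
    {
        ++Counter<V1_0>::count;
        ++Counter<V1_0>::count;
        ++Counter<V1_1>::count;
        // Prints "2 1": the statics are independent, just as each
        // ArmnnPreparedModel<HalVersion> now owns its own RequestThread.
        std::cout << Counter<V1_0>::count << " " << Counter<V1_1>::count << std::endl;
    }
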
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index a700e54d..86c6f5cf 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -8,6 +8,7 @@
#include "RequestThread.hpp"
#include "ArmnnDriver.hpp"
+#include "ArmnnDriverImpl.hpp"
#include <NeuralNetworks.h>
#include <armnn/ArmNN.hpp>
@@ -18,12 +19,15 @@
namespace armnn_driver
{
+template <typename HalVersion>
class ArmnnPreparedModel : public IPreparedModel
{
public:
+ using HalModel = typename HalVersion::Model;
+
ArmnnPreparedModel(armnn::NetworkId networkId,
armnn::IRuntime* runtime,
- const ::android::hardware::neuralnetworks::V1_0::Model& model,
+ const HalModel& model,
const std::string& requestInputsAndOutputsDumpDir,
const bool gpuProfilingEnabled);
@@ -42,19 +46,18 @@ public:
void ExecuteWithDummyInputs();
private:
-
template <typename TensorBindingCollection>
void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings);
- armnn::NetworkId m_NetworkId;
- armnn::IRuntime* m_Runtime;
- ::android::hardware::neuralnetworks::V1_0::Model m_Model;
+ armnn::NetworkId m_NetworkId;
+ armnn::IRuntime* m_Runtime;
+ HalModel m_Model;
// There must be a single RequestThread for all ArmnnPreparedModel objects to ensure serial execution of workloads
// It is specific to this class, so it is declared as static here
- static RequestThread m_RequestThread;
- uint32_t m_RequestCount;
- const std::string& m_RequestInputsAndOutputsDumpDir;
- const bool m_GpuProfilingEnabled;
+ static RequestThread<HalVersion> m_RequestThread;
+ uint32_t m_RequestCount;
+ const std::string& m_RequestInputsAndOutputsDumpDir;
+ const bool m_GpuProfilingEnabled;
};
}
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 461a8cdb..6db32a05 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -484,6 +484,7 @@ template<typename HalVersion>
void ModelToINetworkConverter<HalVersion>::Convert()
{
using HalModel = typename HalVersion::Model;
+
ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());
// map the memory pool into shared pointers
@@ -2658,8 +2659,8 @@ bool ModelToINetworkConverter<HalVersion>::IsOperationSupported(uint32_t operati
template class ModelToINetworkConverter<HalVersion_1_0>;
-#if defined(ARMNN_ANDROID_NN_V1_1)
+#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template class ModelToINetworkConverter<HalVersion_1_1>;
#endif
-} // armnn_driver
\ No newline at end of file
+} // armnn_driver
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 040bec6b..c28ebdcd 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -6,6 +6,7 @@
#pragma once
#include "ArmnnDriver.hpp"
+#include "ArmnnDriverImpl.hpp"
#include <NeuralNetworks.h>
#include <ActivationFunctor.h>
@@ -33,18 +34,6 @@ enum class ConversionResult
UnsupportedFeature
};
-struct HalVersion_1_0
-{
- using Model = ::android::hardware::neuralnetworks::V1_0::Model;
-};
-
-#if defined(ARMNN_ANDROID_NN_V1_1)
-struct HalVersion_1_1
-{
- using Model = ::android::hardware::neuralnetworks::V1_1::Model;
-};
-#endif
-
// A helper performing the conversion from an AndroidNN driver Model representation,
// to an armnn::INetwork object
template<typename HalVersion>
@@ -54,8 +43,8 @@ public:
using HalModel = typename HalVersion::Model;
ModelToINetworkConverter(armnn::Compute compute,
- const HalModel& model,
- const std::set<unsigned int>& forcedUnsupportedOperations);
+ const HalModel& model,
+ const std::set<unsigned int>& forcedUnsupportedOperations);
ConversionResult GetConversionResult() const { return m_ConversionResult; }
diff --git a/RequestThread.cpp b/RequestThread.cpp
index abaee90c..8e44d8d2 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -17,13 +17,15 @@ using namespace android;
namespace armnn_driver
{
-RequestThread::RequestThread()
+template<typename HalVersion>
+RequestThread<HalVersion>::RequestThread()
{
ALOGV("RequestThread::RequestThread()");
m_Thread = std::make_unique<std::thread>(&RequestThread::Process, this);
}
-RequestThread::~RequestThread()
+template<typename HalVersion>
+RequestThread<HalVersion>::~RequestThread()
{
ALOGV("RequestThread::~RequestThread()");
@@ -48,11 +50,12 @@ RequestThread::~RequestThread()
catch (const std::exception&) { } // Swallow any exception.
}
-void RequestThread::PostMsg(ArmnnPreparedModel* model,
- std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& memPools,
- std::shared_ptr<armnn::InputTensors>& inputTensors,
- std::shared_ptr<armnn::OutputTensors>& outputTensors,
- const ::android::sp<IExecutionCallback>& callback)
+template<typename HalVersion>
+void RequestThread<HalVersion>::PostMsg(ArmnnPreparedModel<HalVersion>* model,
+ std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& memPools,
+ std::shared_ptr<armnn::InputTensors>& inputTensors,
+ std::shared_ptr<armnn::OutputTensors>& outputTensors,
+ const ::android::sp<IExecutionCallback>& callback)
{
ALOGV("RequestThread::PostMsg(...)");
auto data = std::make_shared<AsyncExecuteData>(model,
@@ -64,7 +67,8 @@ void RequestThread::PostMsg(ArmnnPreparedModel* model,
PostMsg(pMsg);
}
-void RequestThread::PostMsg(std::shared_ptr<ThreadMsg>& pMsg)
+template<typename HalVersion>
+void RequestThread<HalVersion>::PostMsg(std::shared_ptr<ThreadMsg>& pMsg)
{
ALOGV("RequestThread::PostMsg(pMsg)");
// Add a message to the queue and notify the request thread
@@ -73,7 +77,8 @@ void RequestThread::PostMsg(std::shared_ptr<ThreadMsg>& pMsg)
m_Cv.notify_one();
}
-void RequestThread::Process()
+template<typename HalVersion>
+void RequestThread<HalVersion>::Process()
{
ALOGV("RequestThread::Process()");
while (true)
@@ -98,7 +103,7 @@ void RequestThread::Process()
{
ALOGV("RequestThread::Process() - request");
// invoke the asynchronous execution method
- ArmnnPreparedModel* model = pMsg->data->m_Model;
+ ArmnnPreparedModel<HalVersion>* model = pMsg->data->m_Model;
model->ExecuteGraph(pMsg->data->m_MemPools,
pMsg->data->m_InputTensors,
pMsg->data->m_OutputTensors,
@@ -126,5 +131,12 @@ void RequestThread::Process()
}
}
+// Class template specializations
+template class RequestThread<HalVersion_1_0>;
+
+#ifdef ARMNN_ANDROID_NN_V1_1 // Using ::android::hardware::neuralnetworks::V1_1.
+template class RequestThread<HalVersion_1_1>;
+#endif
+
} // namespace armnn_driver
diff --git a/RequestThread.hpp b/RequestThread.hpp
index 2448dbec..41ad213b 100644
--- a/RequestThread.hpp
+++ b/RequestThread.hpp
@@ -11,6 +11,7 @@
#include <condition_variable>
#include "ArmnnDriver.hpp"
+#include "ArmnnDriverImpl.hpp"
#include <CpuExecutor.h>
#include <armnn/ArmNN.hpp>
@@ -18,8 +19,10 @@
namespace armnn_driver
{
+template<typename HalVersion>
class ArmnnPreparedModel;
+template<typename HalVersion>
class RequestThread
{
public:
@@ -35,7 +38,7 @@ public:
/// @param[in] inputTensors pointer to the input tensors for the request
/// @param[in] outputTensors pointer to the output tensors for the request
/// @param[in] callback the android notification callback
- void PostMsg(armnn_driver::ArmnnPreparedModel* model,
+ void PostMsg(armnn_driver::ArmnnPreparedModel<HalVersion>* model,
std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& memPools,
std::shared_ptr<armnn::InputTensors>& inputTensors,
std::shared_ptr<armnn::OutputTensors>& outputTensors,
@@ -48,7 +51,7 @@ private:
/// storage for a prepared model and args for the asyncExecute call
struct AsyncExecuteData
{
- AsyncExecuteData(ArmnnPreparedModel* model,
+ AsyncExecuteData(ArmnnPreparedModel<HalVersion>* model,
std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& memPools,
std::shared_ptr<armnn::InputTensors>& inputTensors,
std::shared_ptr<armnn::OutputTensors>& outputTensors,
@@ -61,7 +64,7 @@ private:
{
}
- armnn_driver::ArmnnPreparedModel* m_Model;
+ armnn_driver::ArmnnPreparedModel<HalVersion>* m_Model;
std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>> m_MemPools;
std::shared_ptr<armnn::InputTensors> m_InputTensors;
std::shared_ptr<armnn::OutputTensors> m_OutputTensors;
diff --git a/Utils.cpp b/Utils.cpp
index 726e130f..79384e01 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -9,13 +9,8 @@
#include <Permute.hpp>
-#include <boost/format.hpp>
-#include <log/log.h>
-
#include <cassert>
#include <cinttypes>
-#include <fstream>
-#include <iomanip>
using namespace android;
using namespace android::hardware;
@@ -289,43 +284,4 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
profiler->Print(fileStream);
}
-void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
- const std::string& dumpDir,
- const neuralnetworks::V1_0::Model& model)
-{
- // The dump directory must exist in advance.
- if (dumpDir.empty())
- {
- return;
- }
-
- // Get the memory address of the model and convert it to a hex string (of at least a '0' character).
- size_t modelAddress = uintptr_t(&model);
- std::stringstream ss;
- ss << std::uppercase << std::hex << std::setfill('0') << std::setw(1) << modelAddress;
- std::string modelAddressHexString = ss.str();
-
- // Set the name of the output .dot file.
- const std::string fileName = boost::str(boost::format("%1%/networkgraph_%2%.dot")
- % dumpDir
- % modelAddressHexString);
-
- ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());
-
- // Write the network graph to a dot file.
- std::ofstream fileStream;
- fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
-
- if (!fileStream.good())
- {
- ALOGW("Could not open file %s for writing", fileName.c_str());
- return;
- }
-
- if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
- {
- ALOGW("An error occurred when writing to file %s", fileName.c_str());
- }
-}
-
} // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index 5d9f7003..ac90a9ab 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -12,8 +12,13 @@
#include <armnn/ArmNN.hpp>
#include <CpuExecutor.h>
+#include <boost/format.hpp>
+#include <log/log.h>
+
#include <vector>
#include <string>
+#include <fstream>
+#include <iomanip>
namespace armnn_driver
{
@@ -44,8 +49,8 @@ armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);
std::string GetOperandSummary(const Operand& operand);
-template <typename Model>
-std::string GetModelSummary(const Model& model)
+template <typename HalModel>
+std::string GetModelSummary(const HalModel& model)
{
std::stringstream result;
@@ -86,8 +91,44 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
armnn::NetworkId networkId,
const armnn::IProfiler* profiler);
+template <typename HalModel>
void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
const std::string& dumpDir,
- const ::android::hardware::neuralnetworks::V1_0::Model& model);
+ const HalModel& model)
+{
+ // The dump directory must exist in advance.
+ if (dumpDir.empty())
+ {
+ return;
+ }
+
+ // Get the memory address of the model and convert it to a hex string (of at least a '0' character).
+ size_t modelAddress = uintptr_t(&model);
+ std::stringstream ss;
+ ss << std::uppercase << std::hex << std::setfill('0') << std::setw(1) << modelAddress;
+ std::string modelAddressHexString = ss.str();
+
+ // Set the name of the output .dot file.
+ const std::string fileName = boost::str(boost::format("%1%/networkgraph_%2%.dot")
+ % dumpDir
+ % modelAddressHexString);
+
+ ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());
+
+ // Write the network graph to a dot file.
+ std::ofstream fileStream;
+ fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
-}
\ No newline at end of file
+ if (!fileStream.good())
+ {
+ ALOGW("Could not open file %s for writing", fileName.c_str());
+ return;
+ }
+
+ if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
+ {
+ ALOGW("An error occurred when writing to file %s", fileName.c_str());
+ }
+}
+
+}
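
Finally, ExportNetworkGraphToDotFile moves out of Utils.cpp and into Utils.hpp as a function template over HalModel, which is why its boost/format, log, fstream and iomanip includes migrate to the header as well: a function template's definition must be visible wherever it is instantiated. The pattern in miniature, with hypothetical names:

    // utils.hpp: the body lives in the header so any translation unit
    // can instantiate it for its own model type.
    #include <sstream>
    #include <string>

    template <typename HalModel>
    std::string DescribeModel(const HalModel& model)
    {
        std::ostringstream ss;
        // Identify the instance by its address, as the .dot file naming does.
        ss << "model@" << static_cast<const void*>(&model);
        return ss.str();
    }
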