aboutsummaryrefslogtreecommitdiff
path: root/1.3
diff options
context:
space:
mode:
authorKevin May <kevin.may@arm.com>2020-03-26 13:34:14 +0000
committerKevin May <kevin.may@arm.com>2020-03-26 17:39:25 +0000
commit42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 (patch)
treee5260f4b9e5e36080269243c1f1cd74f5589b206 /1.3
parentcae7e927a5b5559f67bb87a1737f6606d5d6f328 (diff)
downloadandroid-nn-driver-42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3.tar.gz
IVGCVSW-4447 Add Hal 1_3 Support
* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl * Add new .rc file for 1.3 service * Add ArmnnPreparedModel_1_3 and implement new functions * Update Android.mk with 1.3 driver and service * Refactor ifdef to include ARMNN_ANDROID_NN_V1_3 * Create Utils getMainModel for new 1.3 Model Main Subgraph * Use android Utils to convertToV1_X in ArmnnPrepapredModel_1_3 * Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp * Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer Signed-off-by: Kevin May <kevin.may@arm.com> Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
Diffstat (limited to '1.3')
-rw-r--r--1.3/ArmnnDriver.hpp294
-rw-r--r--1.3/ArmnnDriverImpl.cpp338
-rw-r--r--1.3/ArmnnDriverImpl.hpp40
-rw-r--r--1.3/HalPolicy.cpp451
-rw-r--r--1.3/HalPolicy.hpp150
5 files changed, 1273 insertions, 0 deletions
diff --git a/1.3/ArmnnDriver.hpp b/1.3/ArmnnDriver.hpp
new file mode 100644
index 00000000..be355932
--- /dev/null
+++ b/1.3/ArmnnDriver.hpp
@@ -0,0 +1,294 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../ArmnnDevice.hpp"
+#include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
+#include "../ArmnnDriverImpl.hpp"
+#include "../1.3/ArmnnDriverImpl.hpp"
+#include "../1.3/HalPolicy.hpp"
+#include "../1.2/ArmnnDriverImpl.hpp"
+#include "../1.2/HalPolicy.hpp"
+#include "../1.1/ArmnnDriverImpl.hpp"
+#include "../1.1/HalPolicy.hpp"
+#include "../1.0/ArmnnDriverImpl.hpp"
+#include "../1.0/HalPolicy.hpp"
+
+#include <log/log.h>
+
+namespace armnn_driver
+{
+namespace hal_1_3
+{
+
+/// HAL 1.3 NNAPI driver. Implements V1_3::IDevice and forwards each
+/// HAL-versioned entry point to the matching hal_1_x driver implementation,
+/// so a single service can serve clients of every supported HAL version.
+class ArmnnDriver : public ArmnnDevice, public V1_3::IDevice
+{
+public:
+
+    ArmnnDriver(DriverOptions options)
+        : ArmnnDevice(std::move(options))
+    {
+        ALOGV("hal_1_3::ArmnnDriver::ArmnnDriver()");
+    }
+    ~ArmnnDriver() {}
+
+    // Token identifying a compilation cache entry; its length is fixed by NNAPI.
+    using HidlToken = android::hardware::hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
+
+public:
+    // ----- HAL 1.0 entry points, delegated to the hal_1_0 implementation -----
+
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getCapabilities()");
+
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getSupportedOperations()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                        m_Options,
+                                                                                        model,
+                                                                                        cb);
+    }
+
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModel()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb);
+    }
+
+    // ----- HAL 1.1 entry points, delegated to the hal_1_1 implementation -----
+
+    Return<void> getCapabilities_1_1(V1_1::IDevice::getCapabilities_1_1_cb cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getCapabilities_1_1()");
+
+        return hal_1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getSupportedOperations_1_1()");
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                        m_Options,
+                                                                                        model,
+                                                                                        cb);
+    }
+
+    Return<V1_0::ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                               V1_1::ExecutionPreference preference,
+                                               const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_1()");
+
+        // Reject preferences outside the enum's known values before doing any work.
+        if (!(preference == ExecutionPreference::LOW_POWER ||
+              preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+              preference == ExecutionPreference::SUSTAINED_SPEED))
+        {
+            ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        // FP32->FP16 reduction is applied only when the model allows relaxed
+        // computation AND the driver option enables it.
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb,
+                                                                               model.relaxComputationFloat32toFloat16
+                                                                               && m_Options.GetFp16Enabled());
+    }
+
+    // ----- HAL 1.2 entry points, delegated to the hal_1_2 implementation -----
+
+    Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getCapabilities_1_2()");
+
+        return hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+                                            getSupportedOperations_1_2_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getSupportedOperations_1_2()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_2::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                        m_Options,
+                                                                                        model,
+                                                                                        cb);
+    }
+
+    // Model caching is not supported, so the cache handle/token parameters are ignored.
+    Return<V1_0::ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
+                                               const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+                                               const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+                                               const HidlToken&,
+                                               const android::sp<V1_2::IPreparedModelCallback>& cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_2()");
+
+        if (!(preference == ExecutionPreference::LOW_POWER ||
+              preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+              preference == ExecutionPreference::SUSTAINED_SPEED))
+        {
+            ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_2: Invalid execution preference");
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        return hal_1_2::ArmnnDriverImpl::prepareArmnnModel_1_2(m_Runtime,
+                                                               m_ClTunedParameters,
+                                                               m_Options,
+                                                               model,
+                                                               cb,
+                                                               model.relaxComputationFloat32toFloat16
+                                                               && m_Options.GetFp16Enabled());
+    }
+
+    // ----- HAL 1.3 entry points, handled by this module's implementation -----
+
+    Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getCapabilities_1_3()");
+
+        return hal_1_3::ArmnnDriverImpl::getCapabilities_1_3(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+                                            getSupportedOperations_1_3_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getSupportedOperations_1_3()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_3::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                        m_Options,
+                                                                                        model,
+                                                                                        cb);
+    }
+
+    // Caching, deadlines and priority-based scheduling are not supported; the
+    // corresponding parameters (cache handles, token, deadline) are ignored.
+    Return<V1_3::ErrorStatus> prepareModel_1_3(const V1_3::Model& model,
+                                               V1_1::ExecutionPreference preference,
+                                               V1_3::Priority priority,
+                                               const V1_3::OptionalTimePoint&,
+                                               const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+                                               const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+                                               const HidlToken&,
+                                               const android::sp<V1_3::IPreparedModelCallback>& cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_3()");
+
+        if (!(preference == ExecutionPreference::LOW_POWER ||
+              preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+              preference == ExecutionPreference::SUSTAINED_SPEED))
+        {
+            ALOGV("hal_1_3::ArmnnDriver::prepareModel_1_3: Invalid execution preference");
+            cb->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_3::ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        // Priority is validated (as required by the HAL) even though it is not
+        // used for scheduling.
+        if (!android::nn::validatePriority(priority)) {
+            cb->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_3::ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        return ArmnnDriverImpl::prepareArmnnModel_1_3(m_Runtime,
+                                                      m_ClTunedParameters,
+                                                      m_Options,
+                                                      model,
+                                                      cb,
+                                                      model.relaxComputationFloat32toFloat16
+                                                      && m_Options.GetFp16Enabled());
+    }
+
+    Return<void> getSupportedExtensions(getSupportedExtensions_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getSupportedExtensions()");
+        cb(V1_0::ErrorStatus::NONE, {/* No extensions. */});
+        return Void();
+    }
+
+    Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getNumberOfCacheFilesNeeded()");
+
+        // Set both numbers to be 0 for cache not supported.
+        cb(V1_0::ErrorStatus::NONE, 0, 0);
+        return Void();
+    }
+
+    Return<DeviceStatus> getStatus() override
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getStatus()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_3::HalPolicy>::getStatus();
+    }
+
+    Return<void> getVersionString(getVersionString_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getVersionString()");
+
+        cb(V1_0::ErrorStatus::NONE, "ArmNN");
+        return Void();
+    }
+
+    Return<void> getType(getType_cb cb)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::getType()");
+
+        cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU);
+        return Void();
+    }
+
+    // Compilation caching is unsupported: always report GENERAL_FAILURE.
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const HidlToken&,
+        const sp<V1_2::IPreparedModelCallback>& callback)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModelFromCache()");
+        callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr);
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+
+    Return<ErrorStatus> prepareModelFromCache_1_3(
+        V1_3::Priority,
+        const V1_3::OptionalTimePoint&,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const HidlToken&,
+        const sp<V1_3::IPreparedModelCallback>& callback)
+    {
+        ALOGV("hal_1_3::ArmnnDriver::prepareModelFromCache_1_3()");
+        callback->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr);
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+
+    Return<void> supportsDeadlines(supportsDeadlines_cb cb) {
+        // Set both numbers to be false for deadlines not supported.
+        cb(/*prepareModelDeadline=*/false, /*executionDeadline=*/false);
+        return Void();
+    }
+
+    // Driver-managed buffers are not supported.
+    Return<void> allocate(const V1_3::BufferDesc& /*desc*/,
+                          const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/,
+                          const hidl_vec<V1_3::BufferRole>& /*inputRoles*/,
+                          const hidl_vec<V1_3::BufferRole>& /*outputRoles*/,
+                          allocate_cb cb) {
+        ALOGV("hal_1_3::ArmnnDriver::allocate()");
+        cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0);
+        return Void();
+    }
+
+};
+
+} // namespace hal_1_3
+} // namespace armnn_driver
diff --git a/1.3/ArmnnDriverImpl.cpp b/1.3/ArmnnDriverImpl.cpp
new file mode 100644
index 00000000..98d038c9
--- /dev/null
+++ b/1.3/ArmnnDriverImpl.cpp
@@ -0,0 +1,338 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "../ArmnnPreparedModel_1_3.hpp"
+#include "../ModelToINetworkConverter.hpp"
+#include "../SystemPropertiesUtils.hpp"
+
+#include <log/log.h>
+
+namespace
+{
+
+// Names of the Android system properties that override the performance numbers
+// reported by getCapabilities_1_3(). Each operand type has an execTime and a
+// powerUsage property; unset properties fall back to a driver default.
+// NOTE(review): the first two names use the "ArmNN." prefix while the rest use
+// "Armnn." — confirm this inconsistency is intentional before relying on it.
+const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+const char *g_RelaxedFloat32toFloat16PerformancePowerUsage = "ArmNN.relaxedFloat32toFloat16Performance.powerUsage";
+
+const char *g_OperandTypeTensorFloat32PerformanceExecTime = "Armnn.operandTypeTensorFloat32Performance.execTime";
+const char *g_OperandTypeTensorFloat32PerformancePowerUsage = "Armnn.operandTypeTensorFloat32Performance.powerUsage";
+
+const char *g_OperandTypeFloat32PerformanceExecTime = "Armnn.operandTypeFloat32Performance.execTime";
+const char *g_OperandTypeFloat32PerformancePowerUsage = "Armnn.operandTypeFloat32Performance.powerUsage";
+
+const char *g_OperandTypeTensorFloat16PerformanceExecTime = "Armnn.operandTypeTensorFloat16Performance.execTime";
+const char *g_OperandTypeTensorFloat16PerformancePowerUsage = "Armnn.operandTypeTensorFloat16Performance.powerUsage";
+
+const char *g_OperandTypeFloat16PerformanceExecTime = "Armnn.operandTypeFloat16Performance.execTime";
+const char *g_OperandTypeFloat16PerformancePowerUsage = "Armnn.operandTypeFloat16Performance.powerUsage";
+
+const char *g_OperandTypeTensorQuant8AsymmPerformanceExecTime =
+        "Armnn.operandTypeTensorQuant8AsymmPerformance.execTime";
+const char *g_OperandTypeTensorQuant8AsymmPerformancePowerUsage =
+        "Armnn.operandTypeTensorQuant8AsymmPerformance.powerUsage";
+
+const char *g_OperandTypeTensorQuant8AsymmSignedPerformanceExecTime =
+        "Armnn.operandTypeTensorQuant8AsymmSignedPerformance.execTime";
+const char *g_OperandTypeTensorQuant8AsymmSignedPerformancePowerUsage =
+        "Armnn.operandTypeTensorQuant8AsymmSignedPerformance.powerUsage";
+
+const char *g_OperandTypeTensorQuant16SymmPerformanceExecTime =
+        "Armnn.operandTypeTensorQuant16SymmPerformance.execTime";
+const char *g_OperandTypeTensorQuant16SymmPerformancePowerUsage =
+        "Armnn.operandTypeTensorQuant16SymmPerformance.powerUsage";
+
+const char *g_OperandTypeTensorQuant8SymmPerformanceExecTime =
+        "Armnn.operandTypeTensorQuant8SymmPerformance.execTime";
+const char *g_OperandTypeTensorQuant8SymmPerformancePowerUsage =
+        "Armnn.operandTypeTensorQuant8SymmPerformance.powerUsage";
+
+const char *g_OperandTypeTensorQuant8SymmPerChannelPerformanceExecTime =
+        "Armnn.operandTypeTensorQuant8SymmPerChannelPerformance.execTime";
+const char *g_OperandTypeTensorQuant8SymmPerChannelPerformancePowerUsage =
+        "Armnn.operandTypeTensorQuant8SymmPerChannelPerformance.powerUsage";
+
+
+const char *g_OperandTypeTensorInt32PerformanceExecTime = "Armnn.operandTypeTensorInt32Performance.execTime";
+const char *g_OperandTypeTensorInt32PerformancePowerUsage = "Armnn.operandTypeTensorInt32Performance.powerUsage";
+
+const char *g_OperandTypeInt32PerformanceExecTime = "Armnn.operandTypeInt32Performance.execTime";
+const char *g_OperandTypeInt32PerformancePowerUsage = "Armnn.operandTypeInt32Performance.powerUsage";
+
+
+// Deliver a prepare-model result to the client callback and log (rather than
+// crash the service) if the HIDL transaction itself fails.
+void NotifyCallbackAndCheck(const sp<V1_3::IPreparedModelCallback>& callback,
+                            V1_3::ErrorStatus errorStatus,
+                            const sp<V1_3::IPreparedModel>& preparedModelPtr)
+{
+    Return<void> returned = callback->notify_1_3(errorStatus, preparedModelPtr);
+    // This check is required, if the callback fails and it isn't checked it will bring down the service
+    if (!returned.isOk())
+    {
+        ALOGE("ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
+              returned.description().c_str());
+    }
+}
+
+// Log the failure message, report `error` (with a null prepared model) through
+// the callback, and return the same error so callers can propagate it.
+Return<V1_3::ErrorStatus> FailPrepareModel(V1_3::ErrorStatus error,
+                                           const std::string& message,
+                                           const sp<V1_3::IPreparedModelCallback>& callback)
+{
+    ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
+    NotifyCallbackAndCheck(callback, error, nullptr);
+    return error;
+}
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+namespace hal_1_3
+{
+
+// Convert a HAL 1.3 model into an ArmNN network, optimize and load it into the
+// runtime, warm it up with a dummy inference, and hand the resulting
+// ArmnnPreparedModel_1_3 to the client via `cb`.
+//
+// Error reporting convention: after the initial argument checks, failures are
+// reported to the client through FailPrepareModel (i.e. via the callback) and
+// this function then returns ErrorStatus::NONE — the return value only signals
+// transport-level success. NOTE(review): confirm this matches the HAL contract;
+// the early LoadNetwork-failure path at the bottom returns the error instead.
+//
+// `float32ToFloat16` enables the optimizer's FP32->FP16 reduction.
+Return<V1_3::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_3(
+       const armnn::IRuntimePtr& runtime,
+       const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+       const DriverOptions& options,
+       const V1_3::Model& model,
+       const sp<V1_3::IPreparedModelCallback>& cb,
+       bool float32ToFloat16)
+{
+    ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_3()");
+
+    // Without a callback there is no way to report a result at all.
+    if (cb.get() == nullptr)
+    {
+        ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
+        return V1_3::ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    if (!runtime)
+    {
+        return FailPrepareModel(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
+    }
+
+    if (!android::nn::validateModel(model))
+    {
+        return FailPrepareModel(V1_3::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
+    }
+
+    // Deliberately ignore any unsupported operations requested by the options -
+    // at this point we're being asked to prepare a model that we've already declared support for
+    // and the operation indices may be different to those in getSupportedOperations anyway.
+    std::set<unsigned int> unsupportedOperations;
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+                                                       model,
+                                                       unsupportedOperations);
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success)
+    {
+        FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return V1_3::ErrorStatus::NONE;
+    }
+
+    // Optimize the network
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+
+    // armnn::Optimize can throw; translate exceptions into a callback failure.
+    std::vector<std::string> errMessages;
+    try
+    {
+        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
+                                 options.GetBackends(),
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions,
+                                 errMessages);
+    }
+    catch (std::exception& e)
+    {
+        std::stringstream message;
+        message << "Exception (" << e.what() << ") caught from optimize.";
+        FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_3::ErrorStatus::NONE;
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        std::stringstream message;
+        message << "Invalid optimized network";
+        for (const std::string& msg : errMessages)
+        {
+            message << "\n" << msg;
+        }
+        FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_3::ErrorStatus::NONE;
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the drivers' arguments.
+    std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
+                                                               options.GetRequestInputsAndOutputsDumpDir());
+
+    // Load it into the runtime.
+    armnn::NetworkId netId = 0;
+    try
+    {
+        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
+        {
+            return FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
+        }
+    }
+    catch (std::exception& e)
+    {
+        std::stringstream message;
+        message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
+        FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_3::ErrorStatus::NONE;
+    }
+
+    // Now that we have a networkId for the graph rename the dump file to use it
+    // so that we can associate the graph file and the input/output tensor dump files
+    RenameGraphDotFile(dotGraphFileName,
+                       options.GetRequestInputsAndOutputsDumpDir(),
+                       netId);
+
+    std::unique_ptr<ArmnnPreparedModel_1_3<hal_1_3::HalPolicy>> preparedModel(
+            new ArmnnPreparedModel_1_3<hal_1_3::HalPolicy>(
+                    netId,
+                    runtime.get(),
+                    model,
+                    options.GetRequestInputsAndOutputsDumpDir(),
+                    options.IsGpuProfilingEnabled()));
+
+    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+    if (!preparedModel->ExecuteWithDummyInputs())
+    {
+        return FailPrepareModel(V1_3::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+    }
+
+    if (clTunedParameters &&
+        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
+    {
+        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
+        try
+        {
+            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+        }
+        catch (std::exception& error)
+        {
+            // Best-effort: a failed save only loses tuning data, so log and continue.
+            ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
+                  options.GetClTunedParametersFile().c_str(), error.what());
+        }
+    }
+
+    // Ownership of the prepared model passes to the client via the callback.
+    NotifyCallbackAndCheck(cb, V1_3::ErrorStatus::NONE, preparedModel.release());
+
+    return V1_3::ErrorStatus::NONE;
+}
+
+// Report the device's HAL 1.3 performance capabilities. Numbers are read from
+// Android system properties (see the g_* names above), falling back to
+// `defaultValue` when a property is unset. If the runtime is unavailable the
+// callback receives DEVICE_UNAVAILABLE with zeroed capabilities.
+Return<void> ArmnnDriverImpl::getCapabilities_1_3(const armnn::IRuntimePtr& runtime,
+                                                  V1_3::IDevice::getCapabilities_1_3_cb cb)
+{
+    ALOGV("hal_1_3::ArmnnDriverImpl::getCapabilities()");
+
+    V1_3::Capabilities capabilities;
+
+    float defaultValue = .1f;
+
+    if (runtime)
+    {
+        // Populate all four relaxed-precision fields; leaving any of them
+        // default-initialized would advertise a bogus performance number.
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, defaultValue);
+
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.powerUsage =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsage, defaultValue);
+
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, defaultValue);
+
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.powerUsage =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsage, defaultValue);
+
+        // Set the base value for all operand types (FLT_MAX = worst possible),
+        // then override the types this driver actually supports.
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({FLT_MAX, FLT_MAX});
+
+        // Load supported operand types
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorFloat32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeFloat32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeFloat32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT16,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorFloat16PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat16PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT16,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeFloat16PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeFloat16PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_SYMM,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmSignedPerformanceExecTime,
+                                                   defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmSignedPerformancePowerUsage,
+                                                     defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT16_SYMM,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+               {
+                   .execTime =
+                       ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformanceExecTime, defaultValue),
+                   .powerUsage =
+                       ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_INT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeTensorInt32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeTensorInt32PerformancePowerUsage, defaultValue)
+               });
+
+        update(&capabilities.operandPerformance, V1_3::OperandType::INT32,
+               {
+                   .execTime = ParseSystemProperty(g_OperandTypeInt32PerformanceExecTime, defaultValue),
+                   .powerUsage = ParseSystemProperty(g_OperandTypeInt32PerformancePowerUsage, defaultValue)
+               });
+
+        cb(V1_3::ErrorStatus::NONE, capabilities);
+    }
+    else
+    {
+        // No runtime: zero every relaxed-precision field, not just execTime.
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime   = 0;
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.powerUsage = 0;
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime   = 0;
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.powerUsage = 0;
+
+        // Set the base value for all operand types
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({0.f, 0.0f});
+
+        cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+    }
+
+    return Void();
+}
+
+} // namespace hal_1_3
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.3/ArmnnDriverImpl.hpp b/1.3/ArmnnDriverImpl.hpp
new file mode 100644
index 00000000..8a665ea5
--- /dev/null
+++ b/1.3/ArmnnDriverImpl.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+using namespace android::nn::hal;
+
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
+
+namespace armnn_driver
+{
+namespace hal_1_3
+{
+
+/// Static helpers implementing the HAL 1.3 entry points used by
+/// hal_1_3::ArmnnDriver.
+class ArmnnDriverImpl
+{
+public:
+    /// Convert, optimize and load the given HAL 1.3 model into the ArmNN
+    /// runtime, then deliver the resulting prepared model through @p cb.
+    /// @param float32ToFloat16 when true, the optimizer reduces FP32 to FP16.
+    static Return<V1_3::ErrorStatus> prepareArmnnModel_1_3(const armnn::IRuntimePtr& runtime,
+                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+                                                           const DriverOptions& options,
+                                                           const V1_3::Model& model,
+                                                           const android::sp<V1_3::IPreparedModelCallback>& cb,
+                                                           bool float32ToFloat16 = false);
+
+    /// Report this device's HAL 1.3 performance capabilities through @p cb.
+    static Return<void> getCapabilities_1_3(const armnn::IRuntimePtr& runtime,
+                                            V1_3::IDevice::getCapabilities_1_3_cb cb);
+};
+
+} // namespace hal_1_3
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
new file mode 100644
index 00000000..0de7573a
--- /dev/null
+++ b/1.3/HalPolicy.cpp
@@ -0,0 +1,451 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_3
+{
+
+using namespace armnn;
+
+namespace
+{
+
+} // anonymous namespace
+
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+ switch (operation.type)
+ {
+ case V1_3::OperationType::ABS:
+ return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
+ case V1_3::OperationType::ADD:
+ return ConvertAdd(operation, model, data);
+ case V1_3::OperationType::ARGMAX:
+ return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
+ case V1_3::OperationType::ARGMIN:
+ return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
+ case V1_3::OperationType::AVERAGE_POOL_2D:
+ return ConvertAveragePool2d(operation, model, data);
+ case V1_3::OperationType::BATCH_TO_SPACE_ND:
+ return ConvertBatchToSpaceNd(operation, model, data);
+ case V1_3::OperationType::CONCATENATION:
+ return ConvertConcatenation(operation, model, data);
+ case V1_3::OperationType::CONV_2D:
+ return ConvertConv2d(operation, model, data);
+ case V1_3::OperationType::DEPTH_TO_SPACE:
+ return ConvertDepthToSpace(operation, model, data);
+ case V1_3::OperationType::DEPTHWISE_CONV_2D:
+ return ConvertDepthwiseConv2d(operation, model, data);
+ case V1_3::OperationType::DEQUANTIZE:
+ return ConvertDequantize(operation, model, data);
+ case V1_3::OperationType::DIV:
+ return ConvertDiv(operation, model, data);
+ case V1_3::OperationType::EQUAL:
+ return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
+ case V1_3::OperationType::EXPAND_DIMS:
+ return ConvertExpandDims(operation, model, data);
+ case V1_3::OperationType::FLOOR:
+ return ConvertFloor(operation, model, data);
+ case V1_3::OperationType::FULLY_CONNECTED:
+ return ConvertFullyConnected(operation, model, data);
+ case V1_3::OperationType::GREATER:
+ return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
+ case V1_3::OperationType::GREATER_EQUAL:
+ return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
+ case V1_3::OperationType::GROUPED_CONV_2D:
+ return ConvertGroupedConv2d(operation, model, data);
+ case V1_3::OperationType::INSTANCE_NORMALIZATION:
+ return ConvertInstanceNormalization(operation, model, data);
+ case V1_3::OperationType::L2_NORMALIZATION:
+ return ConvertL2Normalization(operation, model, data);
+ case V1_3::OperationType::L2_POOL_2D:
+ return ConvertL2Pool2d(operation, model, data);
+ case V1_3::OperationType::LESS:
+ return ConvertComparison(operation, model, data, ComparisonOperation::Less);
+ case V1_3::OperationType::LESS_EQUAL:
+ return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
+ case V1_3::OperationType::LOCAL_RESPONSE_NORMALIZATION:
+ return ConvertLocalResponseNormalization(operation, model, data);
+ case V1_3::OperationType::LOGISTIC:
+ return ConvertLogistic(operation, model, data);
+ case V1_3::OperationType::LOG_SOFTMAX:
+ return ConvertLogSoftmax(operation, model, data);
+ case V1_3::OperationType::LSTM:
+ return ConvertLstm(operation, model, data);
+ case V1_3::OperationType::MAX_POOL_2D:
+ return ConvertMaxPool2d(operation, model, data);
+ case V1_3::OperationType::MAXIMUM:
+ return ConvertMaximum(operation, model, data);
+ case V1_3::OperationType::MEAN:
+ return ConvertMean(operation, model, data);
+ case V1_3::OperationType::MINIMUM:
+ return ConvertMinimum(operation, model, data);
+ case V1_3::OperationType::MUL:
+ return ConvertMul(operation, model, data);
+ case V1_3::OperationType::NEG:
+ return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
+ case V1_3::OperationType::NOT_EQUAL:
+ return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
+ case V1_3::OperationType::PAD:
+ return ConvertPad(operation, model, data);
+ case V1_3::OperationType::PAD_V2:
+ return ConvertPadV2(operation, model, data);
+ case V1_3::OperationType::PRELU:
+ return ConvertPrelu(operation, model, data);
+ case V1_3::OperationType::QUANTIZE:
+ return ConvertQuantize(operation, model, data);
+ case V1_3::OperationType::QUANTIZED_16BIT_LSTM:
+ return ConvertQuantizedLstm(operation, model, data);
+ case V1_3::OperationType::RELU:
+ return ConvertReLu(operation, model, data);
+ case V1_3::OperationType::RELU1:
+ return ConvertReLu1(operation, model, data);
+ case V1_3::OperationType::RELU6:
+ return ConvertReLu6(operation, model, data);
+ case V1_3::OperationType::RESHAPE:
+ return ConvertReshape(operation, model, data);
+ case V1_3::OperationType::RESIZE_BILINEAR:
+ return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
+ case V1_3::OperationType::RESIZE_NEAREST_NEIGHBOR:
+ return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
+ case V1_3::OperationType::RSQRT:
+ return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
+ case V1_3::OperationType::SQRT:
+ return ConvertSqrt(operation, model, data);
+ case V1_3::OperationType::SQUEEZE:
+ return ConvertSqueeze(operation, model, data);
+ case V1_3::OperationType::STRIDED_SLICE:
+ return ConvertStridedSlice(operation, model, data);
+ case V1_3::OperationType::TRANSPOSE:
+ return ConvertTranspose(operation, model, data);
+ case V1_3::OperationType::TRANSPOSE_CONV_2D:
+ return ConvertTransposeConv2d(operation, model, data);
+ case V1_3::OperationType::SOFTMAX:
+ return ConvertSoftmax(operation, model, data);
+ case V1_3::OperationType::SPACE_TO_BATCH_ND :
+ return ConvertSpaceToBatchNd(operation, model, data);
+ case V1_3::OperationType::SPACE_TO_DEPTH:
+ return ConvertSpaceToDepth(operation, model, data);
+ case V1_3::OperationType::SUB:
+ return ConvertSub(operation, model, data);
+ case V1_3::OperationType::TANH:
+ return ConvertTanH(operation, model, data);
+ default:
+ return Fail("%s: Operation type %s not supported in ArmnnDriver",
+ __func__, toString(operation.type).c_str());
+ }
+}
+
+bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertAdd()");
+ return ::ConvertAdd<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertArgMinMax(const V1_3::Operation& operation,
+ const V1_3::Model& model,
+ ConversionData& data,
+ armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertArgMinMax()");
+ return ::ConvertArgMinMax<hal_1_3::HalPolicy>(operation, model, data, argMinMaxFunction);
+}
+
+bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertAveragePool2d()");
+ return ConvertPooling2d<hal_1_3::HalPolicy>(operation, __func__, PoolingAlgorithm::Average, model, data);
+}
+
+bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertBatchToSpaceNd()");
+ return ::ConvertBatchToSpaceNd<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertComparison(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ ComparisonOperation comparisonOperation)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertComparison()");
+ return ::ConvertComparison_1_2<hal_1_3::HalPolicy>(operation, model, data, comparisonOperation);
+}
+
+
+bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertConcatenation()");
+ return ::ConvertConcatenation<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertConv2d()");
+ return ::ConvertConv2d_1_2<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertDepthToSpace()");
+ return ::ConvertDepthToSpace<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertDepthwiseConv2d()");
+ return ::ConvertDepthwiseConv2d_1_2<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertDequantize()");
+ return ::ConvertDequantize_1_2<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertDiv()");
+ return ::ConvertDiv<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertElementwiseUnary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ UnaryOperation unaryOperation)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertElementwiseUnary()");
+ return ::ConvertElementwiseUnary<hal_1_3::HalPolicy>(operation, model, data, unaryOperation);
+}
+
+bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertExpandDims()");
+ return ::ConvertExpandDims<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertFloor()");
+ return ::ConvertFloor<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertFullyConnected()");
+ return ::ConvertFullyConnected<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertGroupedConv2d()");
+ return ::ConvertGroupedConv2d<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertInstanceNormalization()");
+ return ::ConvertInstanceNormalization<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertL2Normalization()");
+ return ::ConvertL2Normalization<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertL2Pool2d()");
+ return ConvertPooling2d<hal_1_3::HalPolicy>(operation, __func__, PoolingAlgorithm::L2, model, data);
+}
+
+bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
+ const Model& model,
+ ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertLocalResponseNormalization()");
+ return ::ConvertLocalResponseNormalization<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertLogistic()");
+ return ::ConvertLogistic<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertLogSoftmax()");
+ return ::ConvertLogSoftmax<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertLstm()");
+ return ::ConvertLstm<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertMaxPool2d()");
+ return ConvertPooling2d<hal_1_3::HalPolicy>(operation, __func__, PoolingAlgorithm::Max, model, data);
+}
+
+bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    // Fix garbled trace tag ("ConvertConvertMaximumMaximum") so logcat output matches
+    // the naming convention of every other Convert* wrapper in this file.
+    ALOGV("hal_1_3::HalPolicy::ConvertMaximum()");
+    return ::ConvertMaximum<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertMean()");
+ return ::ConvertMean<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertMinimum()");
+ return ::ConvertMinimum<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertMul()");
+ return ::ConvertMul<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertPad()");
+ return ::ConvertPad<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertPadV2()");
+ return ::ConvertPadV2<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertPrelu()");
+ return ::ConvertPrelu<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertQuantize()");
+ return ::ConvertQuantize<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertQuantizedLstm()");
+ return ::ConvertQuantizedLstm<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertReLu()");
+ return ::ConvertReLu<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertReLu1()");
+ return ::ConvertReLu1<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertReLu6()");
+ return ::ConvertReLu6<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertReshape()");
+ return ::ConvertReshape<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertResize(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ ResizeMethod resizeMethod)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertResize()");
+ return ::ConvertResize<hal_1_3::HalPolicy>(operation, model, data, resizeMethod);
+}
+
+bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSpaceToBatchNd()");
+ return ::ConvertSpaceToBatchNd<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSpaceToDepth()");
+ return ::ConvertSpaceToDepth<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSoftmax()");
+ return ::ConvertSoftmax<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSub()");
+ return ::ConvertSub<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertTanH()");
+ return ::ConvertTanH<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertTransposeConv2d()");
+ return ::ConvertTransposeConv2d<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSqrt()");
+ ActivationDescriptor desc;
+ desc.m_Function = ActivationFunction::Sqrt;
+
+ return ::ConvertToActivation<hal_1_3::HalPolicy>(operation, __func__, desc, model, data);
+}
+
+bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertSqueeze()");
+ return ::ConvertSqueeze<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertStridedSlice()");
+ return ::ConvertStridedSlice<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertTranspose()");
+ return ::ConvertTranspose<hal_1_3::HalPolicy>(operation, model, data);
+}
+
+} // namespace hal_1_3
+} // namespace armnn_driver
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
new file mode 100644
index 00000000..f7771a6c
--- /dev/null
+++ b/1.3/HalPolicy.hpp
@@ -0,0 +1,150 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "../ConversionUtils.hpp"
+#include "../ConversionUtils_1_2.hpp"
+
+#include <HalInterfaces.h>
+
+#include <armnn/Types.hpp>
+
+namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
+
+namespace armnn_driver
+{
+namespace hal_1_3
+{
+
+class HalPolicy
+{
+public:
+ using Model = V1_3::Model;
+ using Operand = V1_3::Operand;
+ using OperandLifeTime = V1_3::OperandLifeTime;
+ using OperandType = V1_3::OperandType;
+ using Operation = V1_3::Operation;
+ using OperationType = V1_3::OperationType;
+ using ExecutionCallback = V1_3::IExecutionCallback;
+ using getSupportedOperations_cb = V1_3::IDevice::getSupportedOperations_1_3_cb;
+ using ErrorStatus = V1_3::ErrorStatus;
+
+ static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+ static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertArgMinMax(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::ArgMinMaxFunction argMinMaxFunction);
+
+ static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertComparison(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::ComparisonOperation comparisonOperation);
+
+ static bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertElementwiseUnary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::UnaryOperation unaryOperation);
+
+ static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertLocalResponseNormalization(const Operation& operation,
+ const Model& model,
+ ConversionData& data);
+
+ static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertResize(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::ResizeMethod resizeMethod);
+
+ static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
+
+ static bool ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_3
+} // namespace armnn_driver