aboutsummaryrefslogtreecommitdiff
path: root/1.2
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2019-06-11 16:35:25 +0100
committerMike Kelly <mike.kelly@arm.com>2019-06-11 16:35:25 +0100
commitb5fdf38f0c6596958fab2b84882f2792a31e585a (patch)
treed6b578b51c1923c759653d8a04efa90923ad4dd8 /1.2
parentb92f8901fc34749337ea7a9ad7a2717fc9490de5 (diff)
downloadandroid-nn-driver-b5fdf38f0c6596958fab2b84882f2792a31e585a.tar.gz
IVGCVSW-3181 Add HAL 1.2 support to android-nn-driver
* Updated Android.mk to build HAL 1.2 driver * Added 1.2 HalPolicy and ArmnnDriver * Added 1.2 ArmnnPreparedModel * Updated converters and utilities to accept new HAL 1.2 operands and operand types. Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Signed-off-by: Mike Kelly <mike.kelly@arm.com> Change-Id: I62856deab24e106f72cccce09468db4971756fa6
Diffstat (limited to '1.2')
-rw-r--r--1.2/ArmnnDriver.hpp208
-rw-r--r--1.2/ArmnnDriverImpl.cpp206
-rw-r--r--1.2/ArmnnDriverImpl.hpp34
-rw-r--r--1.2/HalPolicy.cpp144
-rw-r--r--1.2/HalPolicy.hpp31
5 files changed, 623 insertions, 0 deletions
diff --git a/1.2/ArmnnDriver.hpp b/1.2/ArmnnDriver.hpp
new file mode 100644
index 00000000..7460f396
--- /dev/null
+++ b/1.2/ArmnnDriver.hpp
@@ -0,0 +1,208 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../ArmnnDevice.hpp"
+#include "ArmnnDriverImpl.hpp"
+#include "HalPolicy.hpp"
+
+#include "../ArmnnDriverImpl.hpp"
+#include "../1.2/ArmnnDriverImpl.hpp"
+#include "../1.2/HalPolicy.hpp"
+#include "../1.1/ArmnnDriverImpl.hpp"
+#include "../1.1/HalPolicy.hpp"
+#include "../1.0/ArmnnDriverImpl.hpp"
+#include "../1.0/HalPolicy.hpp"
+
+#include <log/log.h>
+
+namespace armnn_driver
+{
+namespace hal_1_2
+{
+
+/// HAL 1.2 driver: implements the V1_2::IDevice HIDL interface on top of
+/// Arm NN. Calls belonging to older HAL levels are forwarded to the matching
+/// 1.0 / 1.1 ArmnnDriverImpl and HalPolicy; 1.2-specific calls go to
+/// hal_1_2::ArmnnDriverImpl.
+class ArmnnDriver : public ArmnnDevice, public V1_2::IDevice
+{
+public:
+    ArmnnDriver(DriverOptions options)
+        : ArmnnDevice(std::move(options))
+    {
+        ALOGV("hal_1_2::ArmnnDriver::ArmnnDriver()");
+    }
+    ~ArmnnDriver() {}
+
+    // Cache token type used by the 1.2 model-cache entry points below.
+    using HidlToken = android::hardware::hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
+
+public:
+    // ---- HAL 1.0 methods ----
+
+    Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getCapabilities()");
+
+        return hal_1_0::ArmnnDriverImpl::getCapabilities(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        V1_0::IDevice::getSupportedOperations_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getSupportedOperations()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
+    }
+
+    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
+                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::prepareModel()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_0::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb);
+    }
+
+    // ---- HAL 1.1 methods ----
+
+    Return<void> getCapabilities_1_1(V1_1::IDevice::getCapabilities_1_1_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getCapabilities_1_1()");
+
+        return hal_1_1::ArmnnDriverImpl::getCapabilities_1_1(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            V1_1::IDevice::getSupportedOperations_1_1_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getSupportedOperations_1_1()");
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
+    }
+
+    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                         V1_1::ExecutionPreference preference,
+                                         const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1()");
+
+        // Reject execution preferences outside the set defined by the HAL.
+        if (!(preference == ExecutionPreference::LOW_POWER ||
+              preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+              preference == ExecutionPreference::SUSTAINED_SPEED))
+        {
+            ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
+            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
+                                                                               m_ClTunedParameters,
+                                                                               m_Options,
+                                                                               model,
+                                                                               cb,
+                                                                               model.relaxComputationFloat32toFloat16
+                                                                               && m_Options.GetFp16Enabled());
+    }
+
+    Return<DeviceStatus> getStatus() override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getStatus()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_2::HalPolicy>::getStatus();
+    }
+
+    // ---- HAL 1.2 methods ----
+
+    Return<void> getVersionString(getVersionString_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getVersionString()");
+
+        cb(ErrorStatus::NONE, "ArmNN");
+        return Void();
+    }
+
+    Return<void> getType(getType_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getType()");
+
+        cb(ErrorStatus::NONE, V1_2::DeviceType::CPU);
+        return Void();
+    }
+
+    Return<ErrorStatus> prepareModelFromCache(
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const HidlToken&,
+        const sp<V1_2::IPreparedModelCallback>& callback) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::prepareModelFromCache()");
+
+        // Guard against a null callback before dereferencing it (mirrors the
+        // check in ArmnnDriverImpl::prepareArmnnModel_1_2).
+        if (callback.get() == nullptr)
+        {
+            ALOGW("hal_1_2::ArmnnDriver::prepareModelFromCache: Invalid callback passed");
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        // Model caching is not supported by this driver.
+        callback->notify_1_2(ErrorStatus::GENERAL_FAILURE, nullptr);
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+
+    Return<ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
+        const android::hardware::hidl_vec<android::hardware::hidl_handle>&, const HidlToken&,
+        const android::sp<V1_2::IPreparedModelCallback>& cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_2()");
+
+        // Reject execution preferences outside the set defined by the HAL.
+        if (!(preference == ExecutionPreference::LOW_POWER ||
+              preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+              preference == ExecutionPreference::SUSTAINED_SPEED))
+        {
+            ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_2: Invalid execution preference");
+            // Use the 1.2 notify on the 1.2 callback, consistent with
+            // prepareModelFromCache above.
+            cb->notify_1_2(ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        return ArmnnDriverImpl::prepareArmnnModel_1_2(m_Runtime,
+                                                      m_ClTunedParameters,
+                                                      m_Options,
+                                                      model,
+                                                      cb,
+                                                      model.relaxComputationFloat32toFloat16
+                                                      && m_Options.GetFp16Enabled());
+    }
+
+    Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()");
+        cb(ErrorStatus::NONE, {/* No extensions. */});
+        return Void();
+    }
+
+    Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getCapabilities_1_2()");
+
+        return hal_1_2::ArmnnDriverImpl::getCapabilities_1_2(m_Runtime, cb);
+    }
+
+    Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+                                            getSupportedOperations_1_2_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getSupportedOperations_1_2()");
+
+        return armnn_driver::ArmnnDriverImpl<hal_1_2::HalPolicy>::getSupportedOperations(m_Runtime,
+                                                                                         m_Options,
+                                                                                         model,
+                                                                                         cb);
+    }
+
+    Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override
+    {
+        ALOGV("hal_1_2::ArmnnDriver::getNumberOfCacheFilesNeeded()");
+
+        // Set both numbers to be 0 for cache not supported.
+        cb(ErrorStatus::NONE, 0, 0);
+        return Void();
+    }
+};
+
+} // namespace hal_1_2
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
new file mode 100644
index 00000000..97cfa5de
--- /dev/null
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -0,0 +1,206 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "../ArmnnPreparedModel_1_2.hpp"
+#include "../ModelToINetworkConverter.hpp"
+#include "../SystemPropertiesUtils.hpp"
+
+#include <log/log.h>
+
+namespace
+{
+
+const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+// Delivers the prepare-model result to the caller's callback and verifies the
+// HIDL transaction itself succeeded; a failed transaction is logged rather
+// than propagated so a dead client cannot bring down the service.
+// NOTE(review): this invokes notify() on a V1_2::IPreparedModelCallback;
+// confirm whether the 1.2-specific notify_1_2() was intended here.
+void NotifyCallbackAndCheck(const sp<V1_2::IPreparedModelCallback>& callback,
+                            ErrorStatus errorStatus,
+                            const sp<V1_2::IPreparedModel>& preparedModelPtr)
+{
+    Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
+    // This check is required, if the callback fails and it isn't checked it will bring down the service
+    if (!returned.isOk())
+    {
+        ALOGE("ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s ",
+              returned.description().c_str());
+    }
+}
+
+// Reports a model-preparation failure: logs the message, notifies the
+// caller's callback with the error, and returns the same error status so
+// call sites can simply write 'return FailPrepareModel(...)'.
+Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
+                                     const std::string& message,
+                                     const sp<V1_2::IPreparedModelCallback>& callback)
+{
+    ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
+    NotifyCallbackAndCheck(callback, error, nullptr);
+    return error;
+}
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+namespace hal_1_2
+{
+
+// Prepares a HAL 1.2 model for execution: validates inputs, converts the HAL
+// model to an armnn::INetwork, optimizes it, loads it into the runtime, runs
+// one dummy inference to warm up (and tune) CL kernels, and finally hands the
+// prepared model to the caller via 'cb'.
+// Failure protocol: errors detected after the callback has been validated are
+// reported through FailPrepareModel (which notifies 'cb').
+// NOTE(review): some failure paths 'return FailPrepareModel(...)' (propagating
+// the error status) while others call it and then 'return ErrorStatus::NONE' -
+// confirm this mixed convention is intentional.
+Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
+                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+                                                           const DriverOptions& options,
+                                                           const V1_2::Model& model,
+                                                           const sp<V1_2::IPreparedModelCallback>& cb,
+                                                           bool float32ToFloat16)
+{
+    ALOGV("ArmnnDriverImpl::prepareModel()");
+
+    // Without a callback there is no way to report a result at all.
+    if (cb.get() == nullptr)
+    {
+        ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    if (!runtime)
+    {
+        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
+    }
+
+    if (!android::nn::validateModel(model))
+    {
+        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
+    }
+
+    // Deliberately ignore any unsupported operations requested by the options -
+    // at this point we're being asked to prepare a model that we've already declared support for
+    // and the operation indices may be different to those in getSupportedOperations anyway.
+    std::set<unsigned int> unsupportedOperations;
+    ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+                                                       model,
+                                                       unsupportedOperations);
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success)
+    {
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Optimize the network
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+
+    // Optimize() can both throw and report errors via 'errMessages'.
+    std::vector<std::string> errMessages;
+    try
+    {
+        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
+                                 options.GetBackends(),
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions,
+                                 errMessages);
+    }
+    catch (armnn::Exception &e)
+    {
+        std::stringstream message;
+        message << "armnn::Exception (" << e.what() << ") caught from optimize.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        std::stringstream message;
+        message << "Invalid optimized network";
+        for (const std::string& msg : errMessages)
+        {
+            message << "\n" << msg;
+        }
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the drivers' arguments.
+    ExportNetworkGraphToDotFile<hal_1_2::HalPolicy::Model>(*optNet, options.GetRequestInputsAndOutputsDumpDir(),
+                                                           model);
+
+    // Load it into the runtime.
+    armnn::NetworkId netId = 0;
+    try
+    {
+        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
+        {
+            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
+        }
+    }
+    catch (armnn::Exception& e)
+    {
+        std::stringstream message;
+        message << "armnn::Exception (" << e.what()<< ") caught from LoadNetwork.";
+        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return ErrorStatus::NONE;
+    }
+
+    std::unique_ptr<ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>> preparedModel(
+            new ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>(
+                    netId,
+                    runtime.get(),
+                    model,
+                    options.GetRequestInputsAndOutputsDumpDir(),
+                    options.IsGpuProfilingEnabled()));
+
+    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+    if (!preparedModel->ExecuteWithDummyInputs())
+    {
+        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+    }
+
+    if (clTunedParameters &&
+        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
+    {
+        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
+        try
+        {
+            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+        }
+        catch (const armnn::Exception& error)
+        {
+            ALOGE("ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file '%s': %s",
+                  options.GetClTunedParametersFile().c_str(), error.what());
+        }
+    }
+
+    // Success: hand ownership of the prepared model to the callback.
+    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
+
+    return ErrorStatus::NONE;
+}
+
+// Reports the driver's HAL 1.2 capabilities. Both the scalar and the tensor
+// relaxed-precision figures are read from the same system property
+// ("ArmNN.relaxedFloat32toFloat16Performance.execTime"), defaulting to 0.1.
+// If the runtime is unavailable, zeroed figures are returned with
+// DEVICE_UNAVAILABLE.
+// NOTE(review): only the execTime fields are populated; any other members of
+// V1_2::Capabilities are left at their defaults - confirm that is intended.
+Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
+                                                  V1_2::IDevice::getCapabilities_1_2_cb cb)
+{
+    ALOGV("hal_1_2::ArmnnDriverImpl::getCapabilities()");
+
+    V1_2::Capabilities capabilities;
+
+    if (runtime)
+    {
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
+
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime =
+                ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
+
+        cb(ErrorStatus::NONE, capabilities);
+    }
+    else
+    {
+        capabilities.relaxedFloat32toFloat16PerformanceScalar.execTime = 0;
+        capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime = 0;
+
+        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+    }
+
+    return Void();
+}
+
+} // namespace hal_1_2
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.2/ArmnnDriverImpl.hpp b/1.2/ArmnnDriverImpl.hpp
new file mode 100644
index 00000000..b3c65079
--- /dev/null
+++ b/1.2/ArmnnDriverImpl.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <HalInterfaces.h>
+
+#include "../DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+namespace hal_1_2
+{
+
+/// Implementation backend for the HAL 1.2-specific IDevice entry points.
+/// Stateless: every method takes the runtime/options it needs as parameters.
+class ArmnnDriverImpl
+{
+public:
+    /// Converts, optimizes and loads a V1_2::Model, then reports the prepared
+    /// model (or an error) through 'cb'. 'float32ToFloat16' enables the
+    /// FP32->FP16 reduction during optimization.
+    static Return<ErrorStatus> prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
+                                                     const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+                                                     const DriverOptions& options,
+                                                     const V1_2::Model& model,
+                                                     const android::sp<V1_2::IPreparedModelCallback>& cb,
+                                                     bool float32ToFloat16 = false);
+
+    /// Reports the driver's 1.2 performance capabilities via 'cb'.
+    static Return<void> getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
+                                            V1_2::IDevice::getCapabilities_1_2_cb cb);
+};
+
+} // namespace hal_1_2
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
new file mode 100644
index 00000000..abc0cfca
--- /dev/null
+++ b/1.2/HalPolicy.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "HalPolicy.hpp"
+
+#include "../1.0/HalPolicy.hpp"
+#include "../1.1/HalPolicy.hpp"
+
+namespace armnn_driver
+{
+namespace hal_1_2
+{
+
+// Returns true if this 1.2 operation type is one that already existed in the
+// HAL 1.0 operation set and can therefore be forwarded to the 1.0 HalPolicy.
+bool HandledByV1_0(V1_2::OperationType operationType)
+{
+    const auto v10Type = static_cast<V1_0::OperationType>(operationType);
+    return v10Type == V1_0::OperationType::ADD
+        || v10Type == V1_0::OperationType::AVERAGE_POOL_2D
+        || v10Type == V1_0::OperationType::CONCATENATION
+        || v10Type == V1_0::OperationType::DEPTH_TO_SPACE
+        || v10Type == V1_0::OperationType::DEQUANTIZE
+        || v10Type == V1_0::OperationType::EMBEDDING_LOOKUP
+        || v10Type == V1_0::OperationType::FLOOR
+        || v10Type == V1_0::OperationType::FULLY_CONNECTED
+        || v10Type == V1_0::OperationType::HASHTABLE_LOOKUP
+        || v10Type == V1_0::OperationType::L2_NORMALIZATION
+        || v10Type == V1_0::OperationType::L2_POOL_2D
+        || v10Type == V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION
+        || v10Type == V1_0::OperationType::LOGISTIC
+        || v10Type == V1_0::OperationType::LSH_PROJECTION
+        || v10Type == V1_0::OperationType::LSTM
+        || v10Type == V1_0::OperationType::MAX_POOL_2D
+        || v10Type == V1_0::OperationType::MUL
+        || v10Type == V1_0::OperationType::RELU
+        || v10Type == V1_0::OperationType::RELU1
+        || v10Type == V1_0::OperationType::RELU6
+        || v10Type == V1_0::OperationType::RESHAPE
+        || v10Type == V1_0::OperationType::RESIZE_BILINEAR
+        || v10Type == V1_0::OperationType::RNN
+        || v10Type == V1_0::OperationType::SOFTMAX
+        || v10Type == V1_0::OperationType::SPACE_TO_DEPTH
+        || v10Type == V1_0::OperationType::SVDF
+        || v10Type == V1_0::OperationType::TANH
+        || v10Type == V1_0::OperationType::OEM_OPERATION;
+}
+
+// Returns true if this 1.2 operation type is handled by the 1.0 or 1.1
+// HalPolicy (the 1.1 operation set is a strict superset of the 1.0 one).
+bool HandledByV1_1(V1_2::OperationType operationType)
+{
+    if (HandledByV1_0(operationType))
+    {
+        return true;
+    }
+    const auto v11Type = static_cast<V1_1::OperationType>(operationType);
+    return v11Type == V1_1::OperationType::BATCH_TO_SPACE_ND
+        || v11Type == V1_1::OperationType::DIV
+        || v11Type == V1_1::OperationType::MEAN
+        || v11Type == V1_1::OperationType::PAD
+        || v11Type == V1_1::OperationType::SPACE_TO_BATCH_ND
+        || v11Type == V1_1::OperationType::SQUEEZE
+        || v11Type == V1_1::OperationType::STRIDED_SLICE
+        || v11Type == V1_1::OperationType::SUB
+        || v11Type == V1_1::OperationType::TRANSPOSE;
+}
+
+// Convenience overload: dispatches on the operation's type field.
+bool HandledByV1_0(const V1_2::Operation& operation)
+{
+    const V1_2::OperationType opType = operation.type;
+    return HandledByV1_0(opType);
+}
+
+// Convenience overload: dispatches on the operation's type field.
+bool HandledByV1_1(const V1_2::Operation& operation)
+{
+    const V1_2::OperationType opType = operation.type;
+    return HandledByV1_1(opType);
+}
+
+// Narrows a 1.2 operation type to its 1.0 counterpart via static_cast; the
+// surrounding code (HandledByV1_0) relies on the enum values being shared
+// across HAL versions, so callers must check HandledByV1_0() first.
+V1_0::OperationType CastToV1_0(V1_2::OperationType type)
+{
+    return static_cast<V1_0::OperationType>(type);
+}
+
+// Narrows a 1.2 operation type to its 1.1 counterpart via static_cast; the
+// surrounding code (HandledByV1_1) relies on the enum values being shared
+// across HAL versions, so callers must check HandledByV1_1() first.
+V1_1::OperationType CastToV1_1(V1_2::OperationType type)
+{
+    return static_cast<V1_1::OperationType>(type);
+}
+
+// Builds the 1.0 representation of a 1.2 operation; only meaningful for
+// operations for which HandledByV1_0() returns true.
+V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
+{
+    V1_0::Operation v10Operation;
+    v10Operation.inputs  = operation.inputs;
+    v10Operation.outputs = operation.outputs;
+    v10Operation.type    = CastToV1_0(operation.type);
+    return v10Operation;
+}
+
+// Builds the 1.1 representation of a 1.2 operation; only meaningful for
+// operations for which HandledByV1_1() returns true.
+V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
+{
+    V1_1::Operation v11Operation;
+    v11Operation.inputs  = operation.inputs;
+    v11Operation.outputs = operation.outputs;
+    v11Operation.type    = CastToV1_1(operation.type);
+    return v11Operation;
+}
+
+// Converts a single 1.2 operation into the Arm NN network under construction
+// in 'data'. Operations that already existed in HAL 1.0/1.1 are down-converted
+// and delegated to the older policies (provided the whole model is compliant
+// with that HAL level - compliantWithV1_x/convertToV1_x are defined elsewhere);
+// only genuinely new 1.2 operations are handled by the switch below.
+bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    if (HandledByV1_0(operation) && compliantWithV1_0(model))
+    {
+        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
+        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
+
+        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
+    }
+    else if (HandledByV1_1(operation) && compliantWithV1_1(model))
+    {
+        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
+        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
+
+        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
+    }
+    // 1.2-only operations supported so far: CONV_2D and DEPTHWISE_CONV_2D.
+    switch (operation.type)
+    {
+        case V1_2::OperationType::CONV_2D:
+            return ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+        case V1_2::OperationType::DEPTHWISE_CONV_2D:
+            return ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
+        default:
+            return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                        __func__, toString(operation.type).c_str());
+    }
+}
+
+} // namespace hal_1_2
+} // namespace armnn_driver \ No newline at end of file
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
new file mode 100644
index 00000000..d27e4c7a
--- /dev/null
+++ b/1.2/HalPolicy.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "../ConversionUtils.hpp"
+
+#include <HalInterfaces.h>
+
+namespace armnn_driver
+{
+namespace hal_1_2
+{
+
+/// HAL-1.2 policy: type aliases binding the templated driver machinery
+/// (ModelToINetworkConverter, ArmnnDriverImpl, ...) to the V1_2 HIDL types,
+/// plus the 1.2 operation-conversion entry point.
+class HalPolicy
+{
+public:
+    using Model = V1_2::Model;
+    using Operand = V1_2::Operand;
+    using Operation = V1_2::Operation;
+    using OperationType = V1_2::OperationType;
+    using ExecutionCallback = V1_2::IExecutionCallback;
+    using getSupportedOperations_cb = V1_2::IDevice::getSupportedOperations_1_2_cb;
+
+    /// Converts one 1.2 operation into the network being built in 'data';
+    /// returns false if the operation is unsupported.
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace hal_1_2
+} // namespace armnn_driver \ No newline at end of file