author    Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>   2019-04-04 13:33:10 +0100
committer Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>   2019-04-04 13:33:10 +0100
commit    d5fd9767d426ca465eb660be062eecce51ad1097 (patch)
tree      b2a2b684c9072dda7b52b40e579a5b0c4aae3c21
parent    16196e267833178dae62926ff090c50ec6813ad4 (diff)
download  android-nn-driver-d5fd9767d426ca465eb660be062eecce51ad1097.tar.gz
IVGCVSW-2886 Support multiple backends in Android driver
Change-Id: I4abe1f750801911570b6dc65c187b828c5929b5f
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
-rw-r--r--  1.0/HalPolicy.cpp              206
-rw-r--r--  1.1/HalPolicy.cpp              106
-rw-r--r--  ArmnnDriverImpl.cpp             14
-rw-r--r--  ConversionUtils.cpp             14
-rw-r--r--  ConversionUtils.hpp             59
-rw-r--r--  DriverOptions.cpp               51
-rw-r--r--  DriverOptions.hpp                6
-rw-r--r--  ModelToINetworkConverter.cpp     4
-rw-r--r--  ModelToINetworkConverter.hpp     3
9 files changed, 249 insertions(+), 214 deletions(-)
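
The change is mechanical but wide: every per-layer support check that previously consulted a single armnn::Compute device now consults an ordered list of armnn::BackendIds. For callers the difference looks roughly like the sketch below (hypothetical surrounding code; model and skippedOperations are assumed to exist — see ModelToINetworkConverter.cpp further down):

    // Before: one compute device drives every support check.
    ModelToINetworkConverter<HalPolicy> oldConverter(armnn::Compute::GpuAcc,
                                                     model,
                                                     skippedOperations);

    // After: an ordered preference list; a layer is accepted if any
    // backend in the list supports it.
    std::vector<armnn::BackendId> backends = { "GpuAcc", "CpuAcc", "CpuRef" };
    ModelToINetworkConverter<HalPolicy> newConverter(backends,
                                                     model,
                                                     skippedOperations);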
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index a5912620..dee4a7a5 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -90,12 +90,12 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
- if (!IsLayerSupported(__func__,
- armnn::IsAdditionSupported,
- data.m_Compute,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outInfo))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsAdditionSupported,
+ data.m_Backends,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outInfo))
{
return false;
}
@@ -289,12 +289,12 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo
std::vector<const armnn::TensorInfo*> inputTensorInfos;
std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
[](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
- if (!IsLayerSupported(__func__,
- armnn::IsMergerSupported,
- data.m_Compute,
- inputTensorInfos,
- outputInfo,
- mergerDescriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsMergerSupported,
+ data.m_Backends,
+ inputTensorInfos,
+ outputInfo,
+ mergerDescriptor))
{
return false;
}
@@ -420,14 +420,14 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
desc.m_BiasEnabled = true;
armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
- if (!IsLayerSupported(__func__,
- armnn::IsConvolution2dSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- desc,
- weights.GetInfo(),
- biases))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsConvolution2dSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ desc,
+ weights.GetInfo(),
+ biases))
{
return false;
}
@@ -546,14 +546,14 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
desc.m_BiasEnabled = true;
armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
- if (!IsLayerSupported(__func__,
- armnn::IsDepthwiseConvolutionSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- desc,
- weights.GetInfo(),
- biases))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsDepthwiseConvolutionSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ desc,
+ weights.GetInfo(),
+ biases))
{
return false;
}
@@ -589,11 +589,11 @@ bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, Con
return Fail("%s: Operation has invalid outputs", __func__);
}
- if (!IsLayerSupported(__func__,
- armnn::IsFloorSupported,
- data.m_Compute,
- input.GetTensorInfo(),
- GetTensorInfoForOperand(*outputOperand)))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsFloorSupported,
+ data.m_Backends,
+ input.GetTensorInfo(),
+ GetTensorInfoForOperand(*outputOperand)))
{
return false;
}
@@ -667,14 +667,14 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
desc.m_TransposeWeightMatrix = true;
desc.m_BiasEnabled = true;
- if (!IsLayerSupported(__func__,
- armnn::IsFullyConnectedSupported,
- data.m_Compute,
- reshapedInfo,
- outputInfo,
- weights.GetInfo(),
- bias.GetInfo(),
- desc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsFullyConnectedSupported,
+ data.m_Backends,
+ reshapedInfo,
+ outputInfo,
+ weights.GetInfo(),
+ bias.GetInfo(),
+ desc))
{
return false;
}
@@ -746,12 +746,12 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
// window rather than the radius as in AndroidNN.
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
- if (!IsLayerSupported(__func__,
- armnn::IsNormalizationSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- descriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsNormalizationSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
{
return false;
}
@@ -1037,34 +1037,34 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv
cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
}
- if (!IsLayerSupported(__func__,
- armnn::IsLstmSupported,
- data.m_Compute,
- inputInfo,
- outputStateInInfo,
- cellStateInInfo,
- scratchBufferInfo,
- outputStateOutInfo,
- cellStateOutInfo,
- outputInfo,
- desc,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- forgetGateBias,
- cellBias,
- outputGateBias,
- inputToInputWeights,
- recurrentToInputWeights,
- cellToInputWeights,
- inputGateBias,
- projectionWeights,
- projectionBias,
- cellToForgetWeights,
- cellToOutputWeights))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsLstmSupported,
+ data.m_Backends,
+ inputInfo,
+ outputStateInInfo,
+ cellStateInInfo,
+ scratchBufferInfo,
+ outputStateOutInfo,
+ cellStateOutInfo,
+ outputInfo,
+ desc,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ inputToInputWeights,
+ recurrentToInputWeights,
+ cellToInputWeights,
+ inputGateBias,
+ projectionWeights,
+ projectionBias,
+ cellToForgetWeights,
+ cellToOutputWeights))
{
return false;
}
@@ -1102,12 +1102,12 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
armnn::L2NormalizationDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;
- if (!IsLayerSupported(__func__,
- armnn::IsL2NormalizationSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- desc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsL2NormalizationSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ desc))
{
return false;
}
@@ -1156,12 +1156,12 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
- if (!IsLayerSupported(__func__,
- armnn::IsMultiplicationSupported,
- data.m_Compute,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outInfo))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsMultiplicationSupported,
+ data.m_Backends,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outInfo))
{
return false;
}
@@ -1232,12 +1232,12 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return Fail("%s: Operation has invalid inputs", __func__);
}
- if (!IsLayerSupported(__func__,
- armnn::IsSoftmaxSupported,
- data.m_Compute,
- input.GetTensorInfo(),
- outInfo,
- desc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsSoftmaxSupported,
+ data.m_Backends,
+ input.GetTensorInfo(),
+ outInfo,
+ desc))
{
return false;
}
@@ -1311,11 +1311,11 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C
reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
requestedShape.dimensions.data());
- if (!IsLayerSupported(__func__,
- armnn::IsReshapeSupported,
- data.m_Compute,
- input.GetTensorInfo(),
- reshapeDescriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsReshapeSupported,
+ data.m_Backends,
+ input.GetTensorInfo(),
+ reshapeDescriptor))
{
return false;
}
@@ -1347,11 +1347,11 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m
armnn::ResizeBilinearDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NHWC;
- if (!IsLayerSupported(__func__,
- armnn::IsResizeBilinearSupported,
- data.m_Compute,
- inputInfo,
- outputInfo))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsResizeBilinearSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo))
{
return false;
}
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 5530d310..9a0c1bf2 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -76,12 +76,12 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
- if (!IsLayerSupported(__func__,
- armnn::IsDivisionSupported,
- data.m_Compute,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outInfo))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsDivisionSupported,
+ data.m_Backends,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outInfo))
{
return false;
}
@@ -127,12 +127,12 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
- if (!IsLayerSupported(__func__,
- armnn::IsSubtractionSupported,
- data.m_Compute,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outInfo))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsSubtractionSupported,
+ data.m_Backends,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outInfo))
{
return false;
}
@@ -200,12 +200,12 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsMeanSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- descriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsMeanSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
{
return false;
}
@@ -266,12 +266,12 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsPadSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- descriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsPadSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
{
return false;
}
@@ -351,12 +351,12 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
}
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsSpaceToBatchNdSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- descriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsSpaceToBatchNdSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
{
return false;
}
@@ -428,11 +428,11 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C
return Fail("%s: Could not read output 0", __func__);
}
- if (!IsLayerSupported(__func__,
- armnn::IsReshapeSupported,
- data.m_Compute,
- inputInfo,
- reshapeDesc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsReshapeSupported,
+ data.m_Backends,
+ inputInfo,
+ reshapeDesc))
{
return false;
}
@@ -517,12 +517,12 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod
}
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsStridedSliceSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- descriptor))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsStridedSliceSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ descriptor))
{
return false;
}
@@ -590,12 +590,12 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model,
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsPermuteSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- permuteDesc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsPermuteSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ permuteDesc))
{
return false;
}
@@ -657,12 +657,12 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (!IsLayerSupported(__func__,
- armnn::IsBatchToSpaceNdSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- batchToSpaceNdDesc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsBatchToSpaceNdSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ batchToSpaceNdDesc))
{
return false;
}
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index f6456ee1..40bd80ab 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -77,9 +77,9 @@ Return<void> ArmnnDriverImpl<HalPolicy>::getSupportedOperations(const armnn::IRu
}
// Attempt to convert the model to an ArmNN input network (INetwork).
- ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
- model,
- options.GetForcedUnsupportedOperations());
+ ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+ model,
+ options.GetForcedUnsupportedOperations());
if (modelConverter.GetConversionResult() != ConversionResult::Success
&& modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
@@ -132,9 +132,9 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
// at this point we're being asked to prepare a model that we've already declared support for
// and the operation indices may be different to those in getSupportedOperations anyway.
set<unsigned int> unsupportedOperations;
- ModelToINetworkConverter<HalPolicy> modelConverter(options.GetComputeDevice(),
- model,
- unsupportedOperations);
+ ModelToINetworkConverter<HalPolicy> modelConverter(options.GetBackends(),
+ model,
+ unsupportedOperations);
if (modelConverter.GetConversionResult() != ConversionResult::Success)
{
@@ -151,7 +151,7 @@ Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
try
{
optNet = armnn::Optimize(*modelConverter.GetINetwork(),
- {options.GetComputeDevice()},
+ options.GetBackends(),
runtime->GetDeviceSpec(),
OptOptions,
errMessages);
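
Handing the whole list to armnn::Optimize means the optimizer can place each layer on the first backend in the preference order that supports it, rather than succeeding or failing wholesale on one device. A minimal sketch of the call outside the driver, assuming network, runtime and errMessages are set up as in prepareModel above:

    std::vector<std::string> errMessages;
    std::vector<armnn::BackendId> backends = { "GpuAcc", "CpuAcc", "CpuRef" };
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network,                  // the converted INetwork
                        backends,                  // preference-ordered backends
                        runtime->GetDeviceSpec(),  // devices present at runtime
                        armnn::OptimizerOptions(),
                        errMessages);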
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index 60d1a1f4..fb71c759 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -150,12 +150,12 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
}
}
- if (!IsLayerSupported(__func__,
- armnn::IsActivationSupported,
- data.m_Compute,
- prevLayer->GetOutputSlot(0).GetTensorInfo(),
- tensorInfo,
- activationDesc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsActivationSupported,
+ data.m_Backends,
+ prevLayer->GetOutputSlot(0).GetTensorInfo(),
+ tensorInfo,
+ activationDesc))
{
return nullptr;
}
@@ -169,4 +169,4 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
return activationLayer;
}
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index ca1f0aea..de4516c0 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -19,6 +19,7 @@
#include <boost/test/tools/floating_point_comparison.hpp>
#include <log/log.h>
+#include <vector>
namespace armnn_driver
{
@@ -29,12 +30,12 @@ namespace armnn_driver
struct ConversionData
{
- ConversionData(armnn::Compute compute)
- : m_Compute(compute)
- , m_Network(nullptr, nullptr)
+ ConversionData(const std::vector<armnn::BackendId>& backends)
+ : m_Backends(backends)
+ , m_Network(nullptr, nullptr)
{}
- const armnn::Compute m_Compute;
+ const std::vector<armnn::BackendId> m_Backends;
armnn::INetworkPtr m_Network;
std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
std::vector<android::nn::RunTimePoolInfo> m_MemPools;
@@ -139,6 +140,24 @@ bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... ar
}
}
+template<typename IsLayerSupportedFunc, typename ... Args>
+bool IsLayerSupportedForAnyBackend(const char* funcName,
+ IsLayerSupportedFunc f,
+ const std::vector<armnn::BackendId>& backends,
+ Args&&... args)
+{
+ for (auto&& backend : backends)
+ {
+ if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
+ {
+ return true;
+ }
+ }
+
+ ALOGD("%s: not supported by any specified backend", funcName);
+ return false;
+}
+
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
@@ -809,10 +828,10 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
if (tensorPin.IsValid())
{
- if (!IsLayerSupported(__func__,
- armnn::IsConstantSupported,
- data.m_Compute,
- tensorPin.GetConstTensor().GetInfo()))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsConstantSupported,
+ data.m_Backends,
+ tensorPin.GetConstTensor().GetInfo()))
{
return LayerInputHandle();
}
@@ -859,12 +878,12 @@ bool ConvertToActivation(const HalOperation& operation,
return false;
}
const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
- if (!IsLayerSupported(__func__,
- armnn::IsActivationSupported,
- data.m_Compute,
- input.GetTensorInfo(),
- outInfo,
- activationDesc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsActivationSupported,
+ data.m_Backends,
+ input.GetTensorInfo(),
+ outInfo,
+ activationDesc))
{
return false;
}
@@ -976,12 +995,12 @@ bool ConvertPooling2d(const HalOperation& operation,
}
}
- if (!IsLayerSupported(__func__,
- armnn::IsPooling2dSupported,
- data.m_Compute,
- inputInfo,
- outputInfo,
- desc))
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsPooling2dSupported,
+ data.m_Backends,
+ inputInfo,
+ outputInfo,
+ desc))
{
return false;
}
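
The IsLayerSupportedForAnyBackend template added above is the heart of the change; every call site in the HalPolicy and ConversionUtils diffs follows the same shape. A standalone usage sketch, assuming the tensor infos and descriptor are populated as in ConvertPooling2d:

    std::vector<armnn::BackendId> backends = { "CpuAcc", "GpuAcc" };
    ConversionData data(backends);
    // ... fill in inputInfo, outputInfo and desc ...
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;  // no configured backend can run this layer
    }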
diff --git a/DriverOptions.cpp b/DriverOptions.cpp
index 10919a7b..cd4b6bfc 100644
--- a/DriverOptions.cpp
+++ b/DriverOptions.cpp
@@ -16,6 +16,7 @@
#include <boost/algorithm/string/predicate.hpp>
#include <boost/program_options.hpp>
+#include <algorithm>
#include <cassert>
#include <functional>
#include <string>
@@ -28,7 +29,7 @@ namespace armnn_driver
{
DriverOptions::DriverOptions(armnn::Compute computeDevice, bool fp16Enabled)
- : m_ComputeDevice(computeDevice)
+ : m_Backends({computeDevice})
, m_VerboseLogging(false)
, m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
, m_EnableGpuProfiling(false)
@@ -36,24 +37,32 @@ DriverOptions::DriverOptions(armnn::Compute computeDevice, bool fp16Enabled)
{
}
-DriverOptions::DriverOptions(int argc, char** argv)
- : m_ComputeDevice(armnn::Compute::GpuAcc)
+DriverOptions::DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled)
+ : m_Backends(backends)
, m_VerboseLogging(false)
, m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
, m_EnableGpuProfiling(false)
+ , m_fp16Enabled(fp16Enabled)
+{
+}
+
+DriverOptions::DriverOptions(int argc, char** argv)
+ : m_VerboseLogging(false)
+ , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
+ , m_EnableGpuProfiling(false)
, m_fp16Enabled(false)
{
namespace po = boost::program_options;
- std::string computeDeviceAsString;
std::string unsupportedOperationsAsString;
std::string clTunedParametersModeAsString;
po::options_description optionsDesc("Options");
optionsDesc.add_options()
("compute,c",
- po::value<std::string>(&computeDeviceAsString)->default_value("GpuAcc"),
- "Which device to run layers on by default. Possible values are: CpuRef, CpuAcc, GpuAcc")
+ po::value<std::vector<std::string>>()->
+ multitoken()->default_value(std::vector<std::string>{"GpuAcc"}, "{GpuAcc}"),
+ "Which backend to run layers on. Possible values are: CpuRef, CpuAcc, GpuAcc")
("verbose-logging,v",
po::bool_switch(&m_VerboseLogging),
@@ -99,22 +108,26 @@ DriverOptions::DriverOptions(int argc, char** argv)
ALOGW("An error occurred attempting to parse program options: %s", e.what());
}
- if (computeDeviceAsString == "CpuRef")
- {
- m_ComputeDevice = armnn::Compute::CpuRef;
- }
- else if (computeDeviceAsString == "GpuAcc")
- {
- m_ComputeDevice = armnn::Compute::GpuAcc;
- }
- else if (computeDeviceAsString == "CpuAcc")
+ const std::vector<std::string> backends = variablesMap["compute"].as<std::vector<std::string>>();
+ const std::vector<std::string> supportedDevices({"CpuRef", "CpuAcc", "GpuAcc"});
+ m_Backends.reserve(backends.size());
+
+ for (auto&& backend : backends)
{
- m_ComputeDevice = armnn::Compute::CpuAcc;
+ if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
+ {
+ ALOGW("Requested unknown backend %s", backend.c_str());
+ }
+ else
+ {
+ m_Backends.emplace_back(backend);
+ }
}
- else
+
+ if (m_Backends.empty())
{
- ALOGW("Requested unknown compute device %s. Defaulting to compute id %s",
- computeDeviceAsString.c_str(), GetComputeDeviceAsCString(m_ComputeDevice));
+ m_Backends.emplace_back("GpuAcc");
+ ALOGW("No known backend specified. Defaulting to: GpuAcc");
}
if (!unsupportedOperationsAsString.empty())
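
With the option now multitoken, the service command line can list several backends in priority order, e.g. --compute GpuAcc CpuAcc or -c GpuAcc -c CpuAcc; unrecognised names are warned about and skipped, and GpuAcc remains the default when nothing valid is supplied. A self-contained sketch of the same boost::program_options pattern (not driver code):

    #include <boost/program_options.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main(int argc, char** argv)
    {
        namespace po = boost::program_options;
        po::options_description desc("Options");
        desc.add_options()
            ("compute,c",
             po::value<std::vector<std::string>>()
                 ->multitoken()
                 ->default_value(std::vector<std::string>{"GpuAcc"}, "{GpuAcc}"),
             "Backends to run layers on, in priority order");

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        // e.g. "./driver -c GpuAcc -c CpuAcc" prints GpuAcc then CpuAcc
        for (const auto& backend : vm["compute"].as<std::vector<std::string>>())
        {
            std::cout << backend << "\n";
        }
        return 0;
    }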
diff --git a/DriverOptions.hpp b/DriverOptions.hpp
index 7271ac16..637ccd64 100644
--- a/DriverOptions.hpp
+++ b/DriverOptions.hpp
@@ -9,6 +9,7 @@
#include <set>
#include <string>
+#include <vector>
namespace armnn_driver
{
@@ -17,10 +18,11 @@ class DriverOptions
{
public:
DriverOptions(armnn::Compute computeDevice, bool fp16Enabled = false);
+ DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled);
DriverOptions(int argc, char** argv);
DriverOptions(DriverOptions&& other) = default;
- armnn::Compute GetComputeDevice() const { return m_ComputeDevice; }
+ const std::vector<armnn::BackendId>& GetBackends() const { return m_Backends; }
bool IsVerboseLoggingEnabled() const { return m_VerboseLogging; }
const std::string& GetRequestInputsAndOutputsDumpDir() const { return m_RequestInputsAndOutputsDumpDir; }
const std::set<unsigned int>& GetForcedUnsupportedOperations() const { return m_ForcedUnsupportedOperations; }
@@ -30,7 +32,7 @@ public:
bool GetFp16Enabled() const { return m_fp16Enabled; }
private:
- armnn::Compute m_ComputeDevice;
+ std::vector<armnn::BackendId> m_Backends;
bool m_VerboseLogging;
std::string m_RequestInputsAndOutputsDumpDir;
std::set<unsigned int> m_ForcedUnsupportedOperations;
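
Note the new vector overload takes fp16Enabled with no default, so single-argument construction still resolves unambiguously to the legacy armnn::Compute constructor. Both paths, sketched:

    // Legacy single-device form (fp16Enabled defaults to false).
    armnn_driver::DriverOptions single(armnn::Compute::CpuAcc);

    // New ordered-list form; the explicit bool avoids overload ambiguity.
    std::vector<armnn::BackendId> backends = { "CpuAcc", "GpuAcc" };
    armnn_driver::DriverOptions multi(backends, /*fp16Enabled=*/false);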
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 8bf84e94..fccd7594 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -13,10 +13,10 @@ namespace armnn_driver
{
template<typename HalPolicy>
-ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(armnn::Compute compute,
+ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
const HalModel& model,
const std::set<unsigned int>& forcedUnsupportedOperations)
- : m_Data(compute)
+ : m_Data(backends)
, m_Model(model)
, m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
, m_ConversionResult(ConversionResult::Success)
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index a3758fd5..e78c5f02 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -11,6 +11,7 @@
#include <armnn/ArmNN.hpp>
#include <set>
+#include <vector>
namespace armnn_driver
{
@@ -30,7 +31,7 @@ class ModelToINetworkConverter
public:
using HalModel = typename HalPolicy::Model;
- ModelToINetworkConverter(armnn::Compute compute,
+ ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
const HalModel& model,
const std::set<unsigned int>& forcedUnsupportedOperations);