author    Sadik Armagan <sadik.armagan@arm.com>  2020-09-10 13:37:32 +0100
committer Ryan O'Shea <ryan.oshea2@arm.com>      2020-09-10 18:04:17 +0000
commit    045f6be924240a560293a3a7a0ecae49bcf0d1fa (patch)
tree      3193fb35288ad8011cdfb9082d82085f48b6792b /src/backends
parent    08f4016b8ae8ee836fc813abcbc7db826924f3ec (diff)
download  armnn-045f6be924240a560293a3a7a0ecae49bcf0d1fa.tar.gz
IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork
* Introduced ModelOptions to IBackendInternal
* Introduced ModelOptions to Network
* Added FastMathEnabled parameter to Conv2d Validate function in CL and NEON
* Added Optimizer tests

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063
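For reference, the option introduced here is consumed through OptimizerOptions::m_ModelOptions, as exercised by the tests added in this patch. A minimal usage sketch, assuming net, backends and runtime have been created as in those tests:

    // Enable fast math for the GpuAcc backend via the new ModelOptions
    // (net, backends and runtime set up as in the tests added below).
    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
    optimizerOptions.m_ModelOptions.push_back(modelOptions);

    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);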
Diffstat (limited to 'src/backends')
-rw-r--r--src/backends/backendsCommon/IBackendInternal.cpp6
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp35
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp12
-rw-r--r--src/backends/cl/CMakeLists.txt2
-rw-r--r--src/backends/cl/ClBackend.cpp24
-rw-r--r--src/backends/cl/ClBackend.hpp4
-rw-r--r--src/backends/cl/ClBackendContext.cpp17
-rw-r--r--src/backends/cl/ClBackendModelContext.cpp45
-rw-r--r--src/backends/cl/ClBackendModelContext.hpp23
-rw-r--r--src/backends/cl/ClLayerSupport.cpp33
-rw-r--r--src/backends/cl/ClLayerSupport.hpp9
-rw-r--r--src/backends/cl/backend.mk1
-rw-r--r--src/backends/cl/test/ClOptimizedNetworkTests.cpp30
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.cpp7
-rw-r--r--src/backends/cl/workloads/ClConvolution2dWorkload.hpp3
-rw-r--r--src/backends/neon/CMakeLists.txt2
-rw-r--r--src/backends/neon/NeonBackend.cpp21
-rw-r--r--src/backends/neon/NeonBackend.hpp4
-rw-r--r--src/backends/neon/NeonBackendModelContext.cpp45
-rw-r--r--src/backends/neon/NeonBackendModelContext.hpp23
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp30
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp10
-rw-r--r--src/backends/neon/backend.mk1
-rw-r--r--src/backends/neon/test/NeonOptimizedNetworkTests.cpp30
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dWorkload.cpp13
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dWorkload.hpp9
26 files changed, 397 insertions, 42 deletions
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index a9d5a5493d..1cca61efa5 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -44,6 +44,12 @@ IBackendInternal::IBackendContextPtr IBackendInternal::CreateBackendContext(cons
return IBackendContextPtr{};
}
+IBackendInternal::IBackendSpecificModelContextPtr IBackendInternal::CreateBackendSpecificModelContext(
+ const ModelOptions&) const
+{
+ return IBackendSpecificModelContextPtr{};
+}
+
IBackendInternal::IBackendProfilingContextPtr IBackendInternal::CreateBackendProfilingContext(
const IRuntime::CreationOptions&, IBackendProfilingPtr&)
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 09d7c2d568..0bafda257c 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -39,10 +39,11 @@ const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> typ
} // anonymous namespace
-bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
- const IConnectableLayer& connectableLayer,
- Optional<DataType> dataType,
- std::string& outReasonIfUnsupported)
+bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
+ const IConnectableLayer& connectableLayer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions)
{
Optional<std::string&> reason = outReasonIfUnsupported;
bool result;
@@ -61,7 +62,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
auto backendFactory = backendRegistry.GetFactory(backendId);
auto backendObject = backendFactory();
- auto layerSupportObject = backendObject->GetLayerSupport();
+ auto layerSupportObject = backendObject->GetLayerSupport(modelOptions);
switch(layer.GetType())
{
@@ -1212,12 +1213,34 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
return result;
}
+bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
+ const IConnectableLayer& connectableLayer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported)
+{
+ return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
+}
+
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
Optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
- return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
+ return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
+}
+
+// TODO merge with defaulted modelOptions above
+bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions)
+{
+ auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
+ return IsLayerConfigurationSupported(layer->GetBackendId(),
+ connectableLayer,
+ dataType,
+ outReasonIfUnsupported,
+ modelOptions);
}
// Default Implementations
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 02503f6489..68f9da650e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -34,6 +34,11 @@ public:
Optional<DataType> dataType,
std::string& outReasonIfUnsupported);
+ static bool IsLayerSupported(const IConnectableLayer& layer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions);
+
virtual bool SupportsSubTensors() const = 0;
ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
@@ -259,6 +264,13 @@ public:
virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+
+private:
+ static bool IsLayerConfigurationSupported(const BackendId& backendId,
+ const IConnectableLayer& connectableLayer,
+ Optional<DataType> dataType,
+ std::string& outReasonIfUnsupported,
+ const ModelOptions& modelOptions = {});
};
} // namespace armnn
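The new static overload forwards to the private IsLayerConfigurationSupported with the caller's ModelOptions (a collection of BackendOptions). A hedged sketch of a call site; layer and modelOptions stand in for caller-provided values:

    // Query support for a layer under specific model options;
    // 'layer' and 'modelOptions' are hypothetical caller-provided values.
    std::string reason;
    bool supported = armnn::IWorkloadFactory::IsLayerSupported(
        *layer, armnn::DataType::Float32, reason, modelOptions);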
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index f9f69f7729..4b5890af50 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -10,6 +10,8 @@ if(ARMCOMPUTECL)
ClBackendContext.cpp
ClBackendContext.hpp
ClBackendId.hpp
+ ClBackendModelContext.cpp
+ ClBackendModelContext.hpp
ClContextControl.cpp
ClContextControl.hpp
ClLayerSupport.cpp
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index f9a8993baf..49636d9b08 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -5,6 +5,7 @@
#include "ClBackend.hpp"
#include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
#include "ClWorkloadFactory.hpp"
#include "ClBackendContext.hpp"
#include "ClLayerSupport.hpp"
@@ -69,8 +70,7 @@ void ClBackend::RegisterTensorHandleFactories(TensorHandleFactoryRegistry& regis
registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(mgr));
}
-IBackendInternal::IBackendContextPtr
-ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
+IBackendInternal::IBackendContextPtr ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
{
return IBackendContextPtr{new ClBackendContext{options}};
}
@@ -86,9 +86,27 @@ IBackendInternal::Optimizations ClBackend::GetOptimizations() const
return Optimizations{};
}
+IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext(
+ const ModelOptions& modelOptions) const
+{
+ return IBackendSpecificModelContextPtr{new ClBackendModelContext{modelOptions}};
+}
+
IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport() const
{
- static ILayerSupportSharedPtr layerSupport{new ClLayerSupport};
+ static ILayerSupportSharedPtr layerSupport
+ {
+ new ClLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
+ };
+ return layerSupport;
+}
+
+IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport(const ModelOptions& modelOptions) const
+{
+ static ILayerSupportSharedPtr layerSupport
+ {
+ new ClLayerSupport(CreateBackendSpecificModelContext(modelOptions))
+ };
return layerSupport;
}
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index e85c616505..108124cac9 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -36,8 +36,12 @@ public:
IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+ IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+
+ IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
+ const ModelOptions& modelOptions) const override;
};
} // namespace armnn
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index 42f42b3023..22a4ceabd3 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -99,7 +99,6 @@ bool ParseBoolean(const BackendOptions::Var& value, bool defaultValue)
{
return value.AsBool();
}
-
return defaultValue;
}
@@ -112,22 +111,6 @@ std::string ParseFile(const BackendOptions::Var& value, std::string defaultValue
return defaultValue;
}
-template <typename F>
-void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
-{
- for (auto optionsGroup : options)
- {
- if (optionsGroup.GetBackendId() == backend)
- {
- for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
- {
- const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
- f(option.GetName(), option.GetValue());
- }
- }
- }
-}
-
void ConfigureTuner(arm_compute::CLTuner &tuner, TuningLevel level)
{
tuner.set_tune_new_kernels(true); // Turn on tuning initially.
diff --git a/src/backends/cl/ClBackendModelContext.cpp b/src/backends/cl/ClBackendModelContext.cpp
new file mode 100644
index 0000000000..0ef26b64d2
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClBackendModelContext.hpp"
+
+namespace
+{
+
+bool ParseBool(const armnn::BackendOptions::Var& value, bool defaultValue)
+{
+ if (value.IsBool())
+ {
+ return value.AsBool();
+ }
+ return defaultValue;
+}
+
+} // namespace anonymous
+
+namespace armnn
+{
+
+ClBackendModelContext::ClBackendModelContext(const ModelOptions& modelOptions)
+ : m_IsFastMathEnabled(false)
+{
+ if (!modelOptions.empty())
+ {
+ ParseOptions(modelOptions, "GpuAcc", [&](std::string name, const BackendOptions::Var& value)
+ {
+ if (name == "FastMathEnabled")
+ {
+ m_IsFastMathEnabled |= ParseBool(value, false);
+ }
+ });
+ }
+}
+
+bool ClBackendModelContext::IsFastMathEnabled() const
+{
+ return m_IsFastMathEnabled;
+}
+
+} // namespace armnn
\ No newline at end of file
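Both new model-context classes call a ParseOptions helper matching the template this patch deletes from ClBackendContext.cpp above; its new shared location is not part of this diff. For reference, the helper as it read before the move:

    // ParseOptions iterates the option groups for one backend and invokes a
    // callback per (name, value) pair — as previously in ClBackendContext.cpp.
    template <typename F>
    void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
    {
        for (auto optionsGroup : options)
        {
            if (optionsGroup.GetBackendId() == backend)
            {
                for (size_t i = 0; i < optionsGroup.GetOptionCount(); i++)
                {
                    const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
                    f(option.GetName(), option.GetValue());
                }
            }
        }
    }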
diff --git a/src/backends/cl/ClBackendModelContext.hpp b/src/backends/cl/ClBackendModelContext.hpp
new file mode 100644
index 0000000000..59f7f8ff92
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IBackendContext.hpp>
+
+namespace armnn
+{
+
+class ClBackendModelContext : public IBackendModelContext
+{
+public:
+ ClBackendModelContext(const ModelOptions& modelOptions);
+
+ bool IsFastMathEnabled() const;
+
+private:
+ bool m_IsFastMathEnabled;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 1af5c91fc5..7c1466e0e1 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,14 +5,17 @@
#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/BackendRegistry.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -155,6 +158,16 @@ bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
}
} // anonymous namespace
+ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
+ : m_ModelContextPtr(modelContextPtr)
+{
+}
+
+ClLayerSupport::ClLayerSupport()
+ : m_ModelContextPtr(nullptr)
+{
+}
+
bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -322,13 +335,29 @@ bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
+ bool isFastMathEnabled = false;
+#if defined(ARMCOMPUTECL_ENABLED)
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+#endif
+
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
reasonIfUnsupported,
input,
output,
descriptor,
weights,
- biases);
+ biases,
+ isFastMathEnabled);
}
bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index ed0486e33b..d7e2553278 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,6 +4,8 @@
//
#pragma once
+#include <armnn/backends/IBackendInternal.hpp>
+
#include <backendsCommon/LayerSupportBase.hpp>
namespace armnn
@@ -12,6 +14,10 @@ namespace armnn
class ClLayerSupport : public LayerSupportBase
{
public:
+ explicit ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+ ClLayerSupport();
+ ~ClLayerSupport() {}
+
ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
bool IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
@@ -318,6 +324,9 @@ public:
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+private:
+ const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
+
};
} // namespace armnn
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 269057a952..9cbe21edca 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -16,6 +16,7 @@ ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
BACKEND_SOURCES := \
ClBackend.cpp \
ClBackendContext.cpp \
+ ClBackendModelContext.cpp \
ClContextControl.cpp \
ClLayerSupport.cpp \
ClRegistryInitializer.cpp \
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index c2a8005e8a..2797080360 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -100,4 +100,34 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
}
+BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
+{
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ armnn::OptimizerOptions optimizerOptions;
+ armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
+ optimizerOptions.m_ModelOptions.push_back(modelOptions);
+
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+ *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+
+ BOOST_CHECK(optimizedNet);
+
+ auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+
+ BOOST_TEST(modelOptionsOut.size() == 1);
+ BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+ BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+}
+
BOOST_AUTO_TEST_SUITE_END();
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 73ec95ce9f..42c9903dc4 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -24,7 +24,8 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const Optional<TensorInfo>& biases)
+ const Optional<TensorInfo>& biases,
+ bool isFastMathEnabled)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -52,7 +53,9 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
&aclOutputInfo,
layerInfo,
arm_compute::WeightsInfo(),
- aclDilationInfo);
+ aclDilationInfo,
+ arm_compute::ActivationLayerInfo(),
+ isFastMathEnabled);
}
ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 6d7e9f3ea1..8b0afada36 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -22,7 +22,8 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
- const Optional<TensorInfo>& biases);
+ const Optional<TensorInfo>& biases,
+ bool isFastMathEnabled = false);
class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
{
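The new parameter defaults to false, so existing callers of the validate function compile unchanged; only the layer-support path above passes an explicit value. A hypothetical direct call with fast math requested (the tensor infos and descriptor are placeholders, not values from this patch):

    // Hypothetical: validate a bias-free convolution with fast math allowed.
    // inputInfo, outputInfo, descriptor and weightsInfo are placeholders.
    arm_compute::Status status = armnn::ClConvolution2dWorkloadValidate(
        inputInfo, outputInfo, descriptor, weightsInfo,
        armnn::EmptyOptional(), // no bias
        /*isFastMathEnabled=*/ true);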
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index 327276c5fc..4654de5cab 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -8,6 +8,8 @@ if(ARMCOMPUTENEON)
NeonBackend.cpp
NeonBackend.hpp
NeonBackendId.hpp
+ NeonBackendModelContext.hpp
+ NeonBackendModelContext.cpp
NeonInterceptorScheduler.hpp
NeonInterceptorScheduler.cpp
NeonLayerSupport.cpp
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 01cc6d8119..31e08ceaf5 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -5,6 +5,7 @@
#include "NeonBackend.hpp"
#include "NeonBackendId.hpp"
+#include "NeonBackendModelContext.hpp"
#include "NeonWorkloadFactory.hpp"
#include "NeonLayerSupport.hpp"
#include "NeonTensorHandleFactory.hpp"
@@ -75,9 +76,27 @@ IBackendInternal::Optimizations NeonBackend::GetOptimizations() const
return Optimizations{};
}
+IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
+ const ModelOptions& modelOptions) const
+{
+ return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
+}
+
IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
{
- static ILayerSupportSharedPtr layerSupport{new NeonLayerSupport};
+ static ILayerSupportSharedPtr layerSupport
+ {
+ new NeonLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
+ };
+ return layerSupport;
+}
+
+IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport(const ModelOptions& modelOptions) const
+{
+ static ILayerSupportSharedPtr layerSupport
+ {
+ new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
+ };
return layerSupport;
}
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index ad4ac8dde2..6458eccb6b 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -31,12 +31,16 @@ public:
const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+ IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;
void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) override;
+
+ IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
+ const ModelOptions& modelOptions) const override;
};
} // namespace armnn
diff --git a/src/backends/neon/NeonBackendModelContext.cpp b/src/backends/neon/NeonBackendModelContext.cpp
new file mode 100644
index 0000000000..2be71e5ded
--- /dev/null
+++ b/src/backends/neon/NeonBackendModelContext.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonBackendModelContext.hpp"
+
+namespace
+{
+
+bool ParseBool(const armnn::BackendOptions::Var& value, bool defaultValue)
+{
+ if (value.IsBool())
+ {
+ return value.AsBool();
+ }
+ return defaultValue;
+}
+
+} // namespace anonymous
+
+namespace armnn
+{
+
+NeonBackendModelContext::NeonBackendModelContext(const ModelOptions& modelOptions)
+ : m_IsFastMathEnabled(false)
+{
+ if (!modelOptions.empty())
+ {
+ ParseOptions(modelOptions, "CpuAcc", [&](std::string name, const BackendOptions::Var& value)
+ {
+ if (name == "FastMathEnabled")
+ {
+ m_IsFastMathEnabled |= ParseBool(value, false);
+ }
+ });
+ }
+}
+
+bool NeonBackendModelContext::IsFastMathEnabled() const
+{
+ return m_IsFastMathEnabled;
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonBackendModelContext.hpp b/src/backends/neon/NeonBackendModelContext.hpp
new file mode 100644
index 0000000000..938d8af1cd
--- /dev/null
+++ b/src/backends/neon/NeonBackendModelContext.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IBackendContext.hpp>
+
+namespace armnn
+{
+
+class NeonBackendModelContext : public IBackendModelContext
+{
+public:
+ NeonBackendModelContext(const ModelOptions& modelOptions);
+
+ bool IsFastMathEnabled() const;
+
+private:
+ bool m_IsFastMathEnabled;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 9dc8a01778..853a518b45 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -5,6 +5,7 @@
#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"
+#include "NeonBackendModelContext.hpp"
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
@@ -15,6 +16,7 @@
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
@@ -125,6 +127,16 @@ inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfU
#endif
} // anonymous namespace
+NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
+ : m_ModelContextPtr(modelContextPtr)
+{
+}
+
+NeonLayerSupport::NeonLayerSupport()
+ : m_ModelContextPtr(nullptr)
+{
+}
+
bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -311,13 +323,29 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
+ bool isFastMathEnabled = false;
+#if defined(ARMCOMPUTENEON_ENABLED)
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = armnn::PolymorphicDowncast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+#endif
+
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
reasonIfUnsupported,
input,
output,
descriptor,
weights,
- biases);
+ biases,
+ isFastMathEnabled);
}
bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index bdc905d17e..d477dcdd7c 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -4,6 +4,8 @@
//
#pragma once
+#include <armnn/backends/IBackendInternal.hpp>
+
#include <backendsCommon/LayerSupportBase.hpp>
namespace armnn
@@ -12,6 +14,11 @@ namespace armnn
class NeonLayerSupport : public LayerSupportBase
{
public:
+ explicit NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+ NeonLayerSupport();
+
+ ~NeonLayerSupport() {}
+
ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
bool IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
@@ -327,6 +334,9 @@ public:
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+private:
+ const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
+
}; // class NeonLayerSupport
} // namespace armnn
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index aeee9154ad..9bd08a1033 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -15,6 +15,7 @@ ifeq ($(ARMNN_COMPUTE_NEON_ENABLED),1)
BACKEND_SOURCES := \
NeonBackend.cpp \
+ NeonBackendModelContext.cpp \
NeonInterceptorScheduler.cpp \
NeonLayerSupport.cpp \
NeonRegistryInitializer.cpp \
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index d552c17509..4c27aca6c3 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -70,4 +70,34 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
BOOST_CHECK(!optNet);
}
+BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
+{
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ armnn::OptimizerOptions optimizerOptions;
+ armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
+ optimizerOptions.m_ModelOptions.push_back(modelOptions);
+
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+ *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+
+ BOOST_CHECK(optimizedNet);
+
+ auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+
+ BOOST_TEST(modelOptionsOut.size() == 1);
+ BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+ BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+}
+
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 144baec0ca..83f761158a 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -21,10 +21,11 @@ namespace armnn
using namespace armcomputetensorutils;
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases)
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ bool isFastMathEnabled)
{
const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -52,7 +53,9 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
&aclOutputInfo,
layerInfo,
arm_compute::WeightsInfo(),
- aclDilationInfo);
+ aclDilationInfo,
+ arm_compute::ActivationLayerInfo(),
+ isFastMathEnabled);
}
NeonConvolution2dWorkload::NeonConvolution2dWorkload(
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index 3fb408dbaa..54e08a2042 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -17,10 +17,11 @@ namespace armnn
{
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases);
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ bool isFastMathEnabled = false);
class NeonConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
{