author     David Beck <david.beck@arm.com>            2018-10-03 11:42:42 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2018-10-10 16:16:58 +0100
commit     dcb751f338b86c811a35bd0a9413ba9b8df7b718 (patch)
tree       bda3027fe7d958383eaa85105f611bfc07c3838c
parent     3041e3036398048b4f8b72e0a321d77cd222a2fa (diff)
download   armnn-dcb751f338b86c811a35bd0a9413ba9b8df7b718.tar.gz
IVGCVSW-1642 : introducing the IBackend interface
Change-Id: Iaadee0a08c0594c9a3c802a48fe346e15f2cbbb2
-rw-r--r--  include/armnn/Types.hpp                           31
-rw-r--r--  src/armnn/DeviceSpec.hpp                           8
-rw-r--r--  src/armnn/test/NeonTimerTest.cpp                   1
-rw-r--r--  src/backends/OutputHandler.cpp                     2
-rw-r--r--  src/backends/Workload.hpp                          2
-rw-r--r--  src/backends/WorkloadData.cpp                      1
-rw-r--r--  src/backends/WorkloadData.hpp                      1
-rw-r--r--  src/backends/WorkloadFactory.cpp                   17
-rw-r--r--  src/backends/WorkloadFactory.hpp                   14
-rw-r--r--  src/backends/test/ActivationTestImpl.hpp           1
-rw-r--r--  src/backends/test/BatchNormTestImpl.hpp            1
-rw-r--r--  src/backends/test/Conv2dTestImpl.hpp               1
-rw-r--r--  src/backends/test/ConvertFp16ToFp32TestImpl.hpp    1
-rw-r--r--  src/backends/test/ConvertFp32ToFp16TestImpl.hpp    1
-rw-r--r--  src/backends/test/LstmTestImpl.hpp                 1
-rw-r--r--  src/backends/test/PermuteTestImpl.hpp              1
-rw-r--r--  src/backends/test/Pooling2dTestImpl.hpp            1
-rw-r--r--  src/backends/test/ReshapeTestImpl.hpp              1
-rw-r--r--  src/backends/test/SoftmaxTestImpl.hpp              1
-rw-r--r--  src/backends/test/SplitterTestImpl.hpp             1
-rw-r--r--  src/backends/test/WorkloadTestUtils.hpp            1
21 files changed, 54 insertions(+), 35 deletions(-)
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 172df1b454..d0a0174ecd 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -5,6 +5,7 @@
#pragma once
#include <array>
+#include <memory>
namespace armnn
{
@@ -67,9 +68,9 @@ enum class PoolingAlgorithm
enum class PaddingMethod
{
/// The padding fields count, but are ignored
- IgnoreValue = 0,
+ IgnoreValue = 0,
/// The padding fields don't count and are ignored
- Exclude = 1
+ Exclude = 1
};
enum class NormalizationAlgorithmChannel
@@ -80,9 +81,9 @@ enum class NormalizationAlgorithmChannel
enum class NormalizationAlgorithmMethod
{
- /// Krichevsky 2012: Local Brightness Normalization
- LocalBrightness = 0,
- /// Jarret 2009: Local Contrast Normalization
+ /// Krichevsky 2012: Local Brightness Normalization
+ LocalBrightness = 0,
+ /// Jarret 2009: Local Contrast Normalization
LocalContrast = 1
};
@@ -95,14 +96,28 @@ enum class OutputShapeRounding
enum class Compute
{
/// CPU Execution: Reference C++ kernels
- CpuRef = 0,
+ CpuRef = 0,
/// CPU Execution: NEON: ArmCompute
- CpuAcc = 1,
+ CpuAcc = 1,
/// GPU Execution: OpenCL: ArmCompute
- GpuAcc = 2,
+ GpuAcc = 2,
Undefined = 5
};
+/// Each backend should implement an IBackend.
+class IBackend
+{
+protected:
+ IBackend() {}
+ virtual ~IBackend() {}
+
+public:
+ virtual const std::string& GetId() const = 0;
+};
+
+using IBackendPtr = std::shared_ptr<IBackend>;
+
+/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
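
Note: the hunk above adds the new IBackend abstraction and the IBackendPtr alias to the public headers. As a rough sketch (not part of this commit), a concrete backend would derive from armnn::IBackend and return a stable identifier from GetId(); the class name ExampleBackend and the factory helper below are hypothetical.

// Sketch only: ExampleBackend is a hypothetical implementer of the new interface.
#include <armnn/Types.hpp>
#include <memory>
#include <string>

class ExampleBackend : public armnn::IBackend
{
public:
    const std::string& GetId() const override
    {
        // Identifier used to refer to this backend; a function-local static
        // keeps the reference valid for the caller.
        static const std::string id = "ExampleBackend";
        return id;
    }
};

// Backends are shared via the IBackendPtr alias (std::shared_ptr<IBackend>).
armnn::IBackendPtr CreateExampleBackend()
{
    return std::make_shared<ExampleBackend>();
}
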
diff --git a/src/armnn/DeviceSpec.hpp b/src/armnn/DeviceSpec.hpp
index 2eb817479f..dbc04f0af6 100644
--- a/src/armnn/DeviceSpec.hpp
+++ b/src/armnn/DeviceSpec.hpp
@@ -4,8 +4,9 @@
//
#pragma once
-#include "armnn/Types.hpp"
+#include <armnn/Types.hpp>
#include <set>
+#include <vector>
namespace armnn
{
@@ -16,6 +17,11 @@ public:
DeviceSpec() {}
virtual ~DeviceSpec() {}
+ virtual std::vector<IBackendPtr> GetBackends() const
+ {
+ return std::vector<IBackendPtr>();
+ }
+
std::set<Compute> m_SupportedComputeDevices;
};
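
For context, a minimal sketch of how the new hook might be consumed (the helper name LogAvailableBackends is illustrative, not from the commit): the base DeviceSpec still returns an empty vector, and derived specs are expected to override GetBackends().

// Illustrative only: iterate over whatever backends a DeviceSpec reports.
#include <iostream>

#include "DeviceSpec.hpp" // src/armnn/DeviceSpec.hpp (internal header)

void LogAvailableBackends(const armnn::DeviceSpec& spec)
{
    // With this change the base class returns an empty vector, so nothing
    // is printed until a derived spec overrides GetBackends().
    for (const armnn::IBackendPtr& backend : spec.GetBackends())
    {
        std::cout << "backend: " << backend->GetId() << std::endl;
    }
}
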
diff --git a/src/armnn/test/NeonTimerTest.cpp b/src/armnn/test/NeonTimerTest.cpp
index f82924e993..6d0429c8b9 100644
--- a/src/armnn/test/NeonTimerTest.cpp
+++ b/src/armnn/test/NeonTimerTest.cpp
@@ -11,7 +11,6 @@
#include <armnn/TypesUtils.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/WorkloadFactory.hpp>
#include <backends/test/LayerTests.hpp>
#include <backends/test/TensorCopyUtils.hpp>
diff --git a/src/backends/OutputHandler.cpp b/src/backends/OutputHandler.cpp
index 15e90c7ba8..5516c221c5 100644
--- a/src/backends/OutputHandler.cpp
+++ b/src/backends/OutputHandler.cpp
@@ -7,7 +7,7 @@
#include <boost/assert.hpp>
#include <boost/log/trivial.hpp>
-#include "WorkloadFactory.hpp"
+#include <backends/WorkloadFactory.hpp>
#include "WorkloadDataCollector.hpp"
#include "ITensorHandle.hpp"
diff --git a/src/backends/Workload.hpp b/src/backends/Workload.hpp
index cf9c6f21e5..4cfffd4646 100644
--- a/src/backends/Workload.hpp
+++ b/src/backends/Workload.hpp
@@ -12,7 +12,7 @@
namespace armnn
{
-// Workload interface to enqueue a layer computation.
+/// Workload interface to enqueue a layer computation.
class IWorkload
{
public:
diff --git a/src/backends/WorkloadData.cpp b/src/backends/WorkloadData.cpp
index 8b28b476b2..32ed97a052 100644
--- a/src/backends/WorkloadData.cpp
+++ b/src/backends/WorkloadData.cpp
@@ -5,7 +5,6 @@
#include "WorkloadData.hpp"
#include "CpuTensorHandle.hpp"
-#include "WorkloadInfo.hpp"
#include <algorithm>
#include <string>
diff --git a/src/backends/WorkloadData.hpp b/src/backends/WorkloadData.hpp
index 9fcc0447a3..aac2228695 100644
--- a/src/backends/WorkloadData.hpp
+++ b/src/backends/WorkloadData.hpp
@@ -10,6 +10,7 @@
#include <armnn/Tensor.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
+#include <backends/WorkloadInfo.hpp>
#include <InternalTypes.hpp>
#include <backends/OutputHandler.hpp>
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index a70097eb82..dc9c1bc624 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -2,7 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "WorkloadFactory.hpp"
+#include <backends/WorkloadFactory.hpp>
+
#include <backends/reference/RefWorkloadFactory.hpp>
#include <backends/neon/NeonWorkloadFactory.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
@@ -53,12 +54,16 @@ namespace
}
}
-bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
- std::string& outReasonIfUnsupported)
+bool IWorkloadFactory::IsLayerSupported(Compute compute,
+ const IConnectableLayer& connectableLayer,
+ boost::optional<DataType> dataType,
+ std::string& outReasonIfUnsupported)
{
constexpr size_t reasonCapacity = 1024;
char reason[reasonCapacity];
bool result;
+ const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
+
switch(layer.GetType())
{
case LayerType::Activation:
@@ -583,10 +588,12 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
return result;
}
-bool IWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
- return IsLayerSupported(layer.GetComputeDevice(), layer, dataType, outReasonIfUnsupported);
+ auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
+ return IsLayerSupported(layer->GetComputeDevice(), connectableLayer, dataType, outReasonIfUnsupported);
}
}
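
The signature change above lets callers pass the public IConnectableLayer type instead of the internal Layer class; the factory still downcasts internally, so the reference must refer to a layer created by the library's own graph rather than a user-defined IConnectableLayer implementation. A hedged usage sketch follows, with the function name CheckLayerOnCpuRef, the header locations, and the chosen data type purely illustrative:

// Sketch only: query support for a layer on the reference backend.
#include <string>

#include <armnn/INetwork.hpp>           // assumed location of IConnectableLayer
#include <backends/WorkloadFactory.hpp> // IWorkloadFactory

bool CheckLayerOnCpuRef(const armnn::IConnectableLayer& layer)
{
    std::string reason;
    const bool supported = armnn::IWorkloadFactory::IsLayerSupported(
        armnn::Compute::CpuRef,
        layer,
        armnn::DataType::Float32, // implicitly wrapped in boost::optional<DataType>
        reason);

    // On failure, 'reason' holds the backend's explanation.
    return supported;
}
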
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index 77e810c9ad..38448ca378 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -4,10 +4,10 @@
//
#pragma once
-#include "Workload.hpp"
#include <memory>
-#include "armnn/TensorFwd.hpp"
-#include "OutputHandler.hpp"
+#include <armnn/TensorFwd.hpp>
+#include <backends/OutputHandler.hpp>
+#include <backends/Workload.hpp>
#include <boost/optional.hpp>
namespace armnn
@@ -32,9 +32,13 @@ public:
/// Inform the memory manager to acquire memory
virtual void Acquire() { }
- static bool IsLayerSupported(Compute compute, const Layer& layer, boost::optional<DataType> dataType,
+ static bool IsLayerSupported(Compute compute,
+ const IConnectableLayer& layer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported);
- static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+
+ static bool IsLayerSupported(const IConnectableLayer& layer,
+ boost::optional<DataType> dataType,
std::string& outReasonIfUnsupported);
virtual bool SupportsSubTensors() const = 0;
diff --git a/src/backends/test/ActivationTestImpl.hpp b/src/backends/test/ActivationTestImpl.hpp
index e7d3d6a9c1..63716453cd 100644
--- a/src/backends/test/ActivationTestImpl.hpp
+++ b/src/backends/test/ActivationTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
index 35f4e4c89c..d551221ae1 100644
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ b/src/backends/test/BatchNormTestImpl.hpp
@@ -6,7 +6,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp
index ce193659a1..c593c7ba26 100644
--- a/src/backends/test/Conv2dTestImpl.hpp
+++ b/src/backends/test/Conv2dTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/ConvertFp16ToFp32TestImpl.hpp b/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
index 483689df4c..2455e9691a 100644
--- a/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
+++ b/src/backends/test/ConvertFp16ToFp32TestImpl.hpp
@@ -10,7 +10,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Half.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/ConvertFp32ToFp16TestImpl.hpp b/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
index e4698a93e5..4eee274357 100644
--- a/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
+++ b/src/backends/test/ConvertFp32ToFp16TestImpl.hpp
@@ -10,7 +10,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Half.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/LstmTestImpl.hpp b/src/backends/test/LstmTestImpl.hpp
index 7d57c86a67..a7e595c941 100644
--- a/src/backends/test/LstmTestImpl.hpp
+++ b/src/backends/test/LstmTestImpl.hpp
@@ -12,7 +12,6 @@
#include "QuantizeHelper.hpp"
#include <backends/CpuTensorHandle.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <backends/WorkloadFactory.hpp>
LayerTestResult<float, 2> LstmNoCifgNoPeepholeNoProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/test/PermuteTestImpl.hpp b/src/backends/test/PermuteTestImpl.hpp
index 2caf2c8d2b..9e5dda491f 100644
--- a/src/backends/test/PermuteTestImpl.hpp
+++ b/src/backends/test/PermuteTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/Pooling2dTestImpl.hpp b/src/backends/test/Pooling2dTestImpl.hpp
index 4c69fb9f46..c87548cd5b 100644
--- a/src/backends/test/Pooling2dTestImpl.hpp
+++ b/src/backends/test/Pooling2dTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/ReshapeTestImpl.hpp b/src/backends/test/ReshapeTestImpl.hpp
index cbd3b58798..198de53595 100644
--- a/src/backends/test/ReshapeTestImpl.hpp
+++ b/src/backends/test/ReshapeTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/SoftmaxTestImpl.hpp b/src/backends/test/SoftmaxTestImpl.hpp
index 7ca5f70e85..0bca8be49d 100644
--- a/src/backends/test/SoftmaxTestImpl.hpp
+++ b/src/backends/test/SoftmaxTestImpl.hpp
@@ -7,7 +7,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"
diff --git a/src/backends/test/SplitterTestImpl.hpp b/src/backends/test/SplitterTestImpl.hpp
index 4578ce5792..396cc1bcb2 100644
--- a/src/backends/test/SplitterTestImpl.hpp
+++ b/src/backends/test/SplitterTestImpl.hpp
@@ -6,7 +6,6 @@
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
#include <test/TensorHelpers.hpp>
diff --git a/src/backends/test/WorkloadTestUtils.hpp b/src/backends/test/WorkloadTestUtils.hpp
index a7b75309f7..97f8ebd7d2 100644
--- a/src/backends/test/WorkloadTestUtils.hpp
+++ b/src/backends/test/WorkloadTestUtils.hpp
@@ -5,7 +5,6 @@
#pragma once
#include <armnn/Tensor.hpp>
-#include <backends/WorkloadInfo.hpp>
namespace armnn
{