author     David Beck <david.beck@arm.com>             2018-09-05 15:00:38 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>   2018-09-25 14:54:29 +0100
commit     c2044fe9d26a8b6afca48aee04bd5d29f8e27b8d (patch)
tree       967206cdc246131ce1d834f094c4556ed3d62fac
parent     2b7a1581f18b88b55153d4edc5cee0e602fd1bfc (diff)
download   armnn-c2044fe9d26a8b6afca48aee04bd5d29f8e27b8d.tar.gz
IVGCVSW-1804 : Add Subtraction layer types and placeholders
Change-Id: Ib9a477e5ce590df74ba05fece77258b9204f6523
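
For context, a minimal caller-side sketch (not part of this patch) of probing the new
IsSubtractionSupported entry point added to include/armnn/LayerSupport.hpp below. The
function signature is taken from the diff; the headers, enum values, and TensorInfo
construction are assumptions about the armnn API of this era. Since every backend only
gains placeholders in this change, each is expected to report the layer as unsupported.

    // Hypothetical sketch: query the new IsSubtractionSupported entry point.
    // All backend implementations are placeholders at this stage, so "no" is
    // the expected answer until the workloads are implemented.
    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>

    int main()
    {
        const armnn::TensorInfo info(armnn::TensorShape({1, 4}), armnn::DataType::Float32);

        char reason[1024] = {0};
        const bool supported = armnn::IsSubtractionSupported(armnn::Compute::CpuRef,
                                                             info,   // input0
                                                             info,   // input1
                                                             info,   // output
                                                             reason,
                                                             sizeof(reason));

        std::cout << "Subtraction supported: " << (supported ? "yes" : "no") << std::endl;
        return 0;
    }
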
-rw-r--r--  Android.mk                                             |  1
-rw-r--r--  CMakeLists.txt                                         |  2
-rw-r--r--  include/armnn/LayerSupport.hpp                         |  7
-rw-r--r--  src/armnn/InternalTypes.cpp                            |  1
-rw-r--r--  src/armnn/InternalTypes.hpp                            |  3
-rw-r--r--  src/armnn/LayerSupport.cpp                             | 10
-rw-r--r--  src/armnn/LayersFwd.hpp                                |  2
-rw-r--r--  src/armnn/backends/ClLayerSupport.cpp                  |  9
-rw-r--r--  src/armnn/backends/ClLayerSupport.hpp                  |  5
-rw-r--r--  src/armnn/backends/ClWorkloadFactory.cpp               | 12
-rw-r--r--  src/armnn/backends/ClWorkloadFactory.hpp               |  3
-rw-r--r--  src/armnn/backends/NeonLayerSupport.cpp                |  9
-rw-r--r--  src/armnn/backends/NeonLayerSupport.hpp                |  5
-rw-r--r--  src/armnn/backends/NeonWorkloadFactory.cpp             | 12
-rw-r--r--  src/armnn/backends/NeonWorkloadFactory.hpp             |  3
-rw-r--r--  src/armnn/backends/RefLayerSupport.cpp                 |  9
-rw-r--r--  src/armnn/backends/RefLayerSupport.hpp                 |  5
-rw-r--r--  src/armnn/backends/RefWorkloadFactory.cpp              |  6
-rw-r--r--  src/armnn/backends/RefWorkloadFactory.hpp              |  2
-rw-r--r--  src/armnn/backends/WorkloadData.cpp                    | 13
-rw-r--r--  src/armnn/backends/WorkloadData.hpp                    |  6
-rw-r--r--  src/armnn/backends/WorkloadFactory.cpp                 | 13
-rw-r--r--  src/armnn/backends/WorkloadFactory.hpp                 |  3
-rw-r--r--  src/armnn/backends/test/IsLayerSupportedTestImpl.hpp   |  2
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp                  | 33
-rw-r--r--  src/armnn/layers/SubtractionLayer.hpp                  | 25
26 files changed, 199 insertions(+), 2 deletions(-)
diff --git a/Android.mk b/Android.mk
index 89a7124988..a164535418 100644
--- a/Android.mk
+++ b/Android.mk
@@ -193,6 +193,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/PermuteLayer.cpp \
src/armnn/layers/Pooling2dLayer.cpp \
src/armnn/layers/DivisionLayer.cpp \
+ src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeBilinearLayer.cpp \
src/armnn/layers/SoftmaxLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 802ca50cfd..7890cdfd02 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -341,6 +341,8 @@ list(APPEND armnn_sources
src/armnn/layers/SoftmaxLayer.cpp
src/armnn/layers/SplitterLayer.hpp
src/armnn/layers/SplitterLayer.cpp
+ src/armnn/layers/SubtractionLayer.cpp
+ src/armnn/layers/SubtractionLayer.hpp
src/armnn/Half.hpp
src/armnn/InternalTypes.hpp
src/armnn/InternalTypes.cpp
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index f129bba707..ac7d08ff62 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -80,6 +80,13 @@ bool IsDivisionSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+bool IsSubtractionSupported(Compute compute,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
bool IsInputSupported(Compute compute,
const TensorInfo& input,
char* reasonIfUnsupported = nullptr,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 5d7984abe4..ee93d48717 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -40,6 +40,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::ResizeBilinear: return "ResizeBilinear";
case LayerType::Softmax: return "Softmax";
case LayerType::Splitter: return "Splitter";
+ case LayerType::Subtraction: return "Subtraction";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 13b93ceddb..d2c83cdfee 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -39,9 +39,10 @@ enum class LayerType
Reshape,
ResizeBilinear,
Softmax,
+ Splitter,
// Last layer goes here.
LastLayer,
- Splitter = LastLayer,
+ Subtraction = LastLayer,
};
const char* GetLayerTypeAsCString(LayerType type);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 7ac054cad9..59c1c8dbeb 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -151,6 +151,16 @@ bool IsDivisionSupported(Compute compute,
FORWARD_LAYER_SUPPORT_FUNC(compute, IsDivisionSupported, input0, input1, output);
}
+bool IsSubtractionSupported(Compute compute,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(compute, IsSubtractionSupported, input0, input1, output);
+}
+
bool IsDepthwiseConvolutionSupported(Compute compute,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f7ebd375b6..a1dc3555e7 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -32,6 +32,7 @@
#include "layers/ResizeBilinearLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SplitterLayer.hpp"
+#include "layers/SubtractionLayer.hpp"
namespace armnn
{
@@ -86,5 +87,6 @@ DECLARE_LAYER(Reshape)
DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Softmax)
DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Subtraction)
}
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index 3a9a22a625..7b5fee2175 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -250,6 +250,15 @@ bool IsDivisionSupportedCl(const TensorInfo& input0,
output);
}
+bool IsSubtractionSupportedCl(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported)
+{
+ // At the moment subtraction is not supported
+ return false;
+}
+
bool IsFullyConnectedSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index be56d5d0d6..dbe546c18d 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -59,6 +59,11 @@ bool IsDivisionSupportedCl(const TensorInfo& input0,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsSubtractionSupportedCl(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsFullyConnectedSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/ClWorkloadFactory.cpp b/src/armnn/backends/ClWorkloadFactory.cpp
index d2f3b11fb2..8c9ca2081b 100644
--- a/src/armnn/backends/ClWorkloadFactory.cpp
+++ b/src/armnn/backends/ClWorkloadFactory.cpp
@@ -169,6 +169,12 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision(
return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
@@ -435,6 +441,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueue
return nullptr;
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return nullptr;
+}
+
void ClWorkloadFactory::Finalize()
{
}
diff --git a/src/armnn/backends/ClWorkloadFactory.hpp b/src/armnn/backends/ClWorkloadFactory.hpp
index 901bf406f8..dedbb50a6d 100644
--- a/src/armnn/backends/ClWorkloadFactory.hpp
+++ b/src/armnn/backends/ClWorkloadFactory.hpp
@@ -111,6 +111,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual void Finalize() override;
virtual void Release() override;
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index f39871b1ad..73d251893f 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -234,6 +234,15 @@ bool IsDivisionSupportedNeon(const TensorInfo& input0,
return false;
}
+bool IsSubtractionSupportedNeon(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported)
+{
+ // At the moment subtraction is not supported
+ return false;
+}
+
bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index 1715f83655..f7b62536a6 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -64,6 +64,11 @@ bool IsDivisionSupportedNeon(const TensorInfo& input0,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsSubtractionSupportedNeon(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp
index c90362cce6..fe9fd55dc3 100644
--- a/src/armnn/backends/NeonWorkloadFactory.cpp
+++ b/src/armnn/backends/NeonWorkloadFactory.cpp
@@ -162,6 +162,12 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
+ const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
@@ -429,6 +435,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDivision(const DivisionQue
return nullptr;
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& data,
+ const WorkloadInfo& info) const
+{
+ return nullptr;
+}
+
void NeonWorkloadFactory::Finalize()
{}
diff --git a/src/armnn/backends/NeonWorkloadFactory.hpp b/src/armnn/backends/NeonWorkloadFactory.hpp
index 32e745f57b..34d0e9529d 100644
--- a/src/armnn/backends/NeonWorkloadFactory.hpp
+++ b/src/armnn/backends/NeonWorkloadFactory.hpp
@@ -111,6 +111,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual void Finalize() override;
virtual void Release() override;
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index ee91e73df2..5437574789 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -130,6 +130,15 @@ bool IsDivisionSupportedRef(const TensorInfo& input0,
&TrueFunc<>);
}
+bool IsSubtractionSupportedRef(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported)
+{
+ // At the moment subtraction is not supported
+ return false;
+}
+
bool IsFullyConnectedSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index d396867f3d..464eb1c91c 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -56,6 +56,11 @@ bool IsDivisionSupportedRef(const TensorInfo& input0,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsSubtractionSupportedRef(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsFullyConnectedSupportedRef(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
diff --git a/src/armnn/backends/RefWorkloadFactory.cpp b/src/armnn/backends/RefWorkloadFactory.cpp
index d4891b3837..4de9274eb8 100644
--- a/src/armnn/backends/RefWorkloadFactory.cpp
+++ b/src/armnn/backends/RefWorkloadFactory.cpp
@@ -227,4 +227,10 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDivision(
return MakeWorkload<RefDivisionFloat32Workload, RefDivisionUint8Workload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction(
+ const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/armnn/backends/RefWorkloadFactory.hpp b/src/armnn/backends/RefWorkloadFactory.hpp
index 8586ca6909..5fbc6e40bd 100644
--- a/src/armnn/backends/RefWorkloadFactory.hpp
+++ b/src/armnn/backends/RefWorkloadFactory.hpp
@@ -127,6 +127,8 @@ public:
virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
private:
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 660637e96f..e3cf83fc6c 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -811,4 +811,17 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
"second input");
}
+void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateTwoInputs(workloadInfo, "SubtractionQueueDescriptor");
+ ValidateSingleOutput(workloadInfo, "SubtractionQueueDescriptor");
+
+ ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[1],
+ workloadInfo.m_OutputTensorInfos[0],
+ "SubtractionQueueDescriptor",
+ "first input",
+ "second input");
+}
+
} //namespace armnn
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index d0b81632db..d50a237273 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -190,6 +190,12 @@ struct DivisionQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
+// Subtraction layer workload data.
+struct SubtractionQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Batch norm layer workload data.
struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor>
{
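
A minimal sketch (not part of this patch) of exercising the SubtractionQueueDescriptor::Validate
function introduced in the two WorkloadData hunks above. The member names m_InputTensorInfos and
m_OutputTensorInfos come from the diff itself; the include paths mirror those used by
SubtractionLayer.cpp in this change, and everything else (the helper function, shapes, and the
exception behaviour) is assumed rather than taken from the patch.

    // Hypothetical sketch: validate a subtraction descriptor with two matching
    // inputs and one output. Internal include paths are assumptions.
    #include <armnn/Tensor.hpp>
    #include <backends/WorkloadData.hpp>

    void CheckSubtractionDescriptor()
    {
        const armnn::TensorInfo tensorInfo(armnn::TensorShape({2, 3}), armnn::DataType::Float32);

        armnn::WorkloadInfo info;
        info.m_InputTensorInfos  = { tensorInfo, tensorInfo };
        info.m_OutputTensorInfos = { tensorInfo };

        armnn::SubtractionQueueDescriptor descriptor;

        // Expected to pass silently for matching shapes; the validation helpers
        // are expected to throw when the input/output counts or broadcast shapes
        // do not line up.
        descriptor.Validate(info);
    }
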
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index ba926e847c..d1887252c2 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -524,6 +524,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
reasonCapacity);
break;
}
+ case LayerType::Subtraction:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = IsSubtractionSupported(compute,
+ OverrideDataType(input0, dataType),
+ OverrideDataType(input1, dataType),
+ OverrideDataType(output, dataType),
+ reason,
+ reasonCapacity);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/WorkloadFactory.hpp b/src/armnn/backends/WorkloadFactory.hpp
index 771aecfec3..0ae5a3ea1d 100644
--- a/src/armnn/backends/WorkloadFactory.hpp
+++ b/src/armnn/backends/WorkloadFactory.hpp
@@ -123,6 +123,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
+
+ virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
};
} //namespace armnn
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index a580be32e5..7745972fdd 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -350,7 +350,7 @@ DECLARE_LAYER_POLICY_2_PARAM(Softmax)
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
-
+DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
// Generic implementation to get the number of input slots for a given layer type;
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
new file mode 100644
index 0000000000..6239868b11
--- /dev/null
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "SubtractionLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backends/WorkloadData.hpp>
+#include <backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+SubtractionLayer::SubtractionLayer(const char* name)
+ : ArithmeticBaseLayer(2, 1, LayerType::Subtraction, name)
+{
+}
+
+std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ SubtractionQueueDescriptor descriptor;
+ return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
+{
+ return CloneBase<SubtractionLayer>(graph, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
new file mode 100644
index 0000000000..ac02580200
--- /dev/null
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "ArithmeticBaseLayer.hpp"
+
+namespace armnn
+{
+
+class SubtractionLayer : public ArithmeticBaseLayer
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ SubtractionLayer* Clone(Graph& graph) const override;
+
+protected:
+ SubtractionLayer(const char* name);
+ ~SubtractionLayer() = default;
+};
+
+} // namespace