aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatteo Martincigh <matteo.martincigh@arm.com>2018-12-13 12:48:25 +0000
committerMatteo Martincigh <matteo.martincigh@arm.com>2018-12-14 11:34:52 +0000
commit59a950cefe927d002aa00d7c3af54f4389e00162 (patch)
treee5152463ef83a1ef42a6f44f17bc9c40b8eacde7
parent20e984f442483493ece882c3040785368eb31c96 (diff)
downloadarmnn-59a950cefe927d002aa00d7c3af54f4389e00162.tar.gz
IVGCVSW-2377 Add no-op factory implementations for all backends for the
Greater operation * Added QueueDescriptor in WorkloadData.hpp * Added CreateGreater function in WorkloadFactory.hpp * Added stub implementation of the CreateGreater function in RefWorkloadFactory, NeonWorkloadFactory and ClWorkloadFactory * Added GreaterLayer stub implementation * Renamed ArithmeticBaseLayer to ElementwiseBaseLayer Change-Id: I7e38c2936de905da921a92ba3f918478169ec7f5
-rw-r--r--Android.mk3
-rw-r--r--CMakeLists.txt6
-rw-r--r--include/armnn/ILayerSupport.hpp5
-rw-r--r--include/armnn/INetwork.hpp5
-rw-r--r--include/armnn/LayerSupport.hpp8
-rw-r--r--src/armnn/InternalTypes.cpp3
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayerSupport.cpp10
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp5
-rw-r--r--src/armnn/Network.hpp2
-rw-r--r--src/armnn/layers/AdditionLayer.cpp3
-rw-r--r--src/armnn/layers/AdditionLayer.hpp5
-rw-r--r--src/armnn/layers/DivisionLayer.cpp3
-rw-r--r--src/armnn/layers/DivisionLayer.hpp5
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.cpp (renamed from src/armnn/layers/ArithmeticBaseLayer.cpp)11
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.hpp (renamed from src/armnn/layers/ArithmeticBaseLayer.hpp)7
-rw-r--r--src/armnn/layers/GreaterLayer.cpp34
-rw-r--r--src/armnn/layers/GreaterLayer.hpp26
-rw-r--r--src/armnn/layers/MaximumLayer.cpp5
-rw-r--r--src/armnn/layers/MaximumLayer.hpp5
-rw-r--r--src/armnn/layers/MinimumLayer.cpp3
-rw-r--r--src/armnn/layers/MinimumLayer.hpp4
-rw-r--r--src/armnn/layers/MultiplicationLayer.cpp3
-rw-r--r--src/armnn/layers/MultiplicationLayer.hpp5
-rw-r--r--src/armnn/layers/SubtractionLayer.cpp3
-rw-r--r--src/armnn/layers/SubtractionLayer.hpp5
-rw-r--r--src/backends/backendsCommon/ILayerSupport.cpp8
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp5
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp11
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp3
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp16
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp6
-rw-r--r--src/backends/cl/ClWorkloadFactory.hpp3
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp6
-rw-r--r--src/backends/neon/NeonWorkloadFactory.hpp3
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp6
-rw-r--r--src/backends/reference/RefWorkloadFactory.hpp3
38 files changed, 209 insertions, 38 deletions
diff --git a/Android.mk b/Android.mk
index fdac0ddebb..e4033d5e11 100644
--- a/Android.mk
+++ b/Android.mk
@@ -84,7 +84,6 @@ LOCAL_SRC_FILES := \
src/armnnUtils/VerificationHelpers.cpp \
src/armnn/layers/ActivationLayer.cpp \
src/armnn/layers/AdditionLayer.cpp \
- src/armnn/layers/ArithmeticBaseLayer.cpp \
src/armnn/layers/BatchNormalizationLayer.cpp \
src/armnn/layers/BatchToSpaceNdLayer.cpp \
src/armnn/layers/ConstantLayer.cpp \
@@ -93,9 +92,11 @@ LOCAL_SRC_FILES := \
src/armnn/layers/ConvertFp32ToFp16Layer.cpp \
src/armnn/layers/DebugLayer.cpp \
src/armnn/layers/DepthwiseConvolution2dLayer.cpp \
+ src/armnn/layers/ElementwiseBaseLayer.cpp \
src/armnn/layers/FakeQuantizationLayer.cpp \
src/armnn/layers/FloorLayer.cpp \
src/armnn/layers/FullyConnectedLayer.cpp \
+ src/armnn/layers/GreaterLayer.cpp \
src/armnn/layers/InputLayer.cpp \
src/armnn/layers/L2NormalizationLayer.cpp \
src/armnn/layers/LstmLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 189a1abe73..370b94c63d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -176,8 +176,6 @@ list(APPEND armnn_sources
src/armnn/layers/ActivationLayer.cpp
src/armnn/layers/AdditionLayer.hpp
src/armnn/layers/AdditionLayer.cpp
- src/armnn/layers/ArithmeticBaseLayer.hpp
- src/armnn/layers/ArithmeticBaseLayer.cpp
src/armnn/layers/BatchNormalizationLayer.hpp
src/armnn/layers/BatchNormalizationLayer.cpp
src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -194,12 +192,16 @@ list(APPEND armnn_sources
src/armnn/layers/DebugLayer.cpp
src/armnn/layers/DepthwiseConvolution2dLayer.hpp
src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+ src/armnn/layers/ElementwiseBaseLayer.hpp
+ src/armnn/layers/ElementwiseBaseLayer.cpp
src/armnn/layers/FakeQuantizationLayer.hpp
src/armnn/layers/FakeQuantizationLayer.cpp
src/armnn/layers/FloorLayer.hpp
src/armnn/layers/FloorLayer.cpp
src/armnn/layers/FullyConnectedLayer.hpp
src/armnn/layers/FullyConnectedLayer.cpp
+ src/armnn/layers/GreaterLayer.hpp
+ src/armnn/layers/GreaterLayer.cpp
src/armnn/layers/InputLayer.hpp
src/armnn/layers/InputLayer.cpp
src/armnn/layers/L2NormalizationLayer.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 2498aa945d..71ad50d5be 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -208,6 +208,11 @@ public:
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
+ virtual bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
}; // class ILayerSupport
using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 33181ce138..7c88cf0f00 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -328,6 +328,11 @@ public:
/// @ return - Interface for configuring the layer.
virtual IConnectableLayer* AddMinimumLayer(const char* name = nullptr) = 0;
+ /// Add a Greater layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @ return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddGreaterLayer(const char* name = nullptr) = 0;
+
protected:
~INetwork() {}
};
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 53efc15649..5b96bc6d01 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -287,4 +287,12 @@ bool IsMinimumSupported(const BackendId& backend,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
}
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index cbf6f8a257..845ba9ec0c 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -28,6 +28,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::FakeQuantization: return "FakeQuantization";
case LayerType::Floor: return "Floor";
case LayerType::FullyConnected: return "FullyConnected";
+ case LayerType::Greater: return "Greater";
case LayerType::Input: return "Input";
case LayerType::L2Normalization: return "L2Normalization";
case LayerType::Lstm: return "Lstm";
@@ -39,6 +40,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Multiplication: return "Multiplication";
case LayerType::Normalization: return "Normalization";
case LayerType::Output: return "Output";
+ case LayerType::Pad: return "Pad";
case LayerType::Permute: return "Permute";
case LayerType::Pooling2d: return "Pooling2d";
case LayerType::Reshape: return "Reshape";
@@ -48,7 +50,6 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Splitter: return "Splitter";
case LayerType::StridedSlice: return "StridedSlice";
case LayerType::Subtraction: return "Subtraction";
- case LayerType::Pad: return "Pad";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 3d4f043051..27a1359f68 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -28,6 +28,7 @@ enum class LayerType
FakeQuantization,
Floor,
FullyConnected,
+ Greater,
Input,
L2Normalization,
Lstm,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 298b25c284..d1161b63b1 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -445,4 +445,14 @@ bool IsMinimumSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
}
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
+}
+
}
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index ebfa5db6b1..39b0b20b21 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -20,6 +20,7 @@
#include "layers/FakeQuantizationLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/FullyConnectedLayer.hpp"
+#include "layers/GreaterLayer.hpp"
#include "layers/InputLayer.hpp"
#include "layers/L2NormalizationLayer.hpp"
#include "layers/LstmLayer.hpp"
@@ -83,6 +84,7 @@ DECLARE_LAYER(Division)
DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Floor)
DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Greater)
DECLARE_LAYER(Input)
DECLARE_LAYER(L2Normalization)
DECLARE_LAYER(Lstm)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index ecab5041db..7a7e1800f2 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -717,6 +717,11 @@ IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& s
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
+IConnectableLayer* Network::AddGreaterLayer(const char* name)
+{
+ return m_Graph->AddLayer<GreaterLayer>(name);
+}
+
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index e65b1d5f77..31e86ac0b9 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -135,6 +135,8 @@ public:
IConnectableLayer* AddMinimumLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddGreaterLayer(const char* name = nullptr) override;
+
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 0ccf398d37..27622664d5 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "AdditionLayer.hpp"
#include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
{
AdditionLayer::AdditionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Addition, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Addition, name)
{
}
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 211a2ef65f..9cdf09fb4e 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -2,14 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class AdditionLayer : public ArithmeticBaseLayer
+class AdditionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index f667dc9545..03164993bc 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DivisionLayer.hpp"
#include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
{
DivisionLayer::DivisionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Division, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Division, name)
{
}
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index eaede4563b..158f8e8b5d 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -2,14 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class DivisionLayer : public ArithmeticBaseLayer
+class DivisionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/armnn/layers/ArithmeticBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 3b89c229c3..761814176d 100644
--- a/src/armnn/layers/ArithmeticBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -2,7 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "ArithmeticBaseLayer.hpp"
+
+#include "ElementwiseBaseLayer.hpp"
#include "InternalTypes.hpp"
#include "armnn/Exceptions.hpp"
@@ -13,13 +14,13 @@
namespace armnn
{
-ArithmeticBaseLayer::ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
- LayerType type, const char* name)
+ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
+ LayerType type, const char* name)
: Layer(numInputSlots, numOutputSlots, type, name)
{
}
-std::vector<TensorShape> ArithmeticBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
BOOST_ASSERT(inputShapes.size() == 2);
auto& input0 = inputShapes[0];
@@ -47,7 +48,7 @@ std::vector<TensorShape> ArithmeticBaseLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ArithmeticBaseLayer::ValidateTensorShapesFromInputs()
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
diff --git a/src/armnn/layers/ArithmeticBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index a8605a9b76..f0821ecfe9 100644
--- a/src/armnn/layers/ArithmeticBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
#include <Layer.hpp>
@@ -12,15 +13,15 @@ namespace armnn
/// NOTE: this is an abstract class, it does not implement:
/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
/// Layer* Clone(Graph& graph) const = 0;
-class ArithmeticBaseLayer : public Layer
+class ElementwiseBaseLayer : public Layer
{
public:
void ValidateTensorShapesFromInputs() override;
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
- ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
- ~ArithmeticBaseLayer() = default;
+ ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
+ ~ElementwiseBaseLayer() = default;
};
} // namespace
diff --git a/src/armnn/layers/GreaterLayer.cpp b/src/armnn/layers/GreaterLayer.cpp
new file mode 100644
index 0000000000..d40c17c71b
--- /dev/null
+++ b/src/armnn/layers/GreaterLayer.cpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GreaterLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+GreaterLayer::GreaterLayer(const char* name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Greater, name)
+{
+}
+
+std::unique_ptr<IWorkload> GreaterLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ GreaterQueueDescriptor descriptor;
+ return factory.CreateGreater(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+GreaterLayer* GreaterLayer::Clone(Graph& graph) const
+{
+ return CloneBase<GreaterLayer>(graph, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/GreaterLayer.hpp b/src/armnn/layers/GreaterLayer.hpp
new file mode 100644
index 0000000000..9297a82da4
--- /dev/null
+++ b/src/armnn/layers/GreaterLayer.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ElementwiseBaseLayer.hpp"
+
+namespace armnn
+{
+
+class GreaterLayer : public ElementwiseBaseLayer
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ GreaterLayer* Clone(Graph& graph) const override;
+
+protected:
+ GreaterLayer(const char* name);
+ ~GreaterLayer() = default;
+};
+
+} //namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 67a234216c..c0da8d6ef8 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -14,8 +14,9 @@ namespace armnn
{
MaximumLayer::MaximumLayer(const char* name)
-: ArithmeticBaseLayer(2, 1, LayerType::Maximum, name)
-{}
+ : ElementwiseBaseLayer(2, 1, LayerType::Maximum, name)
+{
+}
std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index da4c3ed18c..18a4ed31e2 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -2,14 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MaximumLayer : public ArithmeticBaseLayer
+class MaximumLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 11f60ebcb1..8e76041813 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "MinimumLayer.hpp"
#include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
{
MinimumLayer::MinimumLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Minimum, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Minimum, name)
{
}
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index cd8a1688cf..43382374eb 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -5,12 +5,12 @@
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MinimumLayer : public ArithmeticBaseLayer
+class MinimumLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 2abcf8609d..9448935660 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "MultiplicationLayer.hpp"
#include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
{
MultiplicationLayer::MultiplicationLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Multiplication, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Multiplication, name)
{
}
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 21b68e3e70..8a7bfdea39 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -2,14 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MultiplicationLayer : public ArithmeticBaseLayer
+class MultiplicationLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 2b158acadd..18d8661dd6 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "SubtractionLayer.hpp"
#include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
{
SubtractionLayer::SubtractionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Subtraction, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Subtraction, name)
{
}
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index ac02580200..d1bccfe81e 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -2,14 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class SubtractionLayer : public ArithmeticBaseLayer
+class SubtractionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
index 3f2636c61a..3718df1347 100644
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ b/src/backends/backendsCommon/ILayerSupport.cpp
@@ -320,4 +320,12 @@ bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool ILayerSupport::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index b4bcfb0104..88b0d5e5c5 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -357,6 +357,11 @@ struct MinimumQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct GreaterQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
{
void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 915d667fed..d7704ffad4 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -666,6 +666,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Greater:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsGreaterSupported(OverrideDataType(input0, dataType),
+ OverrideDataType(input1, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index eb24b64dd7..57f61968c1 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -148,6 +148,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
};
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index e267988786..dc4f05eec4 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -342,6 +342,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Floor)
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
+DECLARE_LAYER_POLICY_1_PARAM(Greater)
+
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
@@ -516,7 +518,7 @@ template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
-};
+}
// Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
@@ -527,14 +529,14 @@ bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
return v &&
IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
(factory, Tag<NextType(Type)>());
-};
+}
// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
-};
+}
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
@@ -549,20 +551,20 @@ bool TestLayerTypeMatches()
bool v = Type == layer.m_Layer->GetType();
BOOST_CHECK_MESSAGE(v, ss.str());
return v;
-};
+}
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
return TestLayerTypeMatches<Type>();
-};
+}
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
return TestLayerTypeMatches<Type>() &&
LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
-};
+}
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
@@ -584,6 +586,6 @@ bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
return result;
-};
+}
} //namespace
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index dfe627209c..ebd957b903 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -332,6 +332,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDe
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index e2c80de7af..70052c41e0 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -138,6 +138,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index aed2d56646..d7de17a83d 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -300,6 +300,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueue
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index e95389433b..115dfb0458 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -139,6 +139,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index ac837d3acc..b34de8649b 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -294,6 +294,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedS
return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 9ee1fe5aa3..03b349db97 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -156,6 +156,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
private: