author    Nikhil Raj <nikhil.raj@arm.com>  2019-09-05 17:50:44 +0100
committer Nikhil Raj <nikhil.raj@arm.com>  2019-09-05 17:50:44 +0100
commit    ee391d59dbe3305734de4ff7d98c27c8a5252624 (patch)
tree      642caa74ee548d15d7dde1ba686ac4f479545d2c
parent    02356de9da5d5480b4b9b413d1b6b33daf7ab717 (diff)
IVGCVSW-3722 Add front end support for ArgMinMax

Change-Id: I31c5616bea3097f30cde68442d3222e0b0fe2235
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
-rw-r--r--  Android.mk                                                       1
-rw-r--r--  CMakeLists.txt                                                   2
-rw-r--r--  include/armnn/Descriptors.hpp                                    9
-rw-r--r--  include/armnn/DescriptorsFwd.hpp                                 1
-rw-r--r--  include/armnn/ILayerSupport.hpp                                  6
-rw-r--r--  include/armnn/ILayerVisitor.hpp                                  8
-rw-r--r--  include/armnn/INetwork.hpp                                       7
-rw-r--r--  include/armnn/LayerVisitorBase.hpp                               4
-rw-r--r--  src/armnn/InternalTypes.hpp                                      1
-rw-r--r--  src/armnn/LayerSupport.cpp                                      10
-rw-r--r--  src/armnn/LayersFwd.hpp                                          2
-rw-r--r--  src/armnn/Network.cpp                                            6
-rw-r--r--  src/armnn/Network.hpp                                            3
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp                             52
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.hpp                             44
-rw-r--r--  src/armnnSerializer/Serializer.cpp                               9
-rw-r--r--  src/armnnSerializer/Serializer.hpp                               4
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                 7
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                 5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                    23
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                     5
-rw-r--r--  src/backends/backendsCommon/WorkloadDataFwd.hpp                  1
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                 20
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                  3
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp    2
25 files changed, 235 insertions, 0 deletions
diff --git a/Android.mk b/Android.mk
index 21b186e9ca..3640e0c43b 100644
--- a/Android.mk
+++ b/Android.mk
@@ -115,6 +115,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/AbsLayer.cpp \
src/armnn/layers/ActivationLayer.cpp \
src/armnn/layers/AdditionLayer.cpp \
+ src/armnn/layers/ArgMinMaxLayer.cpp \
src/armnn/layers/BatchNormalizationLayer.cpp \
src/armnn/layers/BatchToSpaceNdLayer.cpp \
src/armnn/layers/ConcatLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e4ca1fcf08..ef87c6d1fc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -235,6 +235,8 @@ list(APPEND armnn_sources
src/armnn/layers/ActivationLayer.cpp
src/armnn/layers/AdditionLayer.hpp
src/armnn/layers/AdditionLayer.cpp
+ src/armnn/layers/ArgMinMaxLayer.hpp
+ src/armnn/layers/ArgMinMaxLayer.cpp
src/armnn/layers/BatchNormalizationLayer.hpp
src/armnn/layers/BatchNormalizationLayer.cpp
src/armnn/layers/BatchToSpaceNdLayer.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9630d86197..87f4bdb40e 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -30,6 +30,15 @@ struct ActivationDescriptor
float m_B;
};
+/// An ArgMinMaxDescriptor for the ArgMinMaxLayer.
+struct ArgMinMaxDescriptor
+{
+ ArgMinMaxDescriptor()
+ : m_Axis(-1) {}
+
+ int m_Axis;
+};
+
/// A PermuteDescriptor for the PermuteLayer.
struct PermuteDescriptor
{
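The descriptor introduced above carries a single field for now: m_Axis, the dimension to reduce over, defaulting to -1. A minimal configuration sketch (not part of this change; the axis value is illustrative):

    #include <armnn/Descriptors.hpp>

    armnn::ArgMinMaxDescriptor MakeArgMinMaxDescriptor()
    {
        armnn::ArgMinMaxDescriptor descriptor; // m_Axis defaults to -1
        descriptor.m_Axis = 1;                 // hypothetical choice of reduction axis
        return descriptor;
    }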
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index eddf91f4ce..8f81b4fe3e 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -8,6 +8,7 @@
namespace armnn
{
struct ActivationDescriptor;
+struct ArgMinMaxDescriptor;
struct BatchNormalizationDescriptor;
struct BatchToSpaceNdDescriptor;
struct Convolution2dDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index c67569bf00..d168226402 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -14,6 +14,7 @@
#include <functional>
#include <memory>
#include <vector>
+#include "ArmNN.hpp"
namespace armnn
{
@@ -41,6 +42,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsArgMinMaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index a22de878ca..a504a4190d 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -41,6 +41,14 @@ public:
virtual void VisitAdditionLayer(const IConnectableLayer* layer,
const char* name = nullptr) = 0;
+ /// Function that an arg min max layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param argMinMaxDescriptor - ArgMinMaxDescriptor to configure the ArgMinMax operation.
+ /// @param name - Optional name for the layer.
+ virtual void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+ const ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function that a batch normalization layer should call back to when its Accept(ILayerVisitor&)
/// function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ce0fda2707..cd1b7a6319 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -102,6 +102,13 @@ public:
/// @return - Interface for configuring the layer.
virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
+ /// Adds an ArgMinMax layer to the network.
+ /// @param desc - Parameters for the ArgMinMax operation.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+ const char* name = nullptr) = 0;
+
/// Adds a concatenation layer to the network.
/// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
/// process. Number of Views must be equal to the number of inputs, and their order
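With the descriptor in place, AddArgMinMaxLayer is called like any other layer-add function on INetwork. A sketch of the intended workflow (not part of this change; layer names, binding ids and the tensor shape are illustrative, and output-shape semantics are finalised in follow-up tickets):

    #include <armnn/ArmNN.hpp>

    void BuildArgMinMaxNetwork()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        armnn::ArgMinMaxDescriptor descriptor;
        descriptor.m_Axis = -1;

        armnn::IConnectableLayer* input     = network->AddInputLayer(0, "input");
        armnn::IConnectableLayer* argMinMax = network->AddArgMinMaxLayer(descriptor, "argminmax");
        armnn::IConnectableLayer* output    = network->AddOutputLayer(0, "output");

        // Wire input -> ArgMinMax -> output and describe the input tensor.
        input->GetOutputSlot(0).Connect(argMinMax->GetInputSlot(0));
        argMinMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 10 }, armnn::DataType::Float32));
    }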
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 363a09154d..0739b43736 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -39,6 +39,10 @@ public:
void VisitAdditionLayer(const IConnectableLayer*,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitArgMinMaxLayer(const IConnectableLayer*,
+ const ArgMinMaxDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitBatchNormalizationLayer(const IConnectableLayer*,
const BatchNormalizationDescriptor&,
const ConstTensor&,
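Because LayerVisitorBase supplies a default implementation for the new callback, existing visitors keep compiling unchanged; a visitor that cares about ArgMinMax only overrides one function. An illustrative sketch (the logger class is hypothetical and assumes the no-throw policy that ships alongside LayerVisitorBase; visiting is driven through the existing IConnectableLayer::Accept path):

    #include <armnn/Descriptors.hpp>
    #include <armnn/LayerVisitorBase.hpp>
    #include <iostream>

    class ArgMinMaxLogger : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
    {
    public:
        void VisitArgMinMaxLayer(const armnn::IConnectableLayer* /*layer*/,
                                 const armnn::ArgMinMaxDescriptor& descriptor,
                                 const char* name = nullptr) override
        {
            // Log the axis each ArgMinMax layer reduces over.
            std::cout << "ArgMinMax layer '" << (name ? name : "<unnamed>")
                      << "' reduces over axis " << descriptor.m_Axis << "\n";
        }
    };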
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 6141f27df5..98308f92a1 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -17,6 +17,7 @@ enum class LayerType
Abs = FirstLayer,
Activation,
Addition,
+ ArgMinMax,
BatchNormalization,
BatchToSpaceNd,
Concat,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index d730205981..f88e4e1cc9 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -91,6 +91,16 @@ bool IsAdditionSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
}
+bool IsArgMinMaxSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
+}
+
bool IsBatchNormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 94a3b89912..6e4cf6ab04 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -9,6 +9,7 @@
#include "layers/AbsLayer.hpp"
#include "layers/ActivationLayer.hpp"
#include "layers/AdditionLayer.hpp"
+#include "layers/ArgMinMaxLayer.hpp"
#include "layers/BatchNormalizationLayer.hpp"
#include "layers/BatchToSpaceNdLayer.hpp"
#include "layers/ConcatLayer.hpp"
@@ -89,6 +90,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
DECLARE_LAYER(Abs)
DECLARE_LAYER(Activation)
DECLARE_LAYER(Addition)
+DECLARE_LAYER(ArgMinMax)
DECLARE_LAYER(BatchNormalization)
DECLARE_LAYER(BatchToSpaceNd)
DECLARE_LAYER(Concat)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dc26a1b372..6971cb89ba 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1116,6 +1116,12 @@ IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activ
return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
}
+IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
+}
+
IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
normalizationDescriptor,
const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 4516c0a8f9..aac875aac7 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -36,6 +36,9 @@ public:
IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
+ IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
new file mode 100644
index 0000000000..aad95eb0cf
--- /dev/null
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ArgMinMaxLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::ArgMinMax, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ ArgMinMaxQueueDescriptor descriptor;
+ return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
+}
+
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
new file mode 100644
index 0000000000..ca1337f065
--- /dev/null
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an ArgMinMax operation.
+class ArgMinMaxLayer : public LayerWithParameters<ArgMinMaxDescriptor>
+{
+public:
+ /// Makes a workload for the ArgMinMax type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ArgMinMaxLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ArgMinMaxLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create an ArgMinMaxLayer.
+ /// @param [in] param ArgMinMaxDescriptor to configure the ArgMinMax operation.
+ /// @param [in] name Optional name for the layer.
+ ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ArgMinMaxLayer() = default;
+};
+
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 56d313f97b..a8d9c23c04 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -146,6 +146,15 @@ void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer
CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
}
+// Build FlatBuffer for ArgMinMax Layer
+void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer,
+ const armnn::ArgMinMaxDescriptor& descriptor,
+ const char *name)
+{
+ // This will be implemented in IVGCVSW-3724
+ throw UnimplementedException("SerializerVisitor::VisitArgMinMaxLayer is not implemented");
+}
+
// Build FlatBuffer for BatchToSpaceNd Layer
void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
const armnn::BatchToSpaceNdDescriptor& descriptor,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 7400885ab1..190ed231e3 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -52,6 +52,10 @@ public:
void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
+ void VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+ const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+ const char* name = nullptr) override;
+
void VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
const armnn::BatchToSpaceNdDescriptor& descriptor,
const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 464ec4e108..a8d1eaddc3 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -57,6 +57,13 @@ bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
+ const armnn::ArgMinMaxDescriptor& descriptor,
+ armnn::Optional<std::string &> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 3cf3d4e672..25dbdf2906 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -27,6 +27,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsArgMinMaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
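LayerSupportBase answers the new query with DefaultLayerSupport, i.e. "not supported", so existing backends are unaffected until they opt in. A backend would advertise the layer by overriding the check in its own layer-support class, roughly as below (class name and acceptance criteria are purely illustrative):

    #include <backendsCommon/LayerSupportBase.hpp>

    class MyBackendLayerSupport : public armnn::LayerSupportBase
    {
    public:
        bool IsArgMinMaxSupported(const armnn::TensorInfo& input,
                                  const armnn::TensorInfo& output,
                                  const armnn::ArgMinMaxDescriptor& /*descriptor*/,
                                  armnn::Optional<std::string&> reasonIfUnsupported
                                      = armnn::EmptyOptional()) const override
        {
            // Hypothetical restriction: this backend only handles Float32.
            if (input.GetDataType() != armnn::DataType::Float32)
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() = "MyBackend: only Float32 ArgMinMax is supported";
                }
                return false;
            }
            return input.GetNumDimensions() == output.GetNumDimensions();
        }
    };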
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fed159bd60..e7e6d5235d 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -461,6 +461,29 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
+void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+ ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"SoftmaxQueueDescriptor"};
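The new Validate override mirrors the front-end checks: one input, one output, matching shapes and data types, and the listed set of supported data types. A sketch of driving it directly (include paths follow the backendsCommon conventions used in this tree; tensor shapes are illustrative):

    #include <backendsCommon/WorkloadData.hpp>
    #include <backendsCommon/WorkloadInfo.hpp>

    void ValidateArgMinMaxQueue()
    {
        armnn::ArgMinMaxQueueDescriptor queueDescriptor;
        queueDescriptor.m_Parameters.m_Axis = 1;

        armnn::WorkloadInfo info;
        info.m_InputTensorInfos.push_back (armnn::TensorInfo({ 1, 10 }, armnn::DataType::Float32));
        info.m_OutputTensorInfos.push_back(armnn::TensorInfo({ 1, 10 }, armnn::DataType::Float32));

        // Throws InvalidArgumentException if any of the checks above fail.
        queueDescriptor.Validate(info);
    }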
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index a43c7cccdb..751d37fd25 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -132,6 +132,11 @@ struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescr
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Fully connected layer workload data.
struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
{
diff --git a/src/backends/backendsCommon/WorkloadDataFwd.hpp b/src/backends/backendsCommon/WorkloadDataFwd.hpp
index abee3166f4..d4352c7a8a 100644
--- a/src/backends/backendsCommon/WorkloadDataFwd.hpp
+++ b/src/backends/backendsCommon/WorkloadDataFwd.hpp
@@ -14,6 +14,7 @@ struct SoftmaxQueueDescriptor;
struct SplitterQueueDescriptor;
struct ConcatQueueDescriptor;
struct ActivationQueueDescriptor;
+struct ArgMinMaxQueueDescriptor;
struct FullyConnectedQueueDescriptor;
struct PermuteQueueDescriptor;
struct Pooling2dQueueDescriptor;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9d081af8e9..17bd98b349 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -102,6 +102,20 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::ArgMinMax:
+ {
+ auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
+ const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
+
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsArgMinMaxSupported(
+ OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ descriptor,
+ reason);
+ break;
+ }
case LayerType::BatchNormalization:
{
auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
@@ -979,6 +993,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueD
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index d0164b202d..6fd334b49c 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -58,6 +58,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
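The base factory returns an empty pointer for CreateArgMinMax, so this patch is front-end only: no backend can execute the layer yet. A backend would pair a factory override with a workload built on the new queue descriptor, roughly as sketched below (workload and factory names are hypothetical; real implementations are left to follow-up tickets):

    #include <backendsCommon/Workload.hpp>
    #include <backendsCommon/WorkloadData.hpp>

    // Hypothetical backend workload; BaseWorkload stores the queue descriptor as
    // m_Data and validates it against the WorkloadInfo on construction.
    class MyArgMinMaxWorkload : public armnn::BaseWorkload<armnn::ArgMinMaxQueueDescriptor>
    {
    public:
        using armnn::BaseWorkload<armnn::ArgMinMaxQueueDescriptor>::BaseWorkload;

        void Execute() const override
        {
            // A real backend would read m_Data.m_Inputs / m_Data.m_Outputs and compute
            // the index of the extreme element along m_Data.m_Parameters.m_Axis.
        }
    };

    // The matching factory override (inside the hypothetical backend's factory):
    //
    //     std::unique_ptr<armnn::IWorkload> MyWorkloadFactory::CreateArgMinMax(
    //         const armnn::ArgMinMaxQueueDescriptor& descriptor,
    //         const armnn::WorkloadInfo& info) const
    //     {
    //         return std::make_unique<MyArgMinMaxWorkload>(descriptor, info);
    //     }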
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index caf5e588f7..1dc9e9700f 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -391,6 +391,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Activation)
DECLARE_LAYER_POLICY_1_PARAM(Addition)
+DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)
+
DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)