aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authornarpra01 <narumol.prangnawarat@arm.com>2018-09-13 11:07:48 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-01 14:56:48 +0100
commit32b9046ea74d2387a08819cf5e67c183e03f6d3f (patch)
tree87d1627aed07bc1c572d7b2121974e24778fe81a
parent5cd01f35e8975ca6d42eb5f2d5f121b57bdb8086 (diff)
downloadarmnn-32b9046ea74d2387a08819cf5e67c183e03f6d3f.tar.gz
IVGCVSW-1813 - Add MeanLayer
* add MeanLayer functionalities
* modify MeanQueueDescriptor to use parameter
* add IsMeanSupported placeholder for all backends

Change-Id: Ic69a34a61df667849977aad9b38f9a01eef565b5
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt2
-rw-r--r--include/armnn/Descriptors.hpp15
-rw-r--r--include/armnn/DescriptorsFwd.hpp1
-rw-r--r--include/armnn/INetwork.hpp6
-rw-r--r--include/armnn/LayerSupport.hpp7
-rw-r--r--src/armnn/InternalTypes.cpp1
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayerSupport.cpp10
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp5
-rw-r--r--src/armnn/Network.hpp2
-rw-r--r--src/armnn/backends/ClLayerSupport.cpp8
-rw-r--r--src/armnn/backends/ClLayerSupport.hpp5
-rw-r--r--src/armnn/backends/NeonLayerSupport.cpp8
-rw-r--r--src/armnn/backends/NeonLayerSupport.hpp5
-rw-r--r--src/armnn/backends/RefLayerSupport.cpp8
-rw-r--r--src/armnn/backends/RefLayerSupport.hpp5
-rw-r--r--src/armnn/backends/WorkloadData.cpp21
-rw-r--r--src/armnn/backends/WorkloadData.hpp11
-rw-r--r--src/armnn/backends/WorkloadFactory.cpp13
-rw-r--r--src/armnn/backends/test/IsLayerSupportedTestImpl.hpp2
-rw-r--r--src/armnn/layers/MeanLayer.cpp105
-rw-r--r--src/armnn/layers/MeanLayer.hpp29
24 files changed, 245 insertions, 28 deletions
diff --git a/Android.mk b/Android.mk
index 9272aef4c4..db3c6b3af6 100644
--- a/Android.mk
+++ b/Android.mk
@@ -177,6 +177,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/InputLayer.cpp \
src/armnn/layers/L2NormalizationLayer.cpp \
src/armnn/layers/LstmLayer.cpp \
+ src/armnn/layers/MeanLayer.cpp \
src/armnn/layers/MemCopyLayer.cpp \
src/armnn/layers/MergerLayer.cpp \
src/armnn/layers/MultiplicationLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4453a85cd2..7656c5d329 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -305,6 +305,8 @@ list(APPEND armnn_sources
src/armnn/layers/L2NormalizationLayer.cpp
src/armnn/layers/LstmLayer.cpp
src/armnn/layers/LstmLayer.hpp
+ src/armnn/layers/MeanLayer.hpp
+ src/armnn/layers/MeanLayer.cpp
src/armnn/layers/MemCopyLayer.hpp
src/armnn/layers/MemCopyLayer.cpp
src/armnn/layers/MergerLayer.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index decbf99880..5f9df6b2c3 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -332,4 +332,19 @@ struct LstmDescriptor
bool m_ProjectionEnabled;
};
+struct MeanDescriptor
+{
+ MeanDescriptor()
+ : m_KeepDims(false)
+ {}
+
+ MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
+ : m_Axis(axis)
+ , m_KeepDims(keepDims)
+ {}
+
+ std::vector<unsigned int> m_Axis;
+ bool m_KeepDims;
+};
+
}
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index ed958fc237..b161df8827 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -15,6 +15,7 @@ struct FullyConnectedDescriptor;
struct LstmDescriptor;
struct PermuteDescriptor;
struct NormalizationDescriptor;
+struct MeanDescriptor;
struct Pooling2dDescriptor;
struct ReshapeDescriptor;
struct ResizeBilinearDescriptor;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 0405074d3a..7fd7a25b60 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -279,6 +279,12 @@ public:
/// @return - Interface for configuring the layer.
virtual IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) = 0;
+ /// Add a Mean layer to the network.
+ /// @param meanDescriptor - Parameters for the mean operation.
+ /// @param name - Optional name for the layer.
+/// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) = 0;
+
protected:
~INetwork() {}
};
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index ac7d08ff62..d00691fad1 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -196,4 +196,11 @@ bool IsFloorSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+bool IsMeanSupported(Compute compute,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
}
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index ee93d48717..fce1e95429 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -29,6 +29,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Input: return "Input";
case LayerType::L2Normalization: return "L2Normalization";
case LayerType::Lstm: return "Lstm";
+ case LayerType::Mean: return "Mean";
case LayerType::MemCopy: return "MemCopy";
case LayerType::Merger: return "Merger";
case LayerType::Multiplication: return "Multiplication";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d2c83cdfee..13ab2bc7cd 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -29,6 +29,7 @@ enum class LayerType
Input,
L2Normalization,
Lstm,
+ Mean,
MemCopy,
Merger,
Multiplication,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 59c1c8dbeb..7ed56c5c5d 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -345,4 +345,14 @@ bool IsFloorSupported(Compute compute,
FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
}
+bool IsMeanSupported(Compute compute,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+}
+
}
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index a1dc3555e7..c9ee9dbfec 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
#include "layers/InputLayer.hpp"
#include "layers/L2NormalizationLayer.hpp"
#include "layers/LstmLayer.hpp"
+#include "layers/MeanLayer.hpp"
#include "layers/MemCopyLayer.hpp"
#include "layers/MergerLayer.hpp"
#include "layers/MultiplicationLayer.hpp"
@@ -76,6 +77,7 @@ DECLARE_LAYER(FullyConnected)
DECLARE_LAYER(Input)
DECLARE_LAYER(L2Normalization)
DECLARE_LAYER(Lstm)
+DECLARE_LAYER(Mean)
DECLARE_LAYER(MemCopy)
DECLARE_LAYER(Merger)
DECLARE_LAYER(Multiplication)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dc531d10ac..22d80d3cd4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -594,6 +594,11 @@ IConnectableLayer* Network::AddSubtractionLayer(const char* name)
return m_Graph->AddLayer<SubtractionLayer>(name);
}
+IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+ return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
+}
+
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index b6b8548f08..14112426c6 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -117,6 +117,8 @@ public:
IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index aeb2759aa1..4664c2ee32 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -462,4 +462,12 @@ bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
reasonIfUnsupported);
}
+bool IsMeanSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index dbe546c18d..f5c1226e56 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -142,6 +142,11 @@ bool IsFloorSupportedCl(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 73d251893f..7f33c48ed1 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -453,4 +453,12 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
return true;
}
+bool IsMeanSupportedNeon(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index f7b62536a6..95b14b3ba6 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -155,4 +155,9 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedNeon(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
}
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index 41f57f1677..d56cdebeda 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -387,4 +387,12 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
&FalseFuncU8<>));
}
+bool IsMeanSupportedRef(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index 464eb1c91c..ff2e7e387f 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -147,4 +147,9 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedRef(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
}
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 3ed77dacdb..25144a4753 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -129,18 +129,6 @@ void ValidateTensorNumDimensions(const TensorInfo& tensor,
}
}
-void ValidateTensorMaxNumElements(const TensorInfo& tensor,
- std::string const& descName,
- unsigned int maxNumElements,
- std::string const& tensorName)
-{
- if (tensor.GetNumElements() > maxNumElements)
- {
- throw InvalidArgumentException(descName + ": Expected maximum of " + to_string(maxNumElements) + " but got " +
- to_string(tensor.GetNumElements()) + " elements for " + tensorName + " tensor.");
- }
-}
-
//---------------------------------------------------------------
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
const std::string& descName, std::string const& tensorName)
@@ -844,20 +832,17 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
- if (m_Keepdims)
+ if (m_Parameters.m_KeepDims)
{
ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
}
- else if (m_Axis == nullptr)
+ else if (m_Parameters.m_Axis.empty())
{
ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
}
else
{
- const TensorInfo& axis = m_Axis->GetTensorInfo();
- ValidateTensorNumDimensions(axis, "MeanQueueDescriptor", 1, "axis");
- ValidateTensorMaxNumElements(axis, "MeanQueueDescriptor", input.GetNumDimensions(), "axis");
- unsigned int outputDim = input.GetNumDimensions() - axis.GetNumElements();
+ auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
ValidateTensorNumDimensions(output,
"MeanQueueDescriptor",
outputDim > 0 ? outputDim : 1,
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index face761e73..a36f0ad7e6 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -197,17 +197,8 @@ struct SubtractionQueueDescriptor : QueueDescriptor
};
// Mean layer workload data.
-struct MeanQueueDescriptor : QueueDescriptor
+struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
{
- MeanQueueDescriptor()
- : m_Axis(nullptr)
- , m_Keepdims(false)
- {
- }
-
- const ConstCpuTensorHandle* m_Axis;
- bool m_Keepdims;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index d1887252c2..773a8c1a18 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -537,6 +537,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
reasonCapacity);
break;
}
+ case LayerType::Mean:
+ {
+ auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = IsMeanSupported(compute,
+ OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason,
+ reasonCapacity);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index 7745972fdd..c5389df06e 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -328,6 +328,8 @@ DECLARE_LAYER_POLICY_1_PARAM(L2Normalization)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
DECLARE_LAYER_POLICY_2_PARAM(Merger)
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
new file mode 100644
index 0000000000..6bbb0943b0
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MeanLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadData.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <cstring>
+
+namespace armnn
+{
+
+MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Mean, param, name)
+{}
+
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
+ const armnn::IWorkloadFactory& factory) const
+{
+ MeanQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
+ descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+
+ return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+MeanLayer* MeanLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<MeanLayer>(graph, m_Param, GetName());
+
+ layer->m_Param.m_Axis = m_Param.m_Axis;
+ layer->m_Param.m_KeepDims = m_Param.m_KeepDims;
+
+ return std::move(layer);
+}
+
+void MeanLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+ BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
+ "MeanLayer: Mean supports up to 4D input.");
+
+ unsigned int rank = input.GetNumDimensions();
+ unsigned int outputRank = 0;
+
+ // Calculate output dimension
+ if (m_Param.m_KeepDims)
+ {
+ outputRank = rank;
+ }
+ else if (m_Param.m_Axis.empty())
+ {
+ outputRank = 1;
+ }
+ else if (m_Param.m_Axis.size() <= input.GetNumDimensions())
+ {
+ throw LayerValidationException("MeanLayer: Dimensions to reduce can not be bigger than input dimensions");
+ }
+ else
+ {
+ outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
+ if (outputRank == 0)
+ {
+ outputRank = 1;
+ }
+ }
+
+ unsigned int dimSizes[outputRank];
+ memset(dimSizes, 1, outputRank * sizeof(unsigned int));
+
+ if (!m_Param.m_Axis.empty())
+ {
+ // Skip the dimension that has been reduced unless keepDims is true.
+ unsigned int outputIndex = 0;
+ for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+ {
+ if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
+ {
+ dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
+ ++outputIndex;
+ }
+ else if (m_Param.m_KeepDims)
+ {
+ dimSizes[outputIndex] = 1;
+ ++outputIndex;
+ }
+ }
+ }
+ const TensorShape& inferredShape = TensorShape(outputRank, dimSizes);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShape);
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
new file mode 100644
index 0000000000..ecb92979d2
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class MeanLayer : public LayerWithParameters<MeanDescriptor>
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ MeanLayer* Clone(Graph& graph) const override;
+
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ MeanLayer(const MeanDescriptor& param, const char* name);
+ ~MeanLayer() = default;
+
+};
+
+} \ No newline at end of file