aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.cpp  1
-rw-r--r--  src/armnn/InternalTypes.hpp  1
-rw-r--r--  src/armnn/LayerSupport.cpp  10
-rw-r--r--  src/armnn/LayersFwd.hpp  2
-rw-r--r--  src/armnn/Network.cpp  5
-rw-r--r--  src/armnn/Network.hpp  2
-rw-r--r--  src/armnn/backends/ClLayerSupport.cpp  8
-rw-r--r--  src/armnn/backends/ClLayerSupport.hpp  5
-rw-r--r--  src/armnn/backends/NeonLayerSupport.cpp  8
-rw-r--r--  src/armnn/backends/NeonLayerSupport.hpp  5
-rw-r--r--  src/armnn/backends/RefLayerSupport.cpp  8
-rw-r--r--  src/armnn/backends/RefLayerSupport.hpp  5
-rw-r--r--  src/armnn/backends/WorkloadData.cpp  21
-rw-r--r--  src/armnn/backends/WorkloadData.hpp  11
-rw-r--r--  src/armnn/backends/WorkloadFactory.cpp  13
-rw-r--r--  src/armnn/backends/test/IsLayerSupportedTestImpl.hpp  2
-rw-r--r--  src/armnn/layers/MeanLayer.cpp  105
-rw-r--r--  src/armnn/layers/MeanLayer.hpp  29
18 files changed, 213 insertions, 28 deletions
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index ee93d48717..fce1e95429 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -29,6 +29,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Input: return "Input";
case LayerType::L2Normalization: return "L2Normalization";
case LayerType::Lstm: return "Lstm";
+ case LayerType::Mean: return "Mean";
case LayerType::MemCopy: return "MemCopy";
case LayerType::Merger: return "Merger";
case LayerType::Multiplication: return "Multiplication";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d2c83cdfee..13ab2bc7cd 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -29,6 +29,7 @@ enum class LayerType
Input,
L2Normalization,
Lstm,
+ Mean,
MemCopy,
Merger,
Multiplication,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 59c1c8dbeb..7ed56c5c5d 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -345,4 +345,14 @@ bool IsFloorSupported(Compute compute,
FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
}
+bool IsMeanSupported(Compute compute,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+}
+
}
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index a1dc3555e7..c9ee9dbfec 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -21,6 +21,7 @@
#include "layers/InputLayer.hpp"
#include "layers/L2NormalizationLayer.hpp"
#include "layers/LstmLayer.hpp"
+#include "layers/MeanLayer.hpp"
#include "layers/MemCopyLayer.hpp"
#include "layers/MergerLayer.hpp"
#include "layers/MultiplicationLayer.hpp"
@@ -76,6 +77,7 @@ DECLARE_LAYER(FullyConnected)
DECLARE_LAYER(Input)
DECLARE_LAYER(L2Normalization)
DECLARE_LAYER(Lstm)
+DECLARE_LAYER(Mean)
DECLARE_LAYER(MemCopy)
DECLARE_LAYER(Merger)
DECLARE_LAYER(Multiplication)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dc531d10ac..22d80d3cd4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -594,6 +594,11 @@ IConnectableLayer* Network::AddSubtractionLayer(const char* name)
return m_Graph->AddLayer<SubtractionLayer>(name);
}
+IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+ return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
+}
+
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index b6b8548f08..14112426c6 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -117,6 +117,8 @@ public:
IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp
index aeb2759aa1..4664c2ee32 100644
--- a/src/armnn/backends/ClLayerSupport.cpp
+++ b/src/armnn/backends/ClLayerSupport.cpp
@@ -462,4 +462,12 @@ bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
reasonIfUnsupported);
}
+bool IsMeanSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp
index dbe546c18d..f5c1226e56 100644
--- a/src/armnn/backends/ClLayerSupport.hpp
+++ b/src/armnn/backends/ClLayerSupport.hpp
@@ -142,6 +142,11 @@ bool IsFloorSupportedCl(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 73d251893f..7f33c48ed1 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -453,4 +453,12 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
return true;
}
+bool IsMeanSupportedNeon(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp
index f7b62536a6..95b14b3ba6 100644
--- a/src/armnn/backends/NeonLayerSupport.hpp
+++ b/src/armnn/backends/NeonLayerSupport.hpp
@@ -155,4 +155,9 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedNeon(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
}
diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index 41f57f1677..d56cdebeda 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -387,4 +387,12 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
&FalseFuncU8<>));
}
+bool IsMeanSupportedRef(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return false;
+}
+
}
diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp
index 464eb1c91c..ff2e7e387f 100644
--- a/src/armnn/backends/RefLayerSupport.hpp
+++ b/src/armnn/backends/RefLayerSupport.hpp
@@ -147,4 +147,9 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsMeanSupportedRef(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
}
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 3ed77dacdb..25144a4753 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -129,18 +129,6 @@ void ValidateTensorNumDimensions(const TensorInfo& tensor,
}
}
-void ValidateTensorMaxNumElements(const TensorInfo& tensor,
- std::string const& descName,
- unsigned int maxNumElements,
- std::string const& tensorName)
-{
- if (tensor.GetNumElements() > maxNumElements)
- {
- throw InvalidArgumentException(descName + ": Expected maximum of " + to_string(maxNumElements) + " but got " +
- to_string(tensor.GetNumElements()) + " elements for " + tensorName + " tensor.");
- }
-}
-
//---------------------------------------------------------------
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
const std::string& descName, std::string const& tensorName)
@@ -844,20 +832,17 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
- if (m_Keepdims)
+ if (m_Parameters.m_KeepDims)
{
ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
}
- else if (m_Axis == nullptr)
+ else if (m_Parameters.m_Axis.empty())
{
ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
}
else
{
- const TensorInfo& axis = m_Axis->GetTensorInfo();
- ValidateTensorNumDimensions(axis, "MeanQueueDescriptor", 1, "axis");
- ValidateTensorMaxNumElements(axis, "MeanQueueDescriptor", input.GetNumDimensions(), "axis");
- unsigned int outputDim = input.GetNumDimensions() - axis.GetNumElements();
+ auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
ValidateTensorNumDimensions(output,
"MeanQueueDescriptor",
outputDim > 0 ? outputDim : 1,
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index face761e73..a36f0ad7e6 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -197,17 +197,8 @@ struct SubtractionQueueDescriptor : QueueDescriptor
};
// Mean layer workload data.
-struct MeanQueueDescriptor : QueueDescriptor
+struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
{
- MeanQueueDescriptor()
- : m_Axis(nullptr)
- , m_Keepdims(false)
- {
- }
-
- const ConstCpuTensorHandle* m_Axis;
- bool m_Keepdims;
-
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index d1887252c2..773a8c1a18 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -537,6 +537,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
reasonCapacity);
break;
}
+ case LayerType::Mean:
+ {
+ auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = IsMeanSupported(compute,
+ OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ cLayer->GetParameters(),
+ reason,
+ reasonCapacity);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index 7745972fdd..c5389df06e 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -328,6 +328,8 @@ DECLARE_LAYER_POLICY_1_PARAM(L2Normalization)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
DECLARE_LAYER_POLICY_2_PARAM(Merger)
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
new file mode 100644
index 0000000000..6bbb0943b0
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MeanLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadData.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <cstring>
+
+namespace armnn
+{
+
+MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Mean, param, name)
+{}
+
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
+ const armnn::IWorkloadFactory& factory) const
+{
+ MeanQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
+ descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+
+ return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+MeanLayer* MeanLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<MeanLayer>(graph, m_Param, GetName());
+
+ layer->m_Param.m_Axis = m_Param.m_Axis;
+ layer->m_Param.m_KeepDims = m_Param.m_KeepDims;
+
+ return layer;
+}
+
+void MeanLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+ BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
+ "MeanLayer: Mean supports up to 4D input.");
+
+ unsigned int rank = input.GetNumDimensions();
+ unsigned int outputRank = 0;
+
+ // Calculate output dimension
+ if (m_Param.m_KeepDims)
+ {
+ outputRank = rank;
+ }
+ else if (m_Param.m_Axis.empty())
+ {
+ outputRank = 1;
+ }
+ else if (m_Param.m_Axis.size() > input.GetNumDimensions())
+ {
+ throw LayerValidationException("MeanLayer: Dimensions to reduce can not be bigger than input dimensions");
+ }
+ else
+ {
+ outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
+ if (outputRank == 0)
+ {
+ outputRank = 1;
+ }
+ }
+
+ // Initialise every output dimension to 1 element-wise (memset would fill bytes, not ints).
+ std::vector<unsigned int> dimSizes(outputRank, 1);
+
+ if (!m_Param.m_Axis.empty())
+ {
+ // Skip the dimension that has been reduced unless keepDims is true.
+ unsigned int outputIndex = 0;
+ for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+ {
+ if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
+ {
+ dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
+ ++outputIndex;
+ }
+ else if (m_Param.m_KeepDims)
+ {
+ dimSizes[outputIndex] = 1;
+ ++outputIndex;
+ }
+ }
+ }
+ const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShape);
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
new file mode 100644
index 0000000000..ecb92979d2
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class MeanLayer : public LayerWithParameters<MeanDescriptor>
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ MeanLayer* Clone(Graph& graph) const override;
+
+ void ValidateTensorShapesFromInputs() override;
+
+protected:
+ MeanLayer(const MeanDescriptor& param, const char* name);
+ ~MeanLayer() = default;
+
+};
+
+} \ No newline at end of file