aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/InternalTypes.cpp1
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayerSupport.cpp15
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp6
-rw-r--r--src/armnn/Network.hpp3
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp89
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.hpp29
8 files changed, 146 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 931b6a3579..3493a3d5a2 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -17,6 +17,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Activation: return "Activation";
case LayerType::Addition: return "Addition";
case LayerType::BatchNormalization: return "BatchNormalization";
+ case LayerType::BatchToSpaceNd: return "BatchToSpaceNd";
case LayerType::Constant: return "Constant";
case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 0661b16649..dc3c55edac 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -17,6 +17,7 @@ enum class LayerType
Activation = FirstLayer,
Addition,
BatchNormalization,
+ BatchToSpaceNd,
Constant,
ConvertFp16ToFp32,
ConvertFp32ToFp16,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index fb3ce43646..5d2d205534 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -12,6 +12,7 @@
#include <cstring>
#include <algorithm>
#include <unordered_map>
+#include <armnn/ArmNN.hpp>
namespace armnn
{
@@ -100,6 +101,20 @@ bool IsBatchNormalizationSupported(const BackendId& backend,
descriptor);
}
/// Queries whether the given backend can execute a BatchToSpaceNd layer with
/// these input/output tensor infos and this descriptor.
/// NOTE(review): reasonIfUnsupported / reasonIfUnsupportedMaxLength appear
/// unused in the body, but are presumably consumed by the expansion of
/// FORWARD_LAYER_SUPPORT_FUNC — confirm against the macro's definition
/// earlier in this file.
bool IsBatchToSpaceNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const BatchToSpaceNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchToSpaceNdSupported,
                               input,
                               output,
                               descriptor);
}
+
bool IsConstantSupported(const BackendId& backend,
const TensorInfo& output,
char* reasonIfUnsupported,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 5c08b6677f..bd1297b550 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -9,6 +9,7 @@
#include "layers/ActivationLayer.hpp"
#include "layers/AdditionLayer.hpp"
#include "layers/BatchNormalizationLayer.hpp"
+#include "layers/BatchToSpaceNdLayer.hpp"
#include "layers/ConstantLayer.hpp"
#include "layers/ConvertFp16ToFp32Layer.hpp"
#include "layers/ConvertFp32ToFp16Layer.hpp"
@@ -67,6 +68,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
DECLARE_LAYER(Activation)
DECLARE_LAYER(Addition)
DECLARE_LAYER(BatchNormalization)
+DECLARE_LAYER(BatchToSpaceNd)
DECLARE_LAYER(Constant)
DECLARE_LAYER(ConvertFp16ToFp32)
DECLARE_LAYER(ConvertFp32ToFp16)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7b430c3ac5..3b3ee3146a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -351,6 +351,12 @@ IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
return m_Graph->AddLayer<InputLayer>(id, name);
}
/// Adds a BatchToSpaceNd layer to the network's graph.
/// @param batchToSpaceNdDescriptor Block shape, crops and data-layout configuration.
/// @param name Optional layer name (may be nullptr).
/// @return Pointer to the newly added layer, owned by the graph.
IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                                   const char* name)
{
    return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
}
+
IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const ConstTensor* biases,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 4a93dd1ee4..95cdb28bfb 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -34,6 +34,9 @@ public:
IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
+ IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
const ConstTensor& weights,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
new file mode 100644
index 0000000000..595ce4a7fe
--- /dev/null
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -0,0 +1,89 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "BatchToSpaceNdLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+#include "LayerWithParameters.hpp"
+#include "BatchToSpaceNdLayer.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
/// Constructs a BatchToSpaceNd layer with one input slot and one output slot.
/// @param param Descriptor carrying the block shape, crops and data layout.
/// @param name Optional layer name (may be nullptr).
BatchToSpaceNdLayer::BatchToSpaceNdLayer(const armnn::BatchToSpaceNdDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::BatchToSpaceNd, param, name)
{
}
+
+std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ BatchToSpaceNdQueueDescriptor descriptor;
+
+ return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<BatchToSpaceNdLayer>(graph, m_Param, GetName());
+ return std::move(layer);
+}
+
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),inferredShapes[0]);
+}
+
+std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ const DataLayoutIndexed & dataLayout = m_Param.m_DataLayout;
+ const TensorShape& inputShape = inputShapes[0];
+ unsigned int inBatchSize = inputShape[0];
+ unsigned int channelSize = inputShape[dataLayout.GetChannelsIndex()];
+
+ std::vector<unsigned int> theBlockShape = m_Param.m_BlockShape;
+
+ unsigned int overallSize = inBatchSize;
+
+ for (unsigned int i = 0; i < theBlockShape.size(); ++i)
+ {
+ overallSize = overallSize * theBlockShape.at(i);
+ }
+
+ std::vector<std::vector<unsigned int>> crops = m_Param.m_Crops;
+
+ std::vector<unsigned int> yCrops = crops[0];
+ std::vector<unsigned int> xCrops = crops[1];
+
+ unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
+ unsigned int outputHeight = theBlockShape.at(0) * (inputHeight - (yCrops[0] + yCrops[1]));
+
+ unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
+ unsigned int outputWidth = theBlockShape.at(1) * (inputWidth - (xCrops[0] + xCrops[1]));
+
+ unsigned int outputBatchSize = overallSize / (outputHeight * outputWidth);
+
+ if (dataLayout == DataLayout::NHWC)
+ {
+ return std::vector<TensorShape>({ TensorShape({ outputBatchSize, outputHeight, outputWidth, channelSize }) });
+ }
+ else
+ {
+ return std::vector<TensorShape>({ TensorShape({ outputBatchSize, channelSize, outputHeight, outputWidth }) });
+ }
+}
+} // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
new file mode 100644
index 0000000000..eb5f979f3a
--- /dev/null
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
/// Layer performing the BatchToSpaceNd operation: moves data from the batch
/// dimension back into the spatial dimensions according to the descriptor's
/// block shape, then applies the configured crops.
class BatchToSpaceNdLayer : public LayerWithParameters<BatchToSpaceNdDescriptor>
{
public:
    /// Creates the backend workload that executes this layer.
    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                      const IWorkloadFactory& factory) const override;

    /// Creates a copy of this layer (same descriptor and name) in the given graph.
    BatchToSpaceNdLayer* Clone(Graph& graph) const override;

    /// Checks that the shape set on the output slot matches the shape inferred
    /// from the input connection.
    void ValidateTensorShapesFromInputs() override;

    /// Infers the output shape from the input shape and this layer's descriptor.
    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;

protected:
    // Protected: layers are created through the Network/Graph factory methods.
    BatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& param, const char* name);
    ~BatchToSpaceNdLayer() = default;
};
+
+} // namespace