path: root/src/armnn/layers/SpaceToBatchNdLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SpaceToBatchNdLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <numeric>

using namespace armnnUtils;

namespace armnn
{

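// A SpaceToBatchNd layer has a single input and a single output; the descriptor supplies the
// block shape, the per-dimension padding and the data layout used by the layer.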
SpaceToBatchNdLayer::SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name)
    : LayerWithParameters(1, 1, LayerType::SpaceToBatchNd, param, name)
{}

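// CreateWorkload copies the layer's block shape and padding into a queue descriptor and asks the
// backend workload factory to build the matching SpaceToBatchNd workload.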
std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    SpaceToBatchNdQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BlockShape = m_Param.m_BlockShape;
    descriptor.m_Parameters.m_PadList    = m_Param.m_PadList;
    SetAdditionalInfo(descriptor);

    return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor));
}

SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
    return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}

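// InferOutputShapes derives the output shape from a single input shape:
//   batch  = inputBatch * product(blockShape)
//   height = (inputHeight + padTop  + padBottom) / blockShape[0]
//   width  = (inputWidth  + padLeft + padRight)  / blockShape[1]
// Illustrative example (assumed values, not taken from this source): an NHWC input of
// shape [1, 4, 4, 1] with blockShape {2, 2} and zero padding infers an output of [4, 2, 2, 1].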
std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 1);

    TensorShape inputShape = inputShapes[0];
    TensorShape outputShape(inputShape);

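    // The batch dimension grows by the product of all block shape values.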
    outputShape[0] = inputShape[0] * std::accumulate(m_Param.m_BlockShape.begin(),
                                                     m_Param.m_BlockShape.end(),
                                                     1U,
                                                     std::multiplies<>());

    DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
    unsigned int heightIndex = dimensionIndices.GetHeightIndex();
    unsigned int widthIndex = dimensionIndices.GetWidthIndex();

    std::pair<unsigned int, unsigned int> heightPad = m_Param.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Param.m_PadList[1];

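    // Each spatial dimension is padded and then divided by its corresponding block shape value.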
    outputShape[heightIndex] =
        (inputShape[heightIndex] + heightPad.first + heightPad.second) / m_Param.m_BlockShape[0];
    outputShape[widthIndex] =
        (inputShape[widthIndex] + widthPad.first + widthPad.second) / m_Param.m_BlockShape[1];

    return std::vector<TensorShape>({ outputShape });
}

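// ValidateTensorShapesFromInputs checks the connected input, infers the expected output shape and
// validates (or, depending on the shape inference method, copies) it against the output slot.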
void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    std::vector<TensorShape> inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}

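// Accept() belongs to the deprecated ILayerVisitor interface, hence the deprecation-warning
// suppression around it.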
ARMNN_NO_DEPRECATE_WARN_BEGIN
void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
}
ARMNN_NO_DEPRECATE_WARN_END

} // namespace armnn