path: root/src/armnn/layers/MeanLayer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "MeanLayer.hpp"
#include "LayerCloneBase.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <algorithm>
#include <cstring>
#include <vector>

namespace armnn
{

MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Mean, param, name)
{}

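// Builds the backend workload for this layer: the reduction parameters (the axis list
// and the keepDims flag) are copied into a MeanQueueDescriptor, which the backend
// workload factory then uses to create the Mean workload.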
std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
                                                     const armnn::IWorkloadFactory& factory) const
{
    MeanQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
    descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;

    return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
}

MeanLayer* MeanLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<MeanLayer>(graph, m_Param, GetName());

    layer->m_Param.m_Axis = m_Param.m_Axis;
    layer->m_Param.m_KeepDims = m_Param.m_KeepDims;

    return std::move(layer);
}

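// Infers the output tensor shape from the input shape and the reduction parameters,
// then checks it against the shape already set on the output slot.
// Illustrative example (values assumed, not taken from a real network): for an input
// of shape { 1, 3, 2, 4 }:
//   m_Axis = { 1, 3 }, m_KeepDims = false  ->  inferred shape { 1, 2 }
//   m_Axis = { 1, 3 }, m_KeepDims = true   ->  inferred shape { 1, 1, 2, 1 }
//   m_Axis = { },      m_KeepDims = false  ->  every dimension reduced, shape { 1 }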
void MeanLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();

    BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
                     "MeanLayer: Mean supports up to 4D input.");

    unsigned int rank = input.GetNumDimensions();
    unsigned int outputRank = 0;

    // Calculate output dimension
    if (m_Param.m_KeepDims)
    {
        outputRank = rank;
    }
    else if (m_Param.m_Axis.empty())
    {
        outputRank = 1;
    }
    else if (m_Param.m_Axis.size() >= input.GetNumDimensions())
    {
        throw LayerValidationException("MeanLayer: The number of dimensions to reduce must be less than the number of input dimensions");
    }
    else
    {
        outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
        if (outputRank == 0)
        {
            outputRank = 1;
        }
    }

    std::vector<unsigned int> dimSizes(outputRank, 1);
    if (!m_Param.m_Axis.empty())
    {
        // Skip the dimension that has been reduced unless keepDims is true.
        unsigned int outputIndex = 0;
        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
        {
            if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
            {
                dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
                ++outputIndex;
            }
            else if (m_Param.m_KeepDims)
            {
                dimSizes[outputIndex] = 1;
                ++outputIndex;
            }
        }
    }
    const TensorShape inferredShape(outputRank, dimSizes.data());

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShape);
}

void MeanLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitMeanLayer(this, GetParameters(), GetName());
}

} // namespace armnn