path: root/src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClSpaceToBatchNdWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

#include <boost/polymorphic_pointer_cast.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

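// Queries the Compute Library to check whether CLSpaceToBatchLayer supports the given input/output
// tensor infos together with the block shape and padding held in the SpaceToBatchNdDescriptor.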
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const SpaceToBatchNdDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN blockShape is [H, W], but CL asks for W, H
    int32_t blockHeight = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[0]);
    int32_t blockWidth  = boost::numeric_cast<int32_t>(descriptor.m_BlockShape[1]);

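    // ArmNN's padList is ordered [ {padTop, padBottom}, {padLeft, padRight} ], while CL expects
    // the pairs regrouped as Size2D(left, top) and Size2D(right, bottom).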
    arm_compute::Size2D paddingLeftTop     = BuildArmComputeSize2D(
        descriptor.m_PadList[1].first, descriptor.m_PadList[0].first);
    arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
        descriptor.m_PadList[1].second, descriptor.m_PadList[0].second);

    return arm_compute::CLSpaceToBatchLayer::validate(&aclInputInfo,
                                                      blockWidth,
                                                      blockHeight,
                                                      paddingLeftTop,
                                                      paddingRightBottom,
                                                      &aclOutputInfo);
}

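// The constructor converts the ArmNN parameters into the ordering the Compute Library expects and
// configures the CLSpaceToBatchLayer function up front; Execute() then simply runs it.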
ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
    const SpaceToBatchNdQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<SpaceToBatchNdQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1);

    arm_compute::ICLTensor& input  =
        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output =
        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();

    // ArmNN blockShape is [H, W], but CL asks for W, H
    int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
    int32_t blockWidth  = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[1]);

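    // As above, regroup ArmNN's [ {top, bottom}, {left, right} ] padding into CL's (left, top) and (right, bottom) pairs.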
    arm_compute::Size2D paddingLeftTop     = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_PadList[1].first, m_Data.m_Parameters.m_PadList[0].first);
    arm_compute::Size2D paddingRightBottom = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_PadList[1].second, m_Data.m_Parameters.m_PadList[0].second);

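    // Propagate the ArmNN data layout (NCHW or NHWC) to the CL tensor infos before configuring the layer.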
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    m_SpaceToBatchLayer.configure(&input,
                                  blockWidth,
                                  blockHeight,
                                  paddingLeftTop,
                                  paddingRightBottom,
                                  &output);
}

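// Runs the pre-configured CLSpaceToBatchLayer within a CL profiling scope.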
void ClSpaceToBatchNdWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSpaceToBatchNdWorkload_Execute");
    RunClFunction(m_SpaceToBatchLayer, CHECK_LOCATION());
}

} //namespace armnn