aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
blob: aec57dbad1155c60556542e5486bb713785855a4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "CastTestImpl.hpp"
#include "ElementwiseUnaryTestImpl.hpp"

#include <cstdint>


template<armnn::DataType inputDataType, armnn::DataType outputDataType, typename TInput, typename TOutput>
LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                     const armnn::ITensorHandleFactory& tensorHandleFactory,
                                     const std::vector<TInput>& inputValues,
                                     const std::vector<TOutput>& outputValues)
{
    // Tensor handles come from the factory; the memory manager is not used here.
    IgnoreUnused(memoryManager);

    // Every cast test in this file shares the same fixed 1x3x2x3 shape (18 elements).
    const armnn::TensorShape shape({1, 3, 2, 3});
    armnn::TensorInfo inputTensorInfo(shape, inputDataType);
    armnn::TensorInfo outputTensorInfo(shape, outputDataType);

    // Quantized endpoints get identity quantization (scale 1, offset 0) so the
    // cast is exercised on raw values rather than on any real (de)quantization.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }
    if (armnn::IsQuantizedType<TOutput>())
    {
        outputTensorInfo.SetQuantizationScale(1.0f);
        outputTensorInfo.SetQuantizationOffset(0);
    }

    std::vector<TOutput> actualOutput(outputTensorInfo.GetNumElements());

    auto inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    auto outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Cast workload: a single input tensor and a single output tensor.
    armnn::CastQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateCast(descriptor, workloadInfo);

    // Allocate backing memory before any data transfer or execution.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // Package actual vs expected values together with the shapes for comparison.
    return LayerTestResult<TOutput, 4>(actualOutput,
                                       outputValues,
                                       outputHandle->GetShape(),
                                       outputTensorInfo.GetShape());
}

// Casts Signed32 -> Float32; every input integer is exactly representable as a float.
LayerTestResult<float, 4> CastInt32ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int32_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, 1,
         3,  1,  3,  1,  2,  1,  3,  1, 3
    };
    const std::vector<float> expected =
    {
        -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.0f,  1.0f,  3.0f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    return CastTest<armnn::DataType::Signed32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts QSymmS16 -> Float32 (identity quantization, so raw int16 values map to floats).
LayerTestResult<float, 4> CastInt16ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int16_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, 1,
         3,  1,  3,  1,  2,  1,  3,  1, 3
    };
    const std::vector<float> expected =
    {
        -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.0f,  1.0f,  3.0f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    return CastTest<armnn::DataType::QSymmS16, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts QSymmS8 -> Float32 (identity quantization, so raw int8 values map to floats).
LayerTestResult<float, 4> CastInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int8_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, 1,
         3,  1,  3,  1,  2,  1,  3,  1, 3
    };
    const std::vector<float> expected =
    {
        -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.0f,  1.0f,  3.0f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    return CastTest<armnn::DataType::QSymmS8, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts QAsymmS8 -> Float32 (identity quantization, so raw int8 values map to floats).
LayerTestResult<float, 4> CastInt8AsymmToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int8_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, 1,
         3,  1,  3,  1,  2,  1,  3,  1, 3
    };
    const std::vector<float> expected =
    {
        -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.0f,  1.0f,  3.0f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts QAsymmU8 -> Float32 (identity quantization, so raw uint8 values map to floats).
//
// Fix: the input vector previously used `u_int8_t`, a non-standard BSD/POSIX typedef
// (absent on MSVC and not guaranteed by the C++ standard). Use the standard `uint8_t`
// from <cstdint>, consistent with the rest of this file (see CastInt8ToUInt82dTest).
LayerTestResult<float, 4> CastUInt8ToFloat2dTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    std::vector<uint8_t> inputValues = { 1, 3, 1, 3, 1, 3, 1, 3, 1,
                                         3, 1, 3, 1, 2, 1, 3, 1, 3 };
    std::vector<float> outputValues  = { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
                                         3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
    return CastTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                                         tensorHandleFactory, inputValues,
                                                                         outputValues);
}

// Casts QSymmS8 -> QAsymmU8; negative inputs saturate to 0 in the unsigned output.
LayerTestResult<uint8_t, 4> CastInt8ToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                  const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int8_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, -1,
         3,  1,  3,  1,  2,  1,  3,  1,  3
    };
    // First nine entries are negative, hence clamped to 0 after the cast.
    const std::vector<uint8_t> expected =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        3, 1, 3, 1, 2, 1, 3, 1, 3
    };
    return CastTest<armnn::DataType::QSymmS8, armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts QAsymmS8 -> QAsymmU8; negative inputs saturate to 0 in the unsigned output.
LayerTestResult<uint8_t, 4> CastInt8AsymmToUInt82dTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<int8_t> input =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, -1,
         3,  1,  3,  1,  2,  1,  3,  1,  3
    };
    // First nine entries are negative, hence clamped to 0 after the cast.
    const std::vector<uint8_t> expected =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        3, 1, 3, 1, 2, 1, 3, 1, 3
    };
    return CastTest<armnn::DataType::QAsymmS8, armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts Float16 -> Float32. All chosen values are exactly representable in half
// precision, so the widening cast reproduces them bit-exactly as floats.
LayerTestResult<float, 4> CastFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    using namespace half_float::literal;

    const std::vector<armnn::Half> input =
    {
        -1.10_h, -3._h, -1.30_h, -3._h, -1._h, -3._h, -1._h, -3._h, 1._h,
         3.10_h,  1._h,  3.30_h,  1._h,  2._h,  1._h,  3._h,  1._h, 3._h
    };
    const std::vector<float> expected =
    {
        -1.1f, -3.0f, -1.3f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.1f,  1.0f,  3.3f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    return CastTest<armnn::DataType::Float16, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts BFloat16 -> Float32. The input is built with QuantizedVector at identity
// scale/offset, which converts each float to its nearest BFloat16 representation.
LayerTestResult<float, 4> CastBFloat16ToFloat322dTest(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                              const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<armnn::BFloat16> input = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    const std::vector<float> expected =
    {
        -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
        1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
    };

    return CastTest<armnn::DataType::BFloat16, armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts Float32 -> Float16, covering narrowing behaviour: 3.4E38 exceeds the half
// range and saturates to 6.55E4 (half max), and 0.00000004 underflows toward 0.
LayerTestResult<armnn::Half, 4> CastFloat32ToFloat162dTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    using namespace half_float::literal;

    const std::vector<float> input =
    {
        -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f,
        0.00000004f, 3.4E38f, 300.0f, 0.5f, 1.3f, 1.5f, 2.1E4f, 8.76f, 15.2f, 37.5f
    };
    const std::vector<armnn::Half> expected =
    {
        -37.50_h, -15.20_h, -8.76_h, -2._h, -1.50_h, -1.30_h, -0.50_h, -0.40_h,
        0._h, 6.55E4_h, 300._h, 0.50_h, 1.30_h, 1.50_h, 2.1E4_h, 8.76_h, 15.20_h, 37.50_h
    };

    return CastTest<armnn::DataType::Float32, armnn::DataType::Float16>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts Float32 -> QAsymmS8. Expected values show truncation toward zero for
// fractional inputs (e.g. -3.5 -> -3, 3.9 -> 3).
LayerTestResult<int8_t , 4> CastFloat32ToInt82dTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<float> input =
    {
        -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.1f,  1.5f,  3.9f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    const std::vector<int8_t> expected =
    {
        -1, -3, -1, -3, -1, -3, -1, -3, 1,
         3,  1,  3,  1,  2,  1,  3,  1, 3
    };
    return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmS8>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}

// Casts Float32 -> QAsymmU8. Expected values show negative inputs clamped to 0
// and fractional positives truncated (e.g. 3.9 -> 3).
LayerTestResult<uint8_t , 4> CastFloat32ToUInt82dTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const std::vector<float> input =
    {
        -1.0f, -3.5f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
         3.1f,  1.5f,  3.9f,  1.0f,  2.0f,  1.0f,  3.0f,  1.0f, 3.0f
    };
    const std::vector<uint8_t> expected =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 1,
        3, 1, 3, 1, 2, 1, 3, 1, 3
    };
    return CastTest<armnn::DataType::Float32, armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, input, expected);
}