path: root/test/Convolution2D.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <boost/test/unit_test.hpp>
#include <log/log.h>

#include <OperationsUtils.h>

BOOST_AUTO_TEST_SUITE(Convolution2DTests)

using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;

namespace driverTestHelpers
{
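// When fp16 is enabled the backend may run the computation in reduced precision,
// so accept a result matching either the fp16 or the fp32 expectation;
// otherwise require the result to match the fp32 expectation exactly.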
#define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
   if (fp16Enabled) \
   { \
       BOOST_TEST((result == fp16Expectation || result == fp32Expectation), result << \
       " does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
   } else \
   { \
      BOOST_TEST(result == fp32Expectation); \
   }

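// Helpers defined elsewhere in the test sources. For V1_1 models this presumably
// sets the relaxComputationFloat32toFloat16 flag on the model; V1_0 models have no
// such field, so that overload is expected to be a no-op.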
void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);

void SetModelFp16Flag(V1_1::Model& model, bool fp16Enabled);

template<typename HalPolicy>
void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled = false)
{
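    // Builds a model containing a single CONV_2D operation (1x2x3x1 input,
    // 1x2x2x1 kernel, stride 2) with the requested padding scheme, runs it
    // through the driver and checks the output against hand-computed values.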
    using HalModel         = typename HalPolicy::Model;
    using HalOperationType = typename HalPolicy::OperationType;

    armnn::Compute computeDevice = armnn::Compute::GpuAcc;

#ifndef ARMCOMPUTECL_ENABLED
    computeDevice = armnn::Compute::CpuRef;
#endif

    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice, fp16Enabled));
    HalModel model = {};

    uint32_t outSize = paddingScheme == android::nn::kPaddingSame ? 2 : 1; // SAME padding yields two output elements, VALID just one

    // add operands
    float weightValue[] = {1.f, -1.f, 0.f, 1.f};
    float biasValue[]   = {0.f};

    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
    AddIntOperand<HalPolicy>(model, (int32_t)paddingScheme); // padding
    AddIntOperand<HalPolicy>(model, 2); // stride x
    AddIntOperand<HalPolicy>(model, 2); // stride y
    AddIntOperand<HalPolicy>(model, 0); // no activation
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});

    // make the convolution operation
    model.operations.resize(1);
    model.operations[0].type = HalOperationType::CONV_2D;
    model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
    model.operations[0].outputs = hidl_vec<uint32_t>{7};

    // make the prepared model
    SetModelFp16Flag(model, fp16Enabled);
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);

    // construct the request
    DataLocation inloc    = {};
    inloc.poolIndex       = 0;
    inloc.offset          = 0;
    inloc.length          = 6 * sizeof(float);
    RequestArgument input = {};
    input.location        = inloc;
    input.dimensions      = hidl_vec<uint32_t>{};

    DataLocation outloc    = {};
    outloc.poolIndex       = 1;
    outloc.offset          = 0;
    outloc.length          = outSize * sizeof(float);
    RequestArgument output = {};
    output.location        = outloc;
    output.dimensions      = hidl_vec<uint32_t>{};

    V1_0::Request request = {};
    request.inputs  = hidl_vec<RequestArgument>{input};
    request.outputs = hidl_vec<RequestArgument>{output};

    // set the input data (matching source test)
    float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1.f, -1024.25f};
    AddPoolAndSetData(6, request, indata);

    // add memory for the output
    android::sp<IMemory> outMemory = AddPoolAndGetData<float>(outSize, request);
    float* outdata = reinterpret_cast<float*>(static_cast<void*>(outMemory->getPointer()));

    // run the execution
    Execute(preparedModel, request);

    // check the result
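    // With VALID padding the single output is the top-left 2x2 window:
    // 1024.25*1 + 1*(-1) + 3*0 + (-1)*1 = 1022.25 (plus a zero bias).
    // In fp16, 1024.25 rounds to 1024, giving 1022. With SAME padding the second
    // window only covers zero-weighted values and zero padding, so outdata[1] is 0.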
    switch (paddingScheme)
    {
    case android::nn::kPaddingValid:
        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
        break;
    case android::nn::kPaddingSame:
        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
        BOOST_TEST(outdata[1] == 0.f);
        break;
    default:
        BOOST_TEST(false);
        break;
    }
}

} // namespace driverTestHelpers

BOOST_AUTO_TEST_SUITE_END()