path: root/delegate/test/ArmnnDelegateTest.cpp

//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <classic/include/armnn_delegate.hpp>
#include <opaque/include/armnn_delegate.hpp>
#include <opaque/include/Version.hpp>

#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>

namespace armnnDelegate
{

TEST_SUITE("ArmnnDelegate")
{

TEST_CASE ("ArmnnDelegate Registered")
{
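    // Build a minimal in-memory model through the TfLite Interpreter API:
    // two float inputs feeding a single ADD node that produces one output.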
    using namespace tflite;
    auto tfLiteInterpreter = std::make_unique<Interpreter>();

    tfLiteInterpreter->AddTensors(3);
    tfLiteInterpreter->SetInputs({0, 1});
    tfLiteInterpreter->SetOutputs({2});

    // Describe each tensor: float32, shape {1,2,2,1}, no quantization.
    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());

    // Look up the built-in ADD kernel (version 1) and add it as the graph's only node.
    tflite::ops::builtin::BuiltinOpResolver opResolver;
    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);

    // Create the Armnn Delegate
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
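    // Arbitrary backend options, used here only to exercise the BackendOptions
    // constructor of DelegateOptions.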
    std::vector<armnn::BackendOptions> backendOptions;
    backendOptions.emplace_back(
        armnn::BackendOptions{ "BackendName",
                               {
                                  { "Option1", 42 },
                                  { "Option2", true }
                               }}
    );

    armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
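    // Own the delegate via unique_ptr so TfLiteArmnnDelegateDelete runs automatically.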
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                       theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                        armnnDelegate::TfLiteArmnnDelegateDelete);

    // ModifyGraphWithDelegate takes ownership of the delegate and applies it to the graph.
    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
    CHECK(status == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
{
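    // Same single-ADD model as the test above, but the delegate is configured
    // through explicit armnn::OptimizerOptions rather than BackendOptions.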
    using namespace tflite;
    auto tfLiteInterpreter = std::make_unique<Interpreter>();

    tfLiteInterpreter->AddTensors(3);
    tfLiteInterpreter->SetInputs({0, 1});
    tfLiteInterpreter->SetOutputs({2});

    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());

    tflite::ops::builtin::BuiltinOpResolver opResolver;
    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);

    // Create the Armnn Delegate
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    // Arguments: reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled.
    armnn::OptimizerOptions optimizerOptions(true, true, false, true);

    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                       theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                        armnnDelegate::TfLiteArmnnDelegateDelete);

    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
    CHECK(status == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
}

TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
{
    // Check default options can be created
    auto options = TfLiteArmnnDelegateOptionsDefault();
    armnnOpaqueDelegate::ArmnnOpaqueDelegate delegate(options);

    // Check version returns correctly
    auto version = delegate.GetVersion();
    CHECK_EQ(version, OPAQUE_DELEGATE_VERSION);

    auto* builder = delegate.GetDelegateBuilder();
    CHECK(builder);

    // Check Opaque delegate created
    auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(&options);
    CHECK(opaqueDelegate);

    // Check Opaque Delegate can be deleted
    CHECK(opaqueDelegate->opaque_delegate_builder->data);
    armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete(opaqueDelegate);
}

TEST_CASE ("DelegateOptions_ClassicDelegateDefault")
{
    // Check default options can be created
    auto options = TfLiteArmnnDelegateOptionsDefault();

    // Check Classic delegate created
    auto classicDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(options);
    CHECK(classicDelegate);

    // Check Classic Delegate can be deleted
    CHECK(classicDelegate->data_);
    armnnDelegate::TfLiteArmnnDelegateDelete(classicDelegate);
}

} // End of TEST_SUITE("ArmnnDelegate")

} // namespace armnnDelegate