path: root/samples/DynamicSample.cpp
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/Utils.hpp>
#include <armnn/Descriptors.hpp>

#include <iostream>

/// A simple example of using the ArmNN SDK API. In this sample, two user-supplied input tensors are added
/// element-wise by an addition layer, so each output element is the sum of the corresponding input elements.
int main()
{
    using namespace armnn;

    // Construct ArmNN network
    armnn::NetworkId networkIdentifier;
    INetworkPtr myNetwork = INetwork::Create();

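    // Add the layers. The integer passed to AddInputLayer/AddOutputLayer is the
    // binding id used later to attach the input and output data buffers.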
    IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
    IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
    IConnectableLayer* add    = myNetwork->AddAdditionLayer();
    IConnectableLayer* output = myNetwork->AddOutputLayer(0);

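    // Connect the layers: each input feeds one slot of the addition layer,
    // and the addition result feeds the output layer.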
    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

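    // Every output slot needs a TensorInfo before the network can be optimised.
    // Here each tensor holds two Float32 elements.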
    TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

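    // The runtime discovers and loads dynamic backends (such as SampleDynamic) when it is created.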
    // Create ArmNN runtime
    IRuntime::CreationOptions options; // default options
    armnn::IRuntimePtr run(armnn::IRuntime::Create(options));

    // Optimise ArmNN network
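    // The second argument lists the preferred backends in order; "SampleDynamic" is the
    // id under which the dynamically loaded sample backend registers itself.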
    armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
    if (!optNet)
    {
        // This shouldn't happen for this simple sample, with reference backend.
        // But in general usage Optimize could fail if the hardware at runtime cannot
        // support the model that has been provided.
        std::cerr << "Error: Failed to optimise the input network." << std::endl;
        return 1;
    }

    // Load the optimised graph into the runtime; LoadNetwork fills in networkIdentifier.
    if (run->LoadNetwork(networkIdentifier, std::move(optNet)) != Status::Success)
    {
        std::cerr << "Error: Failed to load the optimised network into the runtime." << std::endl;
        return 1;
    }

    // Input data
    std::vector<float> input0Data{ 5.0f, 3.0f };
    std::vector<float> input1Data{ 10.0f, 8.0f };
    std::vector<float> outputData(2);

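    // Bind the data buffers to the network: each entry pairs a layer binding id
    // with a tensor that wraps the caller-owned memory.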
    InputTensors inputTensors
        {
            {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
            {1, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 1), input1Data.data())}
        };
    OutputTensors outputTensors
        {
            {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
        };

    // Execute the network; EnqueueWorkload runs the inference synchronously.
    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);

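    // With the inputs above, the element-wise sums are {15, 11}.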
    std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
    return 0;
}