ArmNN
 22.05.01
DynamicSample.cpp

This is a simple example that shows how to use a dynamic backend. Dynamic backends can be compiled as standalone libraries against Arm NN and loaded by Arm NN dynamically at runtime. This way you can quickly integrate new backends without having to recompile Arm NN.

This example makes use of a very simplistic dynamic backend called 'SampleDynamic'. For more information about dynamic backends, and a guide explaining how this particular backend was created so you can create a dynamic backend yourself, see "Dynamically loadable Backend".

//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/Utils.hpp>

#include <iostream>
#include <utility>
#include <vector>
/// A simple example of using the ArmNN SDK API with the standalone sample dynamic backend.
/// In this example, an addition layer is used to add 2 input tensors to produce a result output tensor.
int main()
{
using namespace armnn;
// Construct ArmNN network
armnn::NetworkId networkIdentifier;
IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
IConnectableLayer* add = myNetwork->AddAdditionLayer();
IConnectableLayer* output = myNetwork->AddOutputLayer(0);
input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// Create ArmNN runtime
IRuntime::CreationOptions options; // default options
// Optimise ArmNN network
armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
if (!optNet)
{
// This shouldn't happen for this simple sample, with reference backend.
// But in general usage Optimize could fail if the hardware at runtime cannot
// support the model that has been provided.
std::cerr << "Error: Failed to optimise the input network." << std::endl;
return 1;
}
// Load graph into runtime
run->LoadNetwork(networkIdentifier, std::move(optNet));
// input data
std::vector<float> input0Data
{
5.0f, 3.0f
};
std::vector<float> input1Data
{
10.0f, 8.0f
};
std::vector<float> outputData(2);
TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
{0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
{1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
};
OutputTensors outputTensors
{
{0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
};
// Execute network
run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
return 0;
}