Diffstat (limited to 'samples/DynamicSample.cpp')
-rw-r--r--  samples/DynamicSample.cpp  80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
new file mode 100644
index 0000000000..3abe12f309
--- /dev/null
+++ b/samples/DynamicSample.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/Utils.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <iostream>
+
+/// A simple example of using the ArmNN SDK API with the standalone sample dynamic backend.
+/// In this sample, the user's two input tensors are added element-wise by an addition
+/// layer to produce an output tensor holding the sums.
+int main()
+{
+ using namespace armnn;
+
+ // Construct ArmNN network
+ armnn::NetworkId networkIdentifier;
+ INetworkPtr myNetwork = INetwork::Create();
+
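+    // Build a graph in which two input layers feed a single addition layer:
+    //   input0 --.
+    //            +--> add --> output
+    //   input1 --'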
+ IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
+ IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
+ IConnectableLayer* add = myNetwork->AddAdditionLayer();
+ IConnectableLayer* output = myNetwork->AddOutputLayer(0);
+
+ input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+ input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+ add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
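+    // Every output slot must be given a TensorInfo before the network is
+    // optimised; here each connection carries a 2x1 Float32 tensor.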
+ TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
+ input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ armnn::IRuntimePtr run(armnn::IRuntime::Create(options));
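+    // run is a smart pointer, so the runtime (and any networks still loaded
+    // into it) is destroyed automatically when it goes out of scope.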
+
+ // Optimise ArmNN network
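+    // {"SampleDynamic"} is the ordered list of preferred backends for this
+    // network; Optimize assigns each layer to the first backend that supports it.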
+ armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
+ if (!optNet)
+ {
+        // This shouldn't happen for this simple sample with the sample dynamic backend,
+        // but in general Optimize can fail if none of the backends available at
+        // runtime can support the network that has been provided.
+ std::cerr << "Error: Failed to optimise the input network." << std::endl;
+ return 1;
+ }
+
+    // Load the optimised graph into the runtime, checking the returned Status
+    if (run->LoadNetwork(networkIdentifier, std::move(optNet)) != Status::Success)
+    {
+        std::cerr << "Error: Failed to load the optimised network." << std::endl;
+        return 1;
+    }
+
+    // Input data: one 2-element buffer for each input layer
+ std::vector<float> input0Data
+ {
+ 5.0f, 3.0f
+ };
+ std::vector<float> input1Data
+ {
+ 10.0f, 8.0f
+ };
+ std::vector<float> outputData(2);
+
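+    // The integer keys are the binding ids given to AddInputLayer/AddOutputLayer
+    // above; GetInputTensorInfo/GetOutputTensorInfo look up tensor info by those same ids.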
+ InputTensors inputTensors
+ {
+        {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
+        {1, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 1), input1Data.data())}
+ };
+ OutputTensors outputTensors
+ {
+        {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+ };
+
+ // Execute network
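+    // EnqueueWorkload runs the inference synchronously; the results are
+    // written into outputData via the outputTensors binding.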
+ run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
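+    // With inputs {5, 3} and {10, 8} this prints: Addition operator result is {15,11}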
+ std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
+ return 0;
+}