aboutsummaryrefslogtreecommitdiff
path: root/samples
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-02-03 12:29:56 +0000
committerJim Flynn <jim.flynn@arm.com>2020-02-03 16:36:55 +0000
commit867eba59ffd2276086a14f7b2632b390c94392d3 (patch)
treed7626f933f2a72e8c398c0f43c71b4950e980a09 /samples
parent9d0ff74843b9d36d9f233d208e18a96de1b7d47b (diff)
downloadarmnn-867eba59ffd2276086a14f7b2632b390c94392d3.tar.gz
IVGCVSW-4399 Create Sample Dynamic backend
* Move IWorkload and WorkloadInfo to include/armnn/backends * Add simple sample dynamic backend with addition workload * Add sample example to run dynamic backend * Unit tests Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com> Change-Id: I0753ce35b8e8a6223a1471388b49246d82438a44
Diffstat (limited to 'samples')
-rw-r--r--samples/CMakeLists.txt5
-rw-r--r--samples/DynamicSample.cpp80
2 files changed, 85 insertions, 0 deletions
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 640d5cd705..5505de0bec 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -2,3 +2,8 @@ if(BUILD_SAMPLE_APP AND ARMNNREF)
add_executable(SimpleSample SimpleSample.cpp)
target_link_libraries(SimpleSample armnn ${CMAKE_THREAD_LIBS_INIT})
endif()
+
+# Build the DynamicSample application only when the sample dynamic backend is
+# enabled in the build (SAMPLE_DYNAMIC_BACKEND option); it links against the
+# core armnn library and the platform thread library.
+if(SAMPLE_DYNAMIC_BACKEND)
+ add_executable(DynamicSample DynamicSample.cpp)
+ target_link_libraries(DynamicSample armnn ${CMAKE_THREAD_LIBS_INIT})
+endif()
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
new file mode 100644
index 0000000000..3abe12f309
--- /dev/null
+++ b/samples/DynamicSample.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/Utils.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <iostream>
+
+/// A simple example of using the Arm NN SDK API with a dynamically loaded backend.
+/// Two two-element input tensors are added element-wise and the result is printed.
+int main()
+{
+    using namespace armnn;
+
+    // Construct the Arm NN network: output = input0 + input1.
+    armnn::NetworkId networkIdentifier;
+    INetworkPtr myNetwork = INetwork::Create();
+
+    IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
+    IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
+    IConnectableLayer* add = myNetwork->AddAdditionLayer();
+    IConnectableLayer* output = myNetwork->AddOutputLayer(0);
+
+    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Both inputs and the output are 2x1 Float32 tensors.
+    TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
+    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    // Create the Arm NN runtime.
+    IRuntime::CreationOptions options; // default options
+    armnn::IRuntimePtr run(armnn::IRuntime::Create(options));
+
+    // Optimise the network for the sample dynamic backend.
+    armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
+    if (!optNet)
+    {
+        // Optimize can fail if the backend available at runtime cannot
+        // support the model that has been provided.
+        std::cerr << "Error: Failed to optimise the input network." << std::endl;
+        return 1;
+    }
+
+    // Load the optimised graph into the runtime. Check the returned Status:
+    // the original sample ignored it and would have run on a stale network id.
+    if (run->LoadNetwork(networkIdentifier, std::move(optNet)) != Status::Success)
+    {
+        std::cerr << "Error: Failed to load the optimised network." << std::endl;
+        return 1;
+    }
+
+    // Input data for the two input bindings.
+    std::vector<float> input0Data
+    {
+        5.0f, 3.0f
+    };
+    std::vector<float> input1Data
+    {
+        10.0f, 8.0f
+    };
+    std::vector<float> outputData(2);
+
+    InputTensors inputTensors
+    {
+        {0,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
+        // Bug fix: the second input must use the TensorInfo of binding 1,
+        // not binding 0 (the original only worked because both infos match).
+        {1,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 1), input1Data.data())}
+    };
+    OutputTensors outputTensors
+    {
+        {0,armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+    };
+
+    // Execute the network; fail loudly instead of printing uninitialised output.
+    if (run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors) != Status::Success)
+    {
+        std::cerr << "Error: Failed to execute the network." << std::endl;
+        return 1;
+    }
+
+    // Expected result: {15,11}.
+    std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
+    return 0;
+}