author    Colm Donelan <colm.donelan@arm.com>  2023-10-02 17:01:37 +0100
committer Colm Donelan <colm.donelan@arm.com>  2023-10-19 07:27:02 +0000
commit    0aef653469eebbdf88308b7fbc6bb78452d380d0 (patch)
tree      868fb9ec011d5c4f05950e8bca5e54c92e645358
parent    b41793a9f9afc43fb04a991ca819818fca8faab8 (diff)
download  armnn-0aef653469eebbdf88308b7fbc6bb78452d380d0.tar.gz
IVGCVSW-7731 Add test executables that verify released header files.
Create a CMake project with executables to exercise the external interfaces of Arm NN.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I1e3a8ed726903aac5f52d78c55d2e1b1352c8362
-rw-r--r--  tests/InterfaceTests/CMakeLists.txt             121
-rw-r--r--  tests/InterfaceTests/ClassicDelegateTest.cpp     59
-rw-r--r--  tests/InterfaceTests/OnnxParserTest.cpp         207
-rw-r--r--  tests/InterfaceTests/OpaqueDelegateTest.cpp      67
-rw-r--r--  tests/InterfaceTests/README.md                   31
-rw-r--r--  tests/InterfaceTests/TfLiteParserTest.cpp        78
-rw-r--r--  tests/InterfaceTests/simple_conv2d_1_op.tflite  bin 0 -> 928 bytes
7 files changed, 563 insertions, 0 deletions
diff --git a/tests/InterfaceTests/CMakeLists.txt b/tests/InterfaceTests/CMakeLists.txt
new file mode 100644
index 0000000000..6521e7c302
--- /dev/null
+++ b/tests/InterfaceTests/CMakeLists.txt
@@ -0,0 +1,121 @@
+#
+# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+# Usage: cmake -DARMNN_ROOT=<path to armnn library and includes>
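+#              -DTFLITE_LIB_ROOT=<path to the built TfLite libraries>
+#              -DTFLITE_INCLUDE_ROOT=<path to the TfLite includes>
+#              -DFLATBUFFERS_ROOT=<path to the Flatbuffers include files>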
+
+cmake_minimum_required (VERSION 3.7.0)
+project(InterfaceTests)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Reuse the TfLite cmake module from Arm NN. This module should have no
+# external dependencies on other parts of Arm NN. Its only required
+# parameter is TFLITE_LIB_ROOT.
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/../../delegate/cmake/Modules/")
+
+# This is the base path for the Arm NN binary build. The root directory
+# contains the library files and also an include directory.
+option(ARMNN_ROOT "Location where the Arm NN libraries and 'include' directory can be found" Off)
+
+# TfLite needs flatbuffers.
+option(FLATBUFFERS_ROOT "Location where Flatbuffers include files can be found." Off)
+
+# To test both delegates we need TfLite.
+option(TFLITE_INCLUDE_ROOT "Location where the TfLite includes can be found." Off)
+option(TFLITE_LIB_ROOT "Location where the built TfLite libraries can be found." Off)
+
+
+# Locate the Arm NN libraries and includes.
+message(STATUS "Arm NN root is set to \"${ARMNN_ROOT}\"")
+# Exclude default paths to ensure only the specified path is used.
+find_path(ARMNN_LIB_INCLUDE armnn/ArmNN.hpp PATHS ${ARMNN_ROOT}/include NO_DEFAULT_PATH)
+message(STATUS "Arm NN library include directory located at: ${ARMNN_LIB_INCLUDE}")
+find_library(ARMNN_LIB
+ NAMES armnn
+ HINTS ${ARMNN_ROOT})
+message(STATUS "Arm NN library location set to ${ARMNN_LIB}")
+
+# Arm NN uses pthreads.
+find_package(Threads)
+
+# The first test exercises the Arm NN graph interface. We compile
+# SimpleSample into a binary using the libraries and includes we have
+# found.
+add_executable(SimpleSample ../../samples/SimpleSample.cpp)
+# We're using BEFORE here to ensure no stray system includes get used first.
+target_include_directories(SimpleSample BEFORE PUBLIC ${ARMNN_LIB_INCLUDE})
+target_link_libraries(SimpleSample ${ARMNN_LIB} ${CMAKE_THREAD_LIBS_INIT})
+
+# TfLite parser.
+find_library(ARMNN_TFLITE_LIB
+ NAMES armnnTfLiteParser
+ HINTS ${ARMNN_ROOT})
+add_executable(TfLiteParserTest ./TfLiteParserTest.cpp)
+target_include_directories(TfLiteParserTest BEFORE PUBLIC ${ARMNN_LIB_INCLUDE})
+target_link_libraries(TfLiteParserTest ${ARMNN_LIB} ${ARMNN_TFLITE_LIB} ${CMAKE_THREAD_LIBS_INIT})
+
+# Onnx parser
+find_library(ARMNN_ONNX_LIB
+ NAMES armnnOnnxParser
+ HINTS ${ARMNN_ROOT})
+add_executable(OnnxParserTest ./OnnxParserTest.cpp)
+target_include_directories(OnnxParserTest BEFORE PUBLIC ${ARMNN_LIB_INCLUDE})
+target_link_libraries(OnnxParserTest ${ARMNN_LIB} ${ARMNN_ONNX_LIB} ${CMAKE_THREAD_LIBS_INIT})
+
+
+# These next two targets are for our TfLite delegates. The mechanism used to
+# compile and use them does not conform to how TensorFlow intends delegates
+# to be used. We include these tests to highlight potentially missing, or
+# awkward, includes that could occur.
+
+# Find Flatbuffers dependency
+find_package(Flatbuffers REQUIRED MODULE)
+
+# Find TfLite libraries and includes.
+find_package(TfLite REQUIRED MODULE)
+find_path(TFLITE_INCLUDE tensorflow/c/c_api.h PATHS ${TFLITE_INCLUDE_ROOT} NO_DEFAULT_PATH)
+
+# Classic delegate
+find_library(ARMNN_CLASSIC_DELEGATE_LIB
+ NAMES armnnDelegate
+ HINTS ${ARMNN_ROOT})
+add_executable(ClassicDelegateTest ./ClassicDelegateTest.cpp)
+find_path(ARMNN_COMMON_DELEGATE_INCLUDE DelegateOptions.hpp PATHS ${ARMNN_ROOT}/include/armnnDelegate/armnn/delegate/common/include/ NO_DEFAULT_PATH)
+find_path(ARMNN_CLASSIC_DELEGATE_INCLUDE armnn_delegate.hpp PATHS ${ARMNN_ROOT}/include/armnnDelegate/armnn/delegate/classic/include/ NO_DEFAULT_PATH)
+target_include_directories(ClassicDelegateTest BEFORE PUBLIC ${ARMNN_LIB_INCLUDE}
+ ${Flatbuffers_INCLUDE_DIR}
+ ${TFLITE_INCLUDE}
+ ${ARMNN_COMMON_DELEGATE_INCLUDE}
+ ${ARMNN_CLASSIC_DELEGATE_INCLUDE})
+target_link_libraries(ClassicDelegateTest ${TfLite_LIB}
+ ${ARMNN_CLASSIC_DELEGATE_LIB}
+ ${ARMNN_LIB}
+ ${Flatbuffers_LIB}
+ ${CMAKE_THREAD_LIBS_INIT}
+ ${CMAKE_DL_LIBS})
+
+# Opaque delegate
+find_library(ARMNN_OPAQUE_DELEGATE_LIB
+ NAMES armnnOpaqueDelegate
+ HINTS ${ARMNN_ROOT})
+# Additional Abseil synchronization libraries for the opaque delegate.
+find_package(TfLiteAbsl REQUIRED MODULE)
+
+add_executable(OpaqueDelegateTest ./OpaqueDelegateTest.cpp)
+find_path(ARMNN_OPAQUE_DELEGATE_INCLUDE armnn_delegate.hpp PATHS ${ARMNN_ROOT}/include/armnnDelegate/armnn/delegate/opaque/include/ NO_DEFAULT_PATH)
+# delegate_registry.h happens to use an Abseil mutex. We need to find and add its path too.
+find_path(TFLITE_ABSL_INCLUDE absl/synchronization/mutex.h PATHS ${TFLITE_LIB_ROOT}/abseil-cpp/ NO_DEFAULT_PATH)
+target_include_directories(OpaqueDelegateTest BEFORE PUBLIC ${ARMNN_LIB_INCLUDE}
+ ${Flatbuffers_INCLUDE_DIR}
+ ${TFLITE_INCLUDE}
+ ${TFLITE_ABSL_INCLUDE}
+ ${ARMNN_COMMON_DELEGATE_INCLUDE}
+ ${ARMNN_OPAQUE_DELEGATE_INCLUDE})
+target_link_libraries(OpaqueDelegateTest ${ARMNN_OPAQUE_DELEGATE_LIB}
+ ${ARMNN_LIB}
+ ${TfLite_Extra_Absl_LIB}
+ ${TfLite_LIB}
+ ${Flatbuffers_LIB}
+ ${CMAKE_THREAD_LIBS_INIT}
+ ${CMAKE_DL_LIBS})
diff --git a/tests/InterfaceTests/ClassicDelegateTest.cpp b/tests/InterfaceTests/ClassicDelegateTest.cpp
new file mode 100644
index 0000000000..2146c70e8c
--- /dev/null
+++ b/tests/InterfaceTests/ClassicDelegateTest.cpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn_delegate.hpp>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <iostream>
+
+int main()
+{
+ std::unique_ptr<tflite::FlatBufferModel> model;
+ model = tflite::FlatBufferModel::BuildFromFile("./simple_conv2d_1_op.tflite");
+ if (!model)
+ {
+ std::cout << "Failed to load TfLite model from: ./simple_conv2d_1_op.tflite" << std::endl;
+ return -1;
+ }
+ std::unique_ptr<tflite::Interpreter> tfLiteInterpreter;
+ tfLiteInterpreter = std::make_unique<tflite::Interpreter>();
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ tflite::InterpreterBuilder builder(*model, resolver);
+ if (builder(&tfLiteInterpreter) != kTfLiteOk)
+ {
+ std::cout << "Error loading the model into the TfLiteInterpreter." << std::endl;
+ return -1;
+ }
+
+    // Create the Arm NN delegate with default DelegateOptions targeting the CpuRef backend.
+ armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)> theArmnnDelegate(
+ armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), armnnDelegate::TfLiteArmnnDelegateDelete);
+    // Register the Arm NN delegate with the TfLiteInterpreter.
+ auto result = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+ if (result != kTfLiteOk)
+ {
+ std::cout << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter." << std::endl;
+ return -1;
+ }
+ if (tfLiteInterpreter->AllocateTensors() != kTfLiteOk)
+ {
+ std::cout << "Failed to allocate tensors in the TfLiteInterpreter." << std::endl;
+ return -1;
+ }
+
+    // Ideally we would populate the input tensors here, but the test runs without doing so.
+
+    TfLiteStatus status = tfLiteInterpreter->Invoke();
+ if (status != kTfLiteOk)
+ {
+ std::cout << "Inference failed." << std::endl;
+ return -1;
+ }
+}
diff --git a/tests/InterfaceTests/OnnxParserTest.cpp b/tests/InterfaceTests/OnnxParserTest.cpp
new file mode 100644
index 0000000000..f6dc9a42b2
--- /dev/null
+++ b/tests/InterfaceTests/OnnxParserTest.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnnOnnxParser/IOnnxParser.hpp>
+#include <iostream>
+
+int main()
+{
+    // Raw protobuf text for a single-layer Conv2D model.
+    std::string prototext = R"(
+ ir_version: 3
+ producer_name: "CNTK"
+ producer_version: "2.5.1"
+ domain: "ai.cntk"
+ model_version: 1
+ graph {
+ name: "CNTKGraph"
+ input {
+ name: "Input"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "Weight"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 3
+ }
+ dim {
+ dim_value: 3
+ }
+ }
+ }
+ }
+ }
+ initializer {
+ dims: 1
+ dims: 1
+ dims: 3
+ dims: 3
+ data_type: 1
+ float_data: 2
+ float_data: 1
+ float_data: 0
+ float_data: 6
+ float_data: 2
+ float_data: 1
+ float_data: 4
+ float_data: 1
+ float_data: 2
+ name: "Weight"
+ }
+ node {
+ input: "Input"
+ input: "Weight"
+ output: "Output"
+ name: "Convolution"
+ op_type: "Conv"
+ attribute {
+ name: "kernel_shape"
+ ints: 3
+ ints: 3
+ type: INTS
+ }
+ attribute {
+ name: "strides"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ attribute {
+ name: "auto_pad"
+ s: "VALID"
+ type: STRING
+ }
+ attribute {
+ name: "group"
+ i: 1
+ type: INT
+ }
+ attribute {
+ name: "dilations"
+ ints: 1
+ ints: 1
+ type: INTS
+ }
+ doc_string: ""
+ domain: ""
+ }
+ output {
+ name: "Output"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ opset_import {
+ version: 7
+ })";
+
+ using namespace armnn;
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ IRuntimePtr runtime = IRuntime::Create(options);
+ // Create the parser.
+ armnnOnnxParser::IOnnxParserPtr parser = armnnOnnxParser::IOnnxParser::Create();
+ try
+ {
+ // Parse the proto text.
+        armnn::INetworkPtr network = parser->CreateNetworkFromString(prototext);
+ auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());
+ if (!optimized)
+ {
+ std::cout << "Error: Failed to optimise the input network." << std::endl;
+ return 1;
+ }
+ armnn::NetworkId networkId;
+ std::string errorMsg;
+ Status status = runtime->LoadNetwork(networkId, std::move(optimized), errorMsg);
+ if (status != Status::Success)
+ {
+ std::cout << "Error: Failed to load the optimized network." << std::endl;
+ return -1;
+ }
+
+ // Setup the input and output.
+ std::vector<armnnOnnxParser::BindingPointInfo> inputBindings;
+        // Because we know the model, we know the input tensor is called Input and the output is called Output.
+ inputBindings.push_back(parser->GetNetworkInputBindingInfo("Input"));
+ std::vector<armnnOnnxParser::BindingPointInfo> outputBindings;
+ outputBindings.push_back(parser->GetNetworkOutputBindingInfo("Output"));
+ // Allocate input tensors
+ armnn::InputTensors inputTensors;
+ std::vector<float> in_data(inputBindings[0].second.GetNumElements());
+ TensorInfo inputTensorInfo(inputBindings[0].second);
+ inputTensorInfo.SetConstant(true);
+        // Fill the input with some arbitrary values.
+        for (unsigned int i = 0; i < inputBindings[0].second.GetNumElements(); i++)
+ {
+ in_data[i] = 1.0f + i;
+ }
+ inputTensors.push_back({ inputBindings[0].first, armnn::ConstTensor(inputTensorInfo, in_data.data()) });
+
+ // Allocate output tensors
+ armnn::OutputTensors outputTensors;
+ std::vector<float> out_data(outputBindings[0].second.GetNumElements());
+ outputTensors.push_back({ outputBindings[0].first, armnn::Tensor(outputBindings[0].second, out_data.data()) });
+
+ runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
+ runtime->UnloadNetwork(networkId);
+ // We're finished with the parser.
+ armnnOnnxParser::IOnnxParser::Destroy(parser.get());
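+        // Release the unique_ptr so it does not double-delete the destroyed parser.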
+ parser.release();
+ }
+ catch (const std::exception& e) // Could be an InvalidArgumentException or a ParseException.
+ {
+ std::cout << "Unable to create parser for the passed protobuf string. Reason: " << e.what() << std::endl;
+ return -1;
+ }
+ return 0;
+}
diff --git a/tests/InterfaceTests/OpaqueDelegateTest.cpp b/tests/InterfaceTests/OpaqueDelegateTest.cpp
new file mode 100644
index 0000000000..240a295393
--- /dev/null
+++ b/tests/InterfaceTests/OpaqueDelegateTest.cpp
@@ -0,0 +1,67 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn_delegate.hpp>
+
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/core/model.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <iostream>
+
+int main()
+{
+ std::unique_ptr<tflite::FlatBufferModel> model;
+ model = tflite::FlatBufferModel::BuildFromFile("./simple_conv2d_1_op.tflite");
+ if (!model)
+ {
+ std::cout << "Failed to load TfLite model from: ./simple_conv2d_1_op.tflite" << std::endl;
+ return -1;
+ }
+    std::unique_ptr<tflite::Interpreter> tfLiteInterpreter;
+    tfLiteInterpreter = std::make_unique<tflite::Interpreter>();
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    tflite::InterpreterBuilder builder(*model, resolver);
+    if (builder(&tfLiteInterpreter) != kTfLiteOk)
+ {
+ std::cout << "Error loading the model into the TfLiteInterpreter." << std::endl;
+ return -1;
+ }
+    // Use default TFLiteSettings until option handling is added.
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+ tflite::TFLiteSettingsBuilder tfliteSettingsBuilder(flatBufferBuilder);
+ flatbuffers::Offset<tflite::TFLiteSettings> tfliteSettings = tfliteSettingsBuilder.Finish();
+ flatBufferBuilder.Finish(tfliteSettings);
+ const tflite::TFLiteSettings* settings =
+ flatbuffers::GetRoot<tflite::TFLiteSettings>(flatBufferBuilder.GetBufferPointer());
+
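+    // CreateByName relies on the Arm NN opaque delegate library having registered
+    // an "armnn_delegate" plugin with TfLite's DelegatePluginRegistry when linked in.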
+ std::unique_ptr<tflite::delegates::DelegatePluginInterface> delegatePlugIn =
+ tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *settings);
+
+    // Create the Arm NN opaque delegate from the delegate plugin.
+ tflite::delegates::TfLiteDelegatePtr armnnDelegate = delegatePlugIn->Create();
+
+ // Add Delegate to the builder
+ builder.AddDelegate(armnnDelegate.get());
+    if (builder(&tfLiteInterpreter) != kTfLiteOk)
+ {
+ std::cout << "Unable to add the Arm NN delegate to the TfLite runtime." << std::endl;
+ return -1;
+ }
+
+    if (tfLiteInterpreter->AllocateTensors() != kTfLiteOk)
+ {
+ std::cout << "Failed to allocate tensors in the TfLiteInterpreter." << std::endl;
+ return -1;
+ }
+
+    // Ideally we would populate the input tensors here, but the test runs without doing so.
+
+    TfLiteStatus status = tfLiteInterpreter->Invoke();
+ if (status != kTfLiteOk)
+ {
+ std::cout << "Inference failed." << std::endl;
+ return -1;
+ }
+}
diff --git a/tests/InterfaceTests/README.md b/tests/InterfaceTests/README.md
new file mode 100644
index 0000000000..bd8ec33cd0
--- /dev/null
+++ b/tests/InterfaceTests/README.md
@@ -0,0 +1,31 @@
+# Interface tests for the released Arm NN binary package
+
+These are a small number of executables that exercise the main interfaces exposed by Arm NN via the binary packages. The intent is to highlight any missing include dependencies or libraries.
+
+## Usage
+The CMakeLists.txt file describes five binaries, each focusing on a different interface. Before attempting to compile, you must already have built the appropriate versions of FlatBuffers and TensorFlow Lite.
+
+Standard practice for CMake is to create a subdirectory called 'build' and execute from within it.
+
+```bash
+mkdir build
+cd build
+cmake .. -DARMNN_ROOT=<path to the unpacked binary build> -DTFLITE_INCLUDE_ROOT=<directory containing tensorflow/include> -DTFLITE_LIB_ROOT=<directory containing libtensorflow-lite.a> -DFLATBUFFERS_ROOT=<directory containing flatbuffers install>
+make
+```
+
+It is not strictly necessary to execute the built binaries, as these tests verify that the interfaces build rather than the correctness of execution. If you do run them, note that the parser and delegate tests load the sample model from the current working directory, as sketched below.
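+
+A minimal sketch of a run from within the build directory (assuming the model is copied alongside the binaries; SimpleSample prompts for input on stdin):
+
+```bash
+cp ../simple_conv2d_1_op.tflite .
+./TfLiteParserTest
+./OnnxParserTest
+./ClassicDelegateTest
+./OpaqueDelegateTest
+```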
+
+## Individual tests
+
+### SimpleSample
+This exercises the Arm NN graph interface. It is based on SimpleSample, located at armnn/samples/SimpleSample.cpp.
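+
+A condensed sketch of the graph interface it exercises, assuming a single ReLU activation layer (the layer choice is illustrative, not the one the real sample uses):
+
+```cpp
+#include <armnn/Descriptors.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+
+#include <vector>
+
+int main()
+{
+    using namespace armnn;
+    // Build a graph: input -> ReLU -> output.
+    INetworkPtr network = INetwork::Create();
+    IConnectableLayer* input = network->AddInputLayer(0);
+    ActivationDescriptor reluDesc;
+    reluDesc.m_Function = ActivationFunction::ReLu;
+    IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");
+    IConnectableLayer* output = network->AddOutputLayer(0);
+
+    TensorInfo info(TensorShape({ 1, 4 }), DataType::Float32);
+    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(info);
+    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    relu->GetOutputSlot(0).SetTensorInfo(info);
+
+    // Optimize for the reference backend and load into the runtime.
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime = IRuntime::Create(options);
+    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());
+    NetworkId networkId;
+    runtime->LoadNetwork(networkId, std::move(optNet));
+
+    // Run a single inference on some arbitrary values.
+    std::vector<float> inData{ -2.0f, -1.0f, 1.0f, 2.0f };
+    std::vector<float> outData(4);
+    TensorInfo inputInfo(info);
+    inputInfo.SetConstant(true);
+    InputTensors inputTensors{ { 0, ConstTensor(inputInfo, inData.data()) } };
+    OutputTensors outputTensors{ { 0, Tensor(info, outData.data()) } };
+    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
+    return 0;
+}
+```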
+
+### TfLiteParserTest
+This exercises the Arm NN TfLite parser interface. It attempts to parse simple_conv2d_1_op.tflite, load it, and execute an inference.
+
+### OnnxParserTest
+This exercises the Arm NN ONNX parser interface. It attempts to parse a simple convolution model hard-coded as protobuf text, load it, and execute an inference.
+
+### ClassicDelegateTest / OpaqueDelegateTest
+Neither of these tests is strictly necessary, as the external interface is only used by the TfLite runtime. Users of Arm NN are not expected to hard-code an execution of either TfLite delegate; instead, the delegate library is presented to the TfLite runtime for it to execute. These tests exist to verify that interface.
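+
+For reference, a sketch of how the classic delegate is normally presented to the TfLite runtime, using TfLite's benchmark_model tool (paths and options are illustrative):
+
+```bash
+./benchmark_model --graph=simple_conv2d_1_op.tflite \
+    --external_delegate_path=<path to libarmnnDelegate.so> \
+    --external_delegate_options="backends:CpuRef"
+```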
diff --git a/tests/InterfaceTests/TfLiteParserTest.cpp b/tests/InterfaceTests/TfLiteParserTest.cpp
new file mode 100644
index 0000000000..1c74741e09
--- /dev/null
+++ b/tests/InterfaceTests/TfLiteParserTest.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/INetwork.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnnTfLiteParser/ITfLiteParser.hpp>
+
+#include <iostream>
+
+int main()
+{
+ using namespace armnn;
+
+ // Create ArmNN runtime
+ IRuntime::CreationOptions options; // default options
+ IRuntimePtr runtime = IRuntime::Create(options);
+ // Parse a TfLite file.
+ armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
+ try
+ {
+ INetworkPtr myNetwork = parser->CreateNetworkFromBinaryFile("./simple_conv2d_1_op.tflite");
+ // Optimise ArmNN network
+ IOptimizedNetworkPtr optNet = Optimize(*myNetwork, { Compute::CpuRef }, runtime->GetDeviceSpec());
+ if (!optNet)
+ {
+ std::cout << "Error: Failed to optimise the input network." << std::endl;
+ return 1;
+ }
+ NetworkId networkId;
+ // Load graph into runtime
+ Status loaded = runtime->LoadNetwork(networkId, std::move(optNet));
+ if (loaded != Status::Success)
+ {
+ std::cout << "Error: Failed to load the optimized network." << std::endl;
+ return 1;
+ }
+
+ // Setup the input and output.
+ std::vector<armnnTfLiteParser::BindingPointInfo> inputBindings;
+ std::vector<std::string> inputTensorNames = parser->GetSubgraphInputTensorNames(0);
+ inputBindings.push_back(parser->GetNetworkInputBindingInfo(0, inputTensorNames[0]));
+
+ std::vector<armnnTfLiteParser::BindingPointInfo> outputBindings;
+ std::vector<std::string> outputTensorNames = parser->GetSubgraphOutputTensorNames(0);
+ outputBindings.push_back(parser->GetNetworkOutputBindingInfo(0, outputTensorNames[0]));
+ TensorInfo inputTensorInfo(inputBindings[0].second);
+ inputTensorInfo.SetConstant(true);
+
+ // Allocate input tensors
+ armnn::InputTensors inputTensors;
+ std::vector<float> in_data(inputBindings[0].second.GetNumElements());
+        // Fill the input with some arbitrary values.
+        for (unsigned int i = 0; i < inputBindings[0].second.GetNumElements(); i++)
+ {
+ in_data[i] = 1.0f + i;
+ }
+ inputTensors.push_back({ inputBindings[0].first, armnn::ConstTensor(inputTensorInfo, in_data.data()) });
+
+ // Allocate output tensors
+ armnn::OutputTensors outputTensors;
+ std::vector<float> out_data(outputBindings[0].second.GetNumElements());
+ outputTensors.push_back({ outputBindings[0].first, armnn::Tensor(outputBindings[0].second, out_data.data()) });
+
+ runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
+ runtime->UnloadNetwork(networkId);
+ // We're finished with the parser.
+ armnnTfLiteParser::ITfLiteParser::Destroy(parser.get());
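+        // Release the unique_ptr so it does not double-delete the destroyed parser.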
+ parser.release();
+ }
+ catch (const std::exception& e) // Could be: InvalidArgumentException, ParseException or a FileNotFoundException.
+ {
+ std::cout << "Unable to create parser for \"./simple_conv2d_1_op.tflite\". Reason: " << e.what() << std::endl;
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/InterfaceTests/simple_conv2d_1_op.tflite b/tests/InterfaceTests/simple_conv2d_1_op.tflite
new file mode 100644
index 0000000000..d0260fed01
--- /dev/null
+++ b/tests/InterfaceTests/simple_conv2d_1_op.tflite
Binary files differ