author    Matthew Bentham <Matthew.Bentham@arm.com>    2019-12-02 12:59:43 +0000
committer Matthew Bentham <Matthew.Bentham@arm.com>    2019-12-02 16:23:45 +0000
commit    245d64c60d0ea30f5080ff53225b5169927e24d6 (patch)
tree      d623e46d7d5ddb34ef3bb84c45df3ada9209ce82
parent    88d5f9f1615fa956464b8932b574d85c37cec937 (diff)
download  armnn-experimental/pyarmnn.tar.gz
Work in progress of python bindings for Arm NN (experimental/pyarmnn)

Not built or tested in any way

Signed-off-by: Matthew Bentham <Matthew.Bentham@arm.com>
Change-Id: Ie7f92b529aa5087130f0c5cc8c17db1581373236
-rw-r--r--  python/pyarmnn/.gitignore | 214
-rw-r--r--  python/pyarmnn/conftest.py | 47
-rw-r--r--  python/pyarmnn/images/pyarmnn.png | bin 0 -> 74951 bytes
-rwxr-xr-x  python/pyarmnn/init_devenv.sh | 25
-rwxr-xr-x  python/pyarmnn/licences.txt | 21
-rw-r--r--  python/pyarmnn/pylintconfig | 425
-rw-r--r--  python/pyarmnn/readme.md | 206
-rw-r--r--  python/pyarmnn/scripts/generate_docs.py | 47
-rw-r--r--  python/pyarmnn/setup.py | 252
-rw-r--r--  python/pyarmnn/src/pyarmnn/__init__.py | 138
-rw-r--r--  python/pyarmnn/src/pyarmnn/_generated/__init__.py | 2
-rw-r--r--  python/pyarmnn/src/pyarmnn/_quantization/__init__.py | 4
-rw-r--r--  python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py | 70
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/__init__.py | 6
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py | 159
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/tensor.py | 119
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py | 123
-rw-r--r--  python/pyarmnn/src/pyarmnn/_utilities/__init__.py | 4
-rw-r--r--  python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py | 95
-rw-r--r--  python/pyarmnn/src/pyarmnn/_version.py | 26
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn.i | 27
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i | 103
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn_onnxparser.i | 96
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i | 132
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn_tfparser.i | 102
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/armnn_version.i | 58
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_backend.i | 66
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i | 1000
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_lstmparam.i | 97
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i | 1159
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_profiler.i | 82
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i | 254
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i | 313
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i | 136
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_types_utils.i | 26
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/standard_header.i | 53
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/typemaps/network_optimize.i | 41
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/typemaps/permutation_vector.i | 52
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_memory.i | 52
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_shape.i | 51
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/typemaps/vectors.i | 235
-rwxr-xr-x  python/pyarmnn/swig_generate.py | 64
-rw-r--r--  python/pyarmnn/test/test_caffe_parser.py | 133
-rw-r--r--  python/pyarmnn/test/test_const_tensor.py | 199
-rw-r--r--  python/pyarmnn/test/test_descriptors.py | 528
-rw-r--r--  python/pyarmnn/test/test_generated.py | 52
-rw-r--r--  python/pyarmnn/test/test_iconnectable.py | 143
-rw-r--r--  python/pyarmnn/test/test_network.py | 310
-rw-r--r--  python/pyarmnn/test/test_onnx_parser.py | 110
-rw-r--r--  python/pyarmnn/test/test_profiling_utilities.py | 63
-rw-r--r--  python/pyarmnn/test/test_quantize_and_dequantize.py | 79
-rw-r--r--  python/pyarmnn/test/test_runtime.py | 275
-rw-r--r--  python/pyarmnn/test/test_setup.py | 100
-rw-r--r--  python/pyarmnn/test/test_supported_backends.py | 51
-rw-r--r--  python/pyarmnn/test/test_tensor.py | 135
-rw-r--r--  python/pyarmnn/test/test_tensor_conversion.py | 97
-rw-r--r--  python/pyarmnn/test/test_tensor_info.py | 27
-rw-r--r--  python/pyarmnn/test/test_tensor_shape.py | 75
-rw-r--r--  python/pyarmnn/test/test_tf_parser.py | 133
-rw-r--r--  python/pyarmnn/test/test_tflite_parser.py | 173
-rw-r--r--  python/pyarmnn/test/test_types.py | 27
-rw-r--r--  python/pyarmnn/test/test_version.py | 35
-rw-r--r--  python/pyarmnn/tox.ini | 64
63 files changed, 8991 insertions, 0 deletions
diff --git a/python/pyarmnn/.gitignore b/python/pyarmnn/.gitignore
new file mode 100644
index 0000000000..733955abdc
--- /dev/null
+++ b/python/pyarmnn/.gitignore
@@ -0,0 +1,214 @@
+# Created by .ignore support plugin (hsz.mobi)
+### C++ template
+# Prerequisites
+*.d
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+*.smod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+*.o
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Documentation
+docs
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+generated_cxx/*
+!generated_cxx/.keep
+
+src/pyarmnn/_generated/*
+!src/pyarmnn/_generated/.keep
+!src/pyarmnn/_generated/*.py
+!src/pyarmnn/_generated/*.cpp
+.idea
+**/include
diff --git a/python/pyarmnn/conftest.py b/python/pyarmnn/conftest.py
new file mode 100644
index 0000000000..0eea35c7e0
--- /dev/null
+++ b/python/pyarmnn/conftest.py
@@ -0,0 +1,47 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+
+
+@pytest.fixture(scope="module")
+def data_folder_per_test(request):
+ """
+ This fixture returns the path to a folder with test resources (one per test module).
+ """
+
+ basedir, script = request.fspath.dirname, request.fspath.basename
+ return str(os.path.join(basedir, "testdata", os.path.splitext(script)[0]))
+
+
+@pytest.fixture(scope="module")
+def shared_data_folder(request):
+ """
+ This fixture returns the path to a folder with test resources shared among all tests.
+ """
+
+ return str(os.path.join(request.fspath.dirname, "testdata", "shared"))
+
+
+@pytest.fixture(scope="function")
+def tmpdir(tmpdir):
+ """
+ This fixture returns the path to a temp folder. The fixture was added for py35 compatibility.
+ """
+
+ return str(tmpdir)
+
+
+def pytest_addoption(parser):
+ parser.addoption('--juno', action='store_true', dest="juno",
+ default=False, help="enable juno fpga related tests")
+
+
+def pytest_configure(config):
+ config.addinivalue_line(
+ "markers", "juno: mark test to run only on juno"
+ )
+
+ if not config.option.juno:
+ setattr(config.option, 'markexpr', 'not juno')
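For illustration, a test module could consume these fixtures and the `juno` marker like this (a hypothetical test, not part of the patch; the resource file name is an assumption):

```python
import os

import pytest


def test_shared_resource_exists(shared_data_folder):
    # shared_data_folder resolves to <test dir>/testdata/shared;
    # 'mock_model.tflite' is a hypothetical resource file.
    assert os.path.exists(os.path.join(shared_data_folder, 'mock_model.tflite'))


@pytest.mark.juno
def test_on_juno_only():
    # Collected but deselected unless pytest is invoked with --juno.
    pass
```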
diff --git a/python/pyarmnn/images/pyarmnn.png b/python/pyarmnn/images/pyarmnn.png
new file mode 100644
index 0000000000..7a900d8e3a
--- /dev/null
+++ b/python/pyarmnn/images/pyarmnn.png
Binary files differ
diff --git a/python/pyarmnn/init_devenv.sh b/python/pyarmnn/init_devenv.sh
new file mode 100755
index 0000000000..f19a076ef9
--- /dev/null
+++ b/python/pyarmnn/init_devenv.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+
+JENKINS_BUILD=0
+while getopts ":j" opt; do
+ case "$opt" in
+ j) JENKINS_BUILD=1 ;;
+ esac
+done
+
+python3.6 -m venv toxenv
+source toxenv/bin/activate
+pip install virtualenv==16.3.0 tox
+
+tox -e devenv
+# If jenkins build, also run unit tests, generate docs, etc
+if [ $JENKINS_BUILD == 1 ]; then
+ tox
+ tox -e doc
+fi
+
+deactivate
+rm -rf toxenv
+
+source env/bin/activate
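Usage follows from the getopts loop above (a sketch, assuming the script is run from the project root):

```bash
$ ./init_devenv.sh       # create the development environment only
$ ./init_devenv.sh -j    # additionally run the unit tests and generate the docs
```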
diff --git a/python/pyarmnn/licences.txt b/python/pyarmnn/licences.txt
new file mode 100755
index 0000000000..21ab6a6d7a
--- /dev/null
+++ b/python/pyarmnn/licences.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 ARM Limited.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/python/pyarmnn/pylintconfig b/python/pyarmnn/pylintconfig
new file mode 100644
index 0000000000..970c21e130
--- /dev/null
+++ b/python/pyarmnn/pylintconfig
@@ -0,0 +1,425 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS,generated,_generated
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=_version.py
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=print-statement,parameter-unpacking,unpacking-in-except,old-raise-syntax,backtick,long-suffix,old-ne-operator,old-octal-literal,import-star-module-level,raw-checker-failed,bad-inline-option,locally-disabled,locally-enabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,apply-builtin,basestring-builtin,buffer-builtin,cmp-builtin,coerce-builtin,execfile-builtin,file-builtin,long-builtin,raw_input-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,no-absolute-import,old-division,dict-iter-method,dict-view-method,next-method-called,metaclass-assignment,indexing-exception,raising-string,reload-builtin,oct-method,hex-method,nonzero-method,cmp-method,input-builtin,round-builtin,intern-builtin,unichr-builtin,map-builtin-not-iterating,zip-builtin-not-iterating,range-builtin-not-iterating,filter-builtin-not-iterating,using-cmp-argument,eq-without-hash,div-method,idiv-method,rdiv-method,exception-message-attribute,invalid-str-codec,sys-max-int,bad-python3-import,deprecated-string-function,deprecated-str-translate-call
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10 (10 is
+# the highest score). You have access to the variables error, warning, refactor,
+# convention and statement, which respectively contain the number of messages in
+# each category and the total number of statements analyzed. This is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio).You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=colorized
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[BASIC]
+
+# Naming hint for argument names
+argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct argument names
+argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Naming hint for attribute names
+attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct attribute names
+attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming hint for function names
+function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct function names
+function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for method names
+method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct method names
+method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming hint for variable names
+variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct variable names
+variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=120
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,future.builtins
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/python/pyarmnn/readme.md b/python/pyarmnn/readme.md
new file mode 100644
index 0000000000..0633fd5125
--- /dev/null
+++ b/python/pyarmnn/readme.md
@@ -0,0 +1,206 @@
+# About PyArmNN
+
+PyArmNN is a Python extension for the [Arm NN SDK](https://developer.arm.com/ip-products/processors/machine-learning/arm-nn).
+PyArmNN provides an interface similar to the Arm NN C++ API.
+Before you proceed with the project setup, you will need to check out and build a corresponding Arm NN version.
+
+PyArmNN is built around the public headers from the armnn/include folder of Arm NN. PyArmNN does not implement any computation kernels itself; all operations are
+delegated to the Arm NN library.
+
+The [SWIG](http://www.swig.org/) project is used to generate the Arm NN python shadow classes and C wrapper.
+
+The following diagram shows the conceptual architecture of this library:
+![PyArmNN](./images/pyarmnn.png)
+
+# PyArmNN installation
+
+PyArmNN can be distributed as a source package or a binary package (wheel).
+
+The binary package is platform-dependent; the name of the package indicates the platform it was built for, e.g.:
+
+* Linux x86 64bit machine: pyarmnn-19.11.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-19.11.0-cp36-cp36m-*linux_aarch64*.whl
+
+The source package is platform-independent, but installation involves compiling the Arm NN Python extension. You will need g++ compatible with the C++14 standard and a Python development library installed on the build machine.
+
+Both the source and the binary package require the Arm NN library to be present on the target/build machine.
+
+It is strongly suggested to work within a Python virtual environment, for example as sketched below. The further steps assume that the virtual environment was created and activated before running the PyArmNN installation commands.
+
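+A minimal environment setup might look like this (illustrative; any Python 3.6+ interpreter will do):
+```bash
+$ python3 -m venv env
+$ source env/bin/activate
+```
+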
+PyArmNN also depends on the NumPy Python library. It will be automatically downloaded and installed alongside PyArmNN. If your machine does not have access to the Python pip repository, you might need to install NumPy in advance by following the public instructions: https://scipy.org/install.html
+
+## Installing from wheel
+
+Make sure that the Arm NN binaries and Arm NN dependencies are installed and can be found in one of the system default library locations. You can check the default locations by executing the following command:
+```bash
+$ gcc --print-search-dirs
+```
+Install PyArmNN from binary by pointing to the wheel file:
+```bash
+$ pip install /path/to/pyarmnn-19.11.0-cp36-cp36m-linux_aarch64.whl
+```
+
+## Installing from source package
+
+Alternatively, you can install from source. This is the more reliable way but requires a little more effort on the user's part.
+
+When installing from source, you are free to choose the Arm NN libraries' location. Set the environment variables *ARMNN_LIB* and *ARMNN_INCLUDE* to point to the Arm NN libraries and headers.
+If you want to use the system default locations, just set *ARMNN_INCLUDE* to point to the Arm NN headers.
+
+```bash
+$ export ARMNN_LIB=/path/to/libs
+$ export ARMNN_INCLUDE=/path/to/headers
+```
+
+Install PyArmNN as follows:
+```bash
+$ pip install /path/to/pyarmnn-19.11.0.tar.gz
+```
+
+If the PyArmNN installation script fails to find the Arm NN libraries, it will raise an error like this:
+
+`RuntimeError: ArmNN library was not found in ('/usr/lib/gcc/aarch64-linux-gnu/8/', <...> ,'/lib/', '/usr/lib/'). Please install ArmNN to one of the standard locations or set correct ARMNN_INCLUDE and ARMNN_LIB env variables.`
+
+You can now verify that the PyArmNN library is installed and check the PyArmNN version using:
+```bash
+$ pip show pyarmnn
+```
+You can also verify it by running the following; the output should be similar to this:
+```bash
+$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
+'20191100'
+```
+
+# PyArmNN API overview
+
+#### Getting started
+The easiest way to begin using PyArmNN is by using the Parsers. We will demonstrate how to use them below:
+
+Create a parser object and load your model file.
+```python
+import pyarmnn as ann
+import imageio
+
+# ONNX, Caffe and TF parsers also exist.
+parser = ann.ITfLiteParser()
+network = parser.CreateNetworkFromBinaryFile('./model.tflite')
+```
+
+Get the input binding information by using the name of the input layer.
+```python
+input_binding_info = parser.GetNetworkInputBindingInfo(0, 'model/input')
+
+# Create a runtime object that will perform inference.
+options = ann.CreationOptions()
+runtime = ann.IRuntime(options)
+```
+Choose preferred backends for execution and optimize the network.
+```python
+# Backend choices earlier in the list have higher preference.
+preferredBackends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+opt_network, messages = ann.Optimize(network, preferredBackends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+# Load the optimized network into the runtime.
+net_id, _ = runtime.LoadNetwork(opt_network)
+```
+Make workload tensors using input and output binding information.
+```python
+# Load an image and create an inputTensor for inference.
+img = imageio.imread('./image.png')
+input_tensors = ann.make_input_tensors([input_binding_info], [img])
+
+# Get output binding information for an output layer by using the layer name.
+output_binding_info = parser.GetNetworkOutputBindingInfo(0, 'model/output')
+output_tensors = ann.make_output_tensors([output_binding_info])
+```
+
+Perform inference and get the results back into a numpy array.
+```python
+runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+results = ann.workload_tensors_to_ndarray(output_tensors)
+print(results)
+```
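+
+The `results` list holds one ndarray per output tensor. For a classification model, the top prediction could then be read with NumPy (an illustrative follow-on, assuming a single classification output):
+```python
+import numpy as np
+print(np.argmax(results[0]))
+```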
+
+# Setup development environment
+
+Before proceeding to the next steps, make sure that:
+
+1. You have Python 3.6+ installed system-wide. The package is not compatible with older Python versions.
+2. You have python3.6-dev installed system-wide. This contains the header files needed to build the PyArmNN extension module.
+3. In case you built Python from source manually, make sure that the following libraries are installed and available on your system:
+``python3.6-dev build-essential checkinstall libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev``
+4. You have SWIG installed; it must be version 4.x.
+
+## Setup virtual environment
+Now you can proceed with setting up the workspace:
+
+1. Set the environment variables ARMNN_LIB (pointing to the Arm NN libraries) and ARMNN_INCLUDE (pointing to the Arm NN headers).
+2. Create the development environment using the script: ``source init_devenv.sh``
+
+## Generating SWIG wrappers
+Before building the package or running tests, you need to generate the SWIG wrappers from the interface files.
+This can be done with the tox target 'gen':
+
+```bash
+tox -e gen
+```
+
+## Running unit-tests
+
+Execute the command from the project root dir:
+
+```bash
+$ tox
+```
+or
+```bash
+$ pytest -v
+```
+
+## Build Python distribution packages
+
+Python supports source and binary distribution packages.
+
+A source distribution contains the setup.py script that is executed on the user's machine during package installation.
+When preparing a binary distribution (wheel), setup.py is executed on the build machine and the resulting package contains only the result
+of the build (generated files and resources, test results etc.).
+
+In our case, PyArmNN depends on an Arm NN installation. Thus, the binary distribution will be linked against
+the local build machine's libraries and runtime.
+
+### Source distribution
+
+```bash
+$ python setup.py clean --all
+$ python setup.py sdist
+```
+
+As a result you will get a `./dist/pyarmnn-19.11.0.tar.gz` file. As you can see, it is platform-independent.
+
+### Wheel
+
+```bash
+$ export ARMNN_LIB=...
+$ export ARMNN_INCLUDE=...
+$ python setup.py clean --all
+$ python setup.py bdist_wheel
+```
+
+As a result you will get something like a `./dist/pyarmnn-19.11.0-cp36-cp36m-linux_x86_64.whl` file. As you can see, it is platform-dependent.
+This command launches the extension build, so you need to have the SWIG wrappers generated before running it.
+
+## Regenerate SWIG stubs in place
+
+If you need to regenerate the wrappers after changing the SWIG interface files, you will need to clean the existing build folders
+first and then rebuild the extension:
+```bash
+$ python setup.py clean --all
+```
+```bash
+$ export ARMNN_LIB=/path/to/armnn/lib
+$ export ARMNN_INCLUDE=/path/to/armnn/include
+$ python setup.py build_ext --inplace
+```
+This will put all generated files under the ./src/pyarmnn/_generated folder.
+Thus, this command can be used to regenerate the extensions in the development environment.
diff --git a/python/pyarmnn/scripts/generate_docs.py b/python/pyarmnn/scripts/generate_docs.py
new file mode 100644
index 0000000000..76c9520083
--- /dev/null
+++ b/python/pyarmnn/scripts/generate_docs.py
@@ -0,0 +1,47 @@
+import os
+import tarfile
+
+import pyarmnn as ann
+import shutil
+
+from typing import List, Union
+
+from pdoc.cli import main
+
+
+def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
+ file_paths = [file_paths] if isinstance(file_paths, str) else list(file_paths)
+
+ if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
+ os.makedirs(target_dir_path)
+
+ for file_path in file_paths:
+ if not (os.path.exists(file_path) and os.path.isfile(file_path)):
+ raise RuntimeError('Not a file: {}'.format(file_path))
+
+ file_name = os.path.basename(file_path)
+ shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
+
+
+def copy_doc_images():
+ __copy_file_to_dir(file_paths=['./images/pyarmnn.png' ],
+ target_dir_path='docs/pyarmnn/images')
+
+
+def archive_docs(path, version):
+
+ output_filename = f'pyarmnn_docs-{version}.tar'
+
+ with tarfile.open(output_filename, "w") as tar:
+ tar.add(path)
+
+
+if __name__ == "__main__":
+ with open('./readme.md', 'r') as readme_file:
+ top_level_pyarmnn_doc = ''.join(readme_file.readlines())
+ ann.__doc__ = top_level_pyarmnn_doc
+
+ main()
+
+ copy_doc_images()
+ archive_docs('./docs', ann.__version__)
diff --git a/python/pyarmnn/setup.py b/python/pyarmnn/setup.py
new file mode 100644
index 0000000000..cb4dc865a7
--- /dev/null
+++ b/python/pyarmnn/setup.py
@@ -0,0 +1,252 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import logging
+import os
+import sys
+from functools import lru_cache
+from pathlib import Path
+from itertools import chain
+
+from setuptools import setup
+from distutils.core import Extension
+from setuptools.command.build_py import build_py
+from setuptools.command.build_ext import build_ext
+
+logger = logging.getLogger(__name__)
+
+__version__ = None
+__arm_ml_version__ = None
+
+
+def check_armnn_version(*args):
+ pass
+
+
+exec(open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src', 'pyarmnn', '_version.py')).read())
+
+
+class ExtensionPriorityBuilder(build_py):
+ """
+ Runs the extension builder before other stages. Otherwise generated files are not included in the distribution.
+ """
+
+ def run(self):
+ self.run_command('build_ext')
+ return super().run()
+
+
+class ArmnnVersionCheckerExtBuilder(build_ext):
+
+ def __init__(self, dist):
+ super().__init__(dist)
+ self.failed_ext = []
+
+ def build_extension(self, ext):
+ try:
+ super().build_extension(ext)
+ except Exception as err:
+ self.failed_ext.append(ext)
+ logger.warning('Failed to build extension %s. \n %s', ext.name, str(err))
+
+ if ext.name == 'pyarmnn._generated._pyarmnn_version':
+ sys.path.append(os.path.abspath(os.path.join(self.build_lib, str(Path(ext._file_name).parent))))
+ from _pyarmnn_version import GetVersion
+ check_armnn_version(GetVersion(), __arm_ml_version__)
+
+ def copy_extensions_to_source(self):
+
+ for ext in self.failed_ext:
+ self.extensions.remove(ext)
+ super().copy_extensions_to_source()
+
+
+def linux_gcc_lib_search():
+ """
+ Calls `gcc` to get the default linker search paths.
+ Returns:
+ tuple of paths, or None if they cannot be determined
+ """
+ cmd = 'gcc --print-search-dirs | grep libraries'
+ cmd_res = os.popen(cmd).read()
+ cmd_res = cmd_res.split('=')
+ if len(cmd_res) > 1:
+ return tuple(cmd_res[1].split(':'))
+ return None
+
+
+def find_includes(armnn_include_env: str = 'ARMNN_INCLUDE'):
+ armnn_include_path = os.getenv(armnn_include_env, '')
+ return [armnn_include_path] if armnn_include_path else ['/usr/local/include', '/usr/include']
+
+
+@lru_cache(maxsize=1)
+def find_armnn(lib_name: str,
+ optional: bool = False,
+ armnn_libs_env: str = 'ARMNN_LIB',
+ default_lib_search: tuple = linux_gcc_lib_search()):
+ """
+ Searches for ArmNN installation on the local machine.
+
+ Args:
+ lib_name: lib name to find
+ optional: Do not fail if optional. Default is False - fail if the library was not found.
+ armnn_libs_env: custom environment variable pointing to the ArmNN libraries location, default is 'ARMNN_LIB'
+ default_lib_search: list of paths to search for ArmNN if it is not found within the path provided by the 'ARMNN_LIB'
+ env variable
+
+ Returns:
+ tuple containing name of the armnn libs, paths to the libs
+ """
+
+ armnn_lib_path = os.getenv(armnn_libs_env, "")
+
+ lib_search = [armnn_lib_path] if armnn_lib_path else default_lib_search
+
+ armnn_libs = dict(map(lambda path: (':{}'.format(path.name), path),
+ chain.from_iterable(map(lambda lib_path: Path(lib_path).glob(lib_name),
+ lib_search))))
+ if not optional and len(armnn_libs) == 0:
+ raise RuntimeError("""ArmNN library {} was not found in {}. Please install ArmNN to one of the standard
+ locations or set correct ARMNN_INCLUDE and ARMNN_LIB env variables.""".format(lib_name,
+ lib_search))
+
+ # gives back a tuple: names of the armnn libs and the set of unique lib locations.
+ return list(armnn_libs.keys()), list(set(
+ map(lambda path: str(path.absolute().parent), armnn_libs.values())))
+
+
+class LazyArmnnFinderExtension(Extension):
+ """
+ Derived from `Extension`, this class adds a search for the ArmNN libraries on the user's machine.
+ SWIG options and compilation flags are updated with the relevant ArmNN library locations (-L) and headers (-I).
+
+ The search for ArmNN is executed only when the attributes include_dirs, library_dirs, runtime_library_dirs, libraries or
+ swig_opts are queried.
+
+ """
+
+ def __init__(self, name, sources, armnn_libs, include_dirs=None, define_macros=None, undef_macros=None,
+ library_dirs=None,
+ libraries=None, runtime_library_dirs=None, extra_objects=None, extra_compile_args=None,
+ extra_link_args=None, export_symbols=None, language=None, optional=None, **kw):
+ self._include_dirs = None
+ self._library_dirs = None
+ self._runtime_library_dirs = None
+ self._armnn_libs = armnn_libs
+ # self.__swig_opts = None
+ super().__init__(name, sources, include_dirs, define_macros, undef_macros, library_dirs, libraries,
+ runtime_library_dirs, extra_objects, extra_compile_args, extra_link_args, export_symbols,
+ language, optional, **kw)
+
+ @property
+ def include_dirs(self):
+ return self._include_dirs + find_includes()
+
+ @include_dirs.setter
+ def include_dirs(self, include_dirs):
+ self._include_dirs = include_dirs
+
+ @property
+ def library_dirs(self):
+ library_dirs = self._library_dirs
+ for lib in self._armnn_libs:
+ _, lib_path = find_armnn(lib)
+ library_dirs = library_dirs + lib_path
+
+ return library_dirs
+
+ @library_dirs.setter
+ def library_dirs(self, library_dirs):
+ self._library_dirs = library_dirs
+
+ @property
+ def runtime_library_dirs(self):
+ library_dirs = self._runtime_library_dirs
+ for lib in self._armnn_libs:
+ _, lib_path = find_armnn(lib)
+ library_dirs = library_dirs + lib_path
+
+ return library_dirs
+
+ @runtime_library_dirs.setter
+ def runtime_library_dirs(self, runtime_library_dirs):
+ self._runtime_library_dirs = runtime_library_dirs
+
+ @property
+ def libraries(self):
+ libraries = self._libraries
+ for lib in self._armnn_libs:
+ lib_names, _ = find_armnn(lib)
+ libraries = libraries + lib_names
+
+ return libraries
+
+ @libraries.setter
+ def libraries(self, libraries):
+ self._libraries = libraries
+
+ def __eq__(self, other):
+ return self.__class__ == other.__class__ and self.name == other.name
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return self.name.__hash__()
+
+if __name__ == '__main__':
+ # mandatory extensions
+ pyarmnn_module = LazyArmnnFinderExtension('pyarmnn._generated._pyarmnn',
+ sources=['src/pyarmnn/_generated/armnn_wrap.cpp'],
+ extra_compile_args=['-std=c++14'],
+ language='c++',
+ armnn_libs=['libarmnn.so']
+ )
+ pyarmnn_v_module = LazyArmnnFinderExtension('pyarmnn._generated._pyarmnn_version',
+ sources=['src/pyarmnn/_generated/armnn_version_wrap.cpp'],
+ extra_compile_args=['-std=c++14'],
+ language='c++',
+ armnn_libs=['libarmnn.so']
+ )
+ extensions_to_build = [pyarmnn_v_module, pyarmnn_module]
+
+
+ # optional extensions
+ def add_parsers_ext(name: str, ext_list: list):
+ pyarmnn_optional_module = LazyArmnnFinderExtension('pyarmnn._generated._pyarmnn_{}'.format(name.lower()),
+ sources=['src/pyarmnn/_generated/armnn_{}_wrap.cpp'.format(
+ name.lower())],
+ extra_compile_args=['-std=c++14'],
+ language='c++',
+ armnn_libs=['libarmnn.so', 'libarmnn{}.so'.format(name)]
+ )
+ ext_list.append(pyarmnn_optional_module)
+
+
+ add_parsers_ext('CaffeParser', extensions_to_build)
+ add_parsers_ext('OnnxParser', extensions_to_build)
+ add_parsers_ext('TfParser', extensions_to_build)
+ add_parsers_ext('TfLiteParser', extensions_to_build)
+
+ setup(
+ name='pyarmnn',
+ version=__version__,
+ author='Arm ltd',
+ description='Arm NN python wrapper',
+ url='https://www.arm.com',
+ license='MIT',
+ package_dir={'': 'src'},
+ packages=[
+ 'pyarmnn',
+ 'pyarmnn._generated',
+ 'pyarmnn._quantization',
+ 'pyarmnn._tensor',
+ 'pyarmnn._utilities'
+ ],
+ data_files=[('', ['licences.txt'])],
+ python_requires='>=3.5',
+ install_requires=['numpy'],
+ cmdclass={'build_py': ExtensionPriorityBuilder, 'build_ext': ArmnnVersionCheckerExtBuilder},
+ ext_modules=extensions_to_build
+ )
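The library lookup above leans on gcc's view of the default linker search paths. A standalone sketch of the same parsing (independent of this setup.py; the printed paths are machine-specific):

```python
import os


def gcc_default_lib_dirs():
    # Mirrors linux_gcc_lib_search(): take the 'libraries: =a:b:c' line
    # emitted by `gcc --print-search-dirs` and split it into paths.
    out = os.popen('gcc --print-search-dirs | grep libraries').read()
    parts = out.split('=')
    return tuple(parts[1].strip().split(':')) if len(parts) > 1 else ()


print(gcc_default_lib_dirs())
# e.g. ('/usr/lib/gcc/aarch64-linux-gnu/8/', ..., '/lib/', '/usr/lib/')
```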
diff --git a/python/pyarmnn/src/pyarmnn/__init__.py b/python/pyarmnn/src/pyarmnn/__init__.py
new file mode 100644
index 0000000000..c451479614
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/__init__.py
@@ -0,0 +1,138 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import inspect
+import sys
+import logging
+
+from ._generated.pyarmnn_version import GetVersion, GetMajorVersion, GetMinorVersion
+
+# Parsers
+try:
+ from ._generated.pyarmnn_caffeparser import ICaffeParser
+except ImportError as err:
+ logger = logging.getLogger(__name__)
+ message = "Your ArmNN library instance does not support Caffe models parser functionality. "
+ logger.warning(message + "Skipped ICaffeParser import.")
+ logger.debug(str(err))
+
+
+ def ICaffeParser():
+ raise RuntimeError(message)
+
+try:
+ from ._generated.pyarmnn_onnxparser import IOnnxParser
+except ImportError as err:
+ logger = logging.getLogger(__name__)
+ message = "Your ArmNN library instance does not support Onnx models parser functionality. "
+ logger.warning(message + "Skipped IOnnxParser import.")
+ logger.debug(str(err))
+
+
+ def IOnnxParser():
+ raise RuntimeError(message)
+
+try:
+ from ._generated.pyarmnn_tfparser import ITfParser
+except ImportError as err:
+ logger = logging.getLogger(__name__)
+ message = "Your ArmNN library instance does not support TF models parser functionality. "
+ logger.warning(message + "Skipped ITfParser import.")
+ logger.debug(str(err))
+
+
+ def ITfParser():
+ raise RuntimeError(message)
+
+try:
+ from ._generated.pyarmnn_tfliteparser import ITfLiteParser
+except ImportError as err:
+ logger = logging.getLogger(__name__)
+ message = "Your ArmNN library instance does not support TF lite models parser functionality. "
+ logger.warning(message + "Skipped ITfLiteParser import.")
+ logger.debug(str(err))
+
+
+ def ITfLiteParser():
+ raise RuntimeError(message)
+
+# Network
+from ._generated.pyarmnn import Optimize, OptimizerOptions, IOptimizedNetwork, IInputSlot, \
+ IOutputSlot, IConnectableLayer, INetwork
+
+# Backend
+from ._generated.pyarmnn import BackendId
+from ._generated.pyarmnn import IDeviceSpec
+
+# Tensors
+from ._generated.pyarmnn import TensorInfo, TensorShape
+
+# Runtime
+from ._generated.pyarmnn import IRuntime, CreationOptions, INetworkProperties
+
+# Profiler
+from ._generated.pyarmnn import IProfiler
+
+# Types
+from ._generated.pyarmnn import DataType_Float32, DataType_QuantisedAsymm8, DataType_Signed32, \
+ DataType_QuantisedSymm16, DataType_Float16, DataType_QuantizedSymm8PerAxis, \
+ DataType_QuantisedSymm8, DataType_Boolean
+from ._generated.pyarmnn import DataLayout_NCHW, DataLayout_NHWC
+
+from ._generated.pyarmnn import ActivationFunction_Abs, ActivationFunction_BoundedReLu, ActivationFunction_LeakyReLu, \
+ ActivationFunction_Linear, ActivationFunction_ReLu, ActivationFunction_Sigmoid, ActivationFunction_SoftReLu, \
+ ActivationFunction_Sqrt, ActivationFunction_Square, ActivationFunction_TanH, ActivationDescriptor
+from ._generated.pyarmnn import ArgMinMaxFunction_Max, ArgMinMaxFunction_Min, ArgMinMaxDescriptor
+from ._generated.pyarmnn import BatchNormalizationDescriptor, BatchToSpaceNdDescriptor
+from ._generated.pyarmnn import ComparisonDescriptor, ComparisonOperation_Equal, ComparisonOperation_Greater, \
+ ComparisonOperation_GreaterOrEqual, ComparisonOperation_Less, \
+ ComparisonOperation_LessOrEqual, ComparisonOperation_NotEqual
+from ._generated.pyarmnn import Convolution2dDescriptor, DepthToSpaceDescriptor, DepthwiseConvolution2dDescriptor, \
+ DetectionPostProcessDescriptor, FakeQuantizationDescriptor, FullyConnectedDescriptor, \
+ InstanceNormalizationDescriptor, LstmDescriptor, L2NormalizationDescriptor, MeanDescriptor
+from ._generated.pyarmnn import NormalizationAlgorithmChannel_Across, NormalizationAlgorithmChannel_Within, \
+ NormalizationAlgorithmMethod_LocalBrightness, NormalizationAlgorithmMethod_LocalContrast, NormalizationDescriptor
+from ._generated.pyarmnn import PadDescriptor
+from ._generated.pyarmnn import PermutationVector, PermuteDescriptor
+from ._generated.pyarmnn import OutputShapeRounding_Ceiling, OutputShapeRounding_Floor, \
+ PaddingMethod_Exclude, PaddingMethod_IgnoreValue, PoolingAlgorithm_Average, PoolingAlgorithm_L2, \
+ PoolingAlgorithm_Max, Pooling2dDescriptor
+from ._generated.pyarmnn import ResizeMethod_Bilinear, ResizeMethod_NearestNeighbor, ResizeDescriptor, \
+ ReshapeDescriptor, SliceDescriptor, SpaceToBatchNdDescriptor, SpaceToDepthDescriptor, StandInDescriptor, \
+ StackDescriptor, StridedSliceDescriptor, SoftmaxDescriptor, TransposeConvolution2dDescriptor, \
+ SplitterDescriptor
+from ._generated.pyarmnn import ConcatDescriptor, CreateDescriptorForConcatenation
+
+from ._generated.pyarmnn import LstmInputParams
+
+# Public API
+# Quantization
+from ._quantization.quantize_and_dequantize import quantize, dequantize
+
+# Tensor
+from ._tensor.tensor import Tensor
+from ._tensor.const_tensor import ConstTensor
+from ._tensor.workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray
+
+# Utilities
+from ._utilities.profiling_helper import ProfilerData, get_profiling_data
+
+from ._version import __version__, __arm_ml_version__
+
+ARMNN_VERSION = GetVersion()
+
+
+def __check_version():
+ from ._version import check_armnn_version
+ check_armnn_version(ARMNN_VERSION)
+
+
+__check_version()
+
+__all__ = []
+
+__private_api_names = ['__check_version']
+
+for name, obj in inspect.getmembers(sys.modules[__name__]):
+ if inspect.isclass(obj) or inspect.isfunction(obj):
+ if name not in __private_api_names:
+ __all__.append(name)
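In practice the fallback stubs above mean that a missing parser is still importable by name but fails at call time. A minimal sketch (assuming an Arm NN build without the Caffe parser):

```python
import pyarmnn as ann

try:
    parser = ann.ICaffeParser()  # the stub raises if pyarmnn_caffeparser failed to import
except RuntimeError as err:
    print(err)  # "Your ArmNN library instance does not support Caffe models parser functionality."
```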
diff --git a/python/pyarmnn/src/pyarmnn/_generated/__init__.py b/python/pyarmnn/src/pyarmnn/_generated/__init__.py
new file mode 100644
index 0000000000..18b11630d1
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_generated/__init__.py
@@ -0,0 +1,2 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
diff --git a/python/pyarmnn/src/pyarmnn/_quantization/__init__.py b/python/pyarmnn/src/pyarmnn/_quantization/__init__.py
new file mode 100644
index 0000000000..fd9bbf1db7
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_quantization/__init__.py
@@ -0,0 +1,4 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+from .quantize_and_dequantize import quantize, dequantize
diff --git a/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py b/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py
new file mode 100644
index 0000000000..7f06b43bc8
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py
@@ -0,0 +1,70 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+from .._generated.pyarmnn import Quantize_uint8_t, Quantize_int16_t, Quantize_int32_t, \
+ Dequantize_uint8_t, Dequantize_int16_t, Dequantize_int32_t
+
+__dtype_to_quantize_function = {
+ 'uint8': Quantize_uint8_t,
+ 'int16': Quantize_int16_t,
+ 'int32': Quantize_int32_t
+ }
+
+__dtype_to_dequantize_function = {
+ 'uint8': ((0, 255), Dequantize_uint8_t),
+ 'int16': ((-32768, 32767), Dequantize_int16_t),
+ 'int32': ((-2147483648, 2147483647), Dequantize_int32_t)
+ }
+
+
+def quantize(value: float, scale: float, offset: int, target_dtype: str) -> int:
+ """Quantize given value to the given target datatype using Arm NN.
+
+ This function can be used to convert a 32-bit floating point value into 16/32-bit
+ integer or 8-bit unsigned integer values.
+
+ Args:
+ value (float): The value to be quantized.
+ scale (float): A numeric constant that the value is multiplied by.
+ offset (int): A 'zero-point' used to 'shift' the integer range.
+ target_dtype (str): The target data type. Supported values: 'uint8', 'int16', 'int32'.
+
+ Returns:
+ int: A quantized 8-bit unsigned integer value or 16/32-bit integer value.
+ """
+
+ if target_dtype not in __dtype_to_quantize_function:
+ raise ValueError("""Unexpected target datatype {} given.
+ Armnn currently supports quantization to {} values.""".format(target_dtype, list(__dtype_to_quantize_function.keys())))
+
+ return __dtype_to_quantize_function[target_dtype](float(value), scale, offset)
+
+
+def dequantize(value: int, scale: float, offset: float, from_dtype: str) -> float:
+ """Dequantize given value from the given datatype using Armnn.
+
+ This function can be used to convert an 8-bit unsigned integer value or 16/32-bit
+ integer value into a 32-bit floating point value. Typically used when decoding an
+ output value from an output tensor on a quantized model.
+
+ Args:
+ value (int): The value to be dequantized. Value could be numpy numeric data type.
+ scale (float): A numeric constant that the value is multiplied by.
+ offset (float): A 'zero-point' used to 'shift' the integer range.
+ from_dtype (str): The data type 'value' represents. Supported values: 'uint8', 'int16', 'int32'.
+
+ Returns:
+ float: A dequantized 32-bit floating-point value.
+ """
+
+ # specifies which function to use with given datatype and the value range for that data type.
+ if from_dtype not in __dtype_to_dequantize_function:
+ raise ValueError("""Unexpected value datatype {} given.
+ Armnn currently supports dequantization from {} values.""".format(from_dtype, list(__dtype_to_dequantize_function.keys())))
+
+ input_range = __dtype_to_dequantize_function[from_dtype][0]
+
+ if not input_range[0] <= value <= input_range[1]:
+ raise ValueError('Value is not within range of the given datatype {}'.format(from_dtype))
+
+ return __dtype_to_dequantize_function[from_dtype][1](int(value), scale, offset)
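A minimal usage sketch for these helpers (scale and offset are illustrative; the value 153 assumes Arm NN rounds to nearest):

```python
import pyarmnn as ann

scale, offset = 0.02, 128

q = ann.quantize(0.5, scale, offset, 'uint8')  # round(0.5 / 0.02) + 128 -> 153
f = ann.dequantize(q, scale, offset, 'uint8')  # (153 - 128) * 0.02 -> 0.5
print(q, f)
```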
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/__init__.py b/python/pyarmnn/src/pyarmnn/_tensor/__init__.py
new file mode 100644
index 0000000000..0c928785b4
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_tensor/__init__.py
@@ -0,0 +1,6 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+from .const_tensor import ConstTensor
+from .tensor import Tensor
+from .workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
new file mode 100644
index 0000000000..9735d7a63b
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
@@ -0,0 +1,159 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import numpy as np
+
+from .._generated.pyarmnn import DataType_QuantisedAsymm8, DataType_QuantisedSymm16, DataType_Signed32, \
+ DataType_Float32, DataType_Float16
+from .._generated.pyarmnn import ConstTensor as AnnConstTensor, TensorInfo, Tensor
+
+
+class ConstTensor(AnnConstTensor):
+ """Creates a PyArmNN ConstTensor object.
+
+ A ConstTensor is a Tensor with an immutable data store. Typically, a ConstTensor
+ is used to input data into a network when running inference.
+
+ This class overrides the swig generated Tensor class. The aim of
+ this is to have an easy to use public API for the ConstTensor objects.
+
+ """
+
+ def __init__(self, *args):
+ """
+ Supported tensor data types:
+ DataType_QuantisedAsymm8,
+ DataType_QuantisedSymm16,
+ DataType_Signed32,
+ DataType_Float32,
+ DataType_Float16
+
+ Examples:
+ Create empty ConstTensor
+ >>> import pyarmnn as ann
+ >>> ann.ConstTensor()
+
+ Create ConstTensor given tensor info and input data
+ >>> input_data = ... # numpy array
+ >>> ann.ConstTensor(ann.TensorInfo(...), input_data)
+
+ Create ConstTensor from another ConstTensor i.e. copy ConstTensor
+ >>> ann.ConstTensor(ann.ConstTensor())
+
+ Create ConstTensor from tensor
+ >>> ann.ConstTensor(ann.Tensor())
+
+ Args:
+ tensor (Tensor, optional): Create a ConstTensor from a Tensor.
+ const_tensor (ConstTensor, optional): Create a ConstTensor from a ConstTensor i.e. copy.
+ tensor_info (TensorInfo, optional): Tensor information.
+ input_data (ndarray): Numpy array. The numpy array will be transformed to a
+ buffer according to type returned by `TensorInfo.GetDataType`.
+ Input data values type must correspond to data type returned by
+ `TensorInfo.GetDataType`.
+
+ Raises:
+ TypeError: Unsupported input data type.
+ ValueError: Unsupported tensor data type or incorrect input data size.
+ """
+ self.__memory_area = None
+
+ # TensorInfo as first argument and numpy array as second
+ if len(args) > 1 and isinstance(args[0], TensorInfo):
+ if isinstance(args[1], np.ndarray):
+ self.__create_memory_area(args[0].GetDataType(), args[0].GetNumBytes(), args[0].GetNumElements(),
+ args[1])
+ super().__init__(args[0], self.__memory_area.data)
+ else:
+ raise TypeError('Data must be provided as a numpy array.')
+
+ # copy constructor - reference to memory area is passed from copied const
+ # tensor and armnn's copy constructor is called
+ elif len(args) > 0 and isinstance(args[0], (ConstTensor, Tensor)):
+ self.__memory_area = args[0].get_memory_area()
+ super().__init__(args[0])
+
+ # empty tensor
+ elif len(args) == 0:
+ super().__init__()
+
+ else:
+            raise ValueError('Incorrect number of arguments or type of arguments provided to create ConstTensor.')
+
+ def __copy__(self) -> 'ConstTensor':
+ """ Make copy of a const tensor.
+
+ Make const tensor copyable using the python copy operation.
+
+ Note:
+ The tensor memory area is NOT copied. Instead, the new tensor maintains a
+ reference to the same memory area as the old tensor.
+
+ Example:
+ Copy empty tensor
+ >>> from copy import copy
+ >>> import pyarmnn as ann
+ >>> tensor = ann.ConstTensor()
+ >>> copied_tensor = copy(tensor)
+
+ Returns:
+            ConstTensor: A copy of the const tensor object provided.
+
+ """
+ return ConstTensor(self)
+
+ @staticmethod
+ def __check_size(data: np.ndarray, num_bytes: int, num_elements: int):
+ """ Check the size of the input data against the number of bytes provided by tensor info.
+
+ Args:
+ data (ndarray): Input data.
+ num_bytes (int): Number of bytes required by tensor info.
+            num_elements (int): Number of elements required by tensor info.
+
+ Raises:
+ ValueError: number of bytes in input data does not match tensor info.
+
+ """
+ size_in_bytes = data.nbytes
+ elements = data.size
+
+ if size_in_bytes != num_bytes:
+ raise ValueError(
+ "ConstTensor requires {} bytes, {} provided. "
+ "Is your input array data type ({}) aligned with TensorInfo?".format(num_bytes, size_in_bytes,
+ data.dtype))
+ elif elements != num_elements:
+ raise ValueError("ConstTensor requires {} elements, {} provided.".format(num_elements, elements))
+
+ def __create_memory_area(self, data_type: int, num_bytes: int, num_elements: int, data: np.ndarray):
+        """ Create the read-only memory area backing this const tensor from the input data.
+
+ Args:
+ data_type (int): The type of data that will be stored in the memory area.
+ See DataType_*.
+ num_bytes (int): Determines the size of the memory area that will be created.
+ num_elements (int): Determines number of elements in memory area.
+ data (ndarray): Input data as numpy array.
+
+ """
+ np_data_type_mapping = {DataType_QuantisedAsymm8: np.uint8,
+ DataType_Float32: np.float32,
+ DataType_QuantisedSymm16: np.int16,
+ DataType_Signed32: np.int32,
+ DataType_Float16: np.float16}
+
+ if data_type not in np_data_type_mapping:
+ raise ValueError("The data type provided for this Tensor is not supported: {}".format(data_type))
+
+ self.__check_size(data, num_bytes, num_elements)
+ self.__memory_area = data
+ self.__memory_area.flags.writeable = False
+
+ def get_memory_area(self) -> np.ndarray:
+ """ Get values that are stored by the tensor.
+
+ Returns:
+ ndarray: Tensor data (as numpy array).
+
+ """
+ return self.__memory_area
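
A minimal sketch of constructing a ConstTensor from a TensorInfo and a matching numpy array. The shape, data and the two-argument TensorInfo constructor are illustrative; TensorInfo and TensorShape come from the generated bindings:

    import numpy as np
    import pyarmnn as ann

    # TensorInfo describing a 1x4 float32 tensor; the numpy array must match
    # it in both element count and byte size, or __init__ raises ValueError.
    tensor_info = ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32)
    data = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)

    const_tensor = ann.ConstTensor(tensor_info, data)
    print(const_tensor.get_memory_area())  # the (now read-only) backing array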
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/tensor.py b/python/pyarmnn/src/pyarmnn/_tensor/tensor.py
new file mode 100644
index 0000000000..5906b6bae6
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_tensor/tensor.py
@@ -0,0 +1,119 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import numpy as np
+
+from .._generated.pyarmnn import Tensor as annTensor, TensorInfo, DataType_QuantisedAsymm8, \
+ DataType_Float32, DataType_QuantisedSymm16, DataType_Signed32, DataType_Float16
+
+
+class Tensor(annTensor):
+    """PyArmNN Tensor object.
+
+    This class overrides the swig generated Tensor class. The aim of
+    this is to provide an easy-to-use public API for the Tensor object.
+
+    Memory is allocated and managed by this class, so, unlike the swig
+    generated API, users do not need to manage a separate memory area for the tensor.
+
+ """
+
+ def __init__(self, *args):
+ """ Create Tensor object.
+
+ Supported tensor data types:
+ DataType_QuantisedAsymm8,
+ DataType_QuantisedSymm16,
+ DataType_Signed32,
+ DataType_Float32,
+ DataType_Float16
+
+ Examples:
+ Create an empty tensor
+ >>> import pyarmnn as ann
+ >>> ann.Tensor()
+
+ Create tensor given tensor information
+ >>> ann.Tensor(ann.TensorInfo(...))
+
+ Create tensor from another tensor i.e. copy a tensor
+ >>> ann.Tensor(ann.Tensor())
+
+ Args:
+ tensor(Tensor, optional): Create Tensor from a Tensor i.e. copy.
+ tensor_info (TensorInfo, optional): Tensor information.
+
+ Raises:
+            TypeError: Unsupported input data type.
+            ValueError: Appropriate constructor could not be found with the provided arguments.
+
+ """
+ self.__memory_area = None
+
+        # TensorInfo as first argument: create the memory area manually
+ if len(args) > 0 and isinstance(args[0], TensorInfo):
+ self.__create_memory_area(args[0].GetDataType(), args[0].GetNumElements())
+ super().__init__(args[0], self.__memory_area.data)
+
+ # copy constructor - reference to memory area is passed from copied tensor
+ # and armnn's copy constructor is called
+ elif len(args) > 0 and isinstance(args[0], Tensor):
+ self.__memory_area = args[0].get_memory_area()
+ super().__init__(args[0])
+
+ # empty constructor
+ elif len(args) == 0:
+ super().__init__()
+
+ else:
+ raise ValueError('Incorrect number of arguments or type of arguments provided to create Tensor.')
+
+ def __copy__(self) -> 'Tensor':
+ """ Make copy of a tensor.
+
+ Make tensor copyable using the python copy operation.
+
+ Note:
+ The tensor memory area is NOT copied. Instead, the new tensor maintains a
+ reference to the same memory area as the old tensor.
+
+ Example:
+ Copy empty tensor
+ >>> from copy import copy
+ >>> import pyarmnn as ann
+ >>> tensor = ann.Tensor()
+ >>> copied_tensor = copy(tensor)
+
+ Returns:
+ Tensor: a copy of the tensor object provided.
+
+ """
+ return Tensor(self)
+
+ def __create_memory_area(self, data_type: int, num_elements: int):
+ """ Create the memory area used by the tensor to output its results.
+
+ Args:
+ data_type (int): The type of data that will be stored in the memory area.
+ See DataType_*.
+ num_elements (int): Determines the size of the memory area that will be created.
+
+ """
+ np_data_type_mapping = {DataType_QuantisedAsymm8: np.uint8,
+ DataType_Float32: np.float32,
+ DataType_QuantisedSymm16: np.int16,
+ DataType_Signed32: np.int32,
+ DataType_Float16: np.float16}
+
+ if data_type not in np_data_type_mapping:
+            raise ValueError("The data type provided for this Tensor is not supported: {}".format(data_type))
+
+ self.__memory_area = np.empty(shape=(num_elements,), dtype=np_data_type_mapping[data_type])
+
+ def get_memory_area(self) -> np.ndarray:
+ """ Get values that are stored by the tensor.
+
+ Returns:
+ ndarray : Tensor data (as numpy array).
+
+ """
+ return self.__memory_area
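
A minimal sketch of the Tensor class and the shared-memory copy semantics documented in __copy__ (the shape and data type are illustrative):

    from copy import copy

    import pyarmnn as ann

    tensor = ann.Tensor(ann.TensorInfo(ann.TensorShape((2, 2)), ann.DataType_Float32))
    tensor.get_memory_area()[:] = 0.0   # zero the backing buffer

    shallow = copy(tensor)              # new Tensor, same memory area
    shallow.get_memory_area()[0] = 42.0
    print(tensor.get_memory_area()[0])  # 42.0 - both tensors see the write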
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
new file mode 100644
index 0000000000..e345a1a5d4
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
@@ -0,0 +1,123 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""
+This file contains functions relating to WorkloadTensors.
+WorkloadTensors are the inputTensors and outputTensors that are consumed by IRuntime.EnqueueWorkload.
+"""
+from typing import Union, List, Tuple
+
+import numpy as np
+
+from .tensor import Tensor
+from .const_tensor import ConstTensor
+
+
+def make_input_tensors(inputs_binding_info: List[Tuple],
+ input_data: List[np.ndarray]) -> List[Tuple[int, ConstTensor]]:
+ """Returns `inputTensors` to be used with `IRuntime.EnqueueWorkload`.
+
+ This is the primary function to call when you want to produce `inputTensors` for `IRuntime.EnqueueWorkload`.
+ The output is a list of tuples containing ConstTensors with a corresponding input tensor id.
+ The output should be used directly with `IRuntime.EnqueueWorkload`.
+ This function works for single or multiple input data and binding information.
+
+ Examples:
+ Creating inputTensors.
+ >>> import pyarmnn as ann
+ >>> import numpy as np
+ >>>
+ >>> parser = ann.ITfLiteParser()
+ >>> ...
+ >>> example_image = np.array(...)
+ >>> input_binding_info = parser.GetNetworkInputBindingInfo(...)
+ >>>
+ >>> input_tensors = ann.make_input_tensors([input_binding_info], [example_image])
+
+ Args:
+ inputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for input tensors obtained from `GetNetworkInputBindingInfo`.
+        input_data (list of ndarray): Tensor data to be used for inference.
+
+ Returns:
+        list: `inputTensors` - A list of tuples (`int`, `ConstTensor`).
+
+ Raises:
+ ValueError: If length of `inputs_binding_info` and `input_data` are not the same.
+ """
+ if len(inputs_binding_info) != len(input_data):
+ raise ValueError("Length of 'inputs_binding_info' does not match length of 'input_data'")
+
+ input_tensors = []
+
+ for in_bind_info, in_data in zip(inputs_binding_info, input_data):
+ in_tensor_id = in_bind_info[0]
+ in_tensor_info = in_bind_info[1]
+ input_tensors.append((in_tensor_id, ConstTensor(in_tensor_info, in_data)))
+
+ return input_tensors
+
+
+def make_output_tensors(outputs_binding_info: List[Tuple]) -> List[Tuple[int, Tensor]]:
+ """Returns `outputTensors` to be used with `IRuntime.EnqueueWorkload`.
+
+ This is the primary function to call when you want to produce `outputTensors` for `IRuntime.EnqueueWorkload`.
+ The output is a list of tuples containing Tensors with a corresponding output tensor id.
+ The output should be used directly with `IRuntime.EnqueueWorkload`.
+
+ Examples:
+ Creating outputTensors.
+ >>> import pyarmnn as ann
+ >>>
+ >>> parser = ann.ITfLiteParser()
+ >>> ...
+ >>> output_binding_info = parser.GetNetworkOutputBindingInfo(...)
+ >>>
+ >>> output_tensors = ann.make_output_tensors([output_binding_info])
+
+ Args:
+ outputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for output tensors obtained from `GetNetworkOutputBindingInfo`.
+
+ Returns:
+ list: `outputTensors` - A list of tuples (`int`, `Tensor`).
+ """
+ output_tensors = []
+
+ for out_bind_info in outputs_binding_info:
+ out_tensor_id = out_bind_info[0]
+ out_tensor_info = out_bind_info[1]
+ output_tensors.append((out_tensor_id, Tensor(out_tensor_info)))
+
+ return output_tensors
+
+
+def workload_tensors_to_ndarray(workload_tensors: List[Tuple[int, Union[Tensor, ConstTensor]]]) -> List[np.ndarray]:
+ """Returns a list of the underlying tensor data as ndarrays from `inputTensors` or `outputTensors`.
+
+ We refer to `inputTensors` and `outputTensors` as workload tensors because
+ they are used with `IRuntime.EnqueueWorkload`.
+ Although this function can be used on either `inputTensors` or `outputTensors` the main use of this function
+ is to collect results from `outputTensors` after `IRuntime.EnqueueWorkload` has been called.
+
+ Examples:
+ Getting results after inference.
+ >>> import pyarmnn as ann
+ >>>
+ >>> ...
+ >>> runtime = ann.IRuntime(...)
+ >>> ...
+ >>> runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+ >>>
+        >>> inference_results = workload_tensors_to_ndarray(output_tensors)
+
+ Args:
+ workload_tensors (inputTensors or outputTensors): `inputTensors` or `outputTensors` to get data from.
+
+ Returns:
+ list: List of `ndarrays` for the underlying tensor data from given `inputTensors` or `outputTensors`.
+ """
+ arrays = []
+ for index, (_, tensor) in enumerate(workload_tensors):
+ arrays.append(tensor.get_memory_area())
+ print("Workload tensor {} shape: {}".format(index, tensor.GetShape()))
+
+ return arrays
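
Putting the three helpers together, a typical inference flow might look like the sketch below. The runtime and optimizer calls (CreationOptions, IRuntime, Optimize, OptimizerOptions, LoadNetwork, GetDeviceSpec) follow the interfaces declared elsewhere in this patch; the model path, backend choice and input shape are placeholders:

    import numpy as np
    import pyarmnn as ann

    parser = ann.ITfLiteParser()
    network = parser.CreateNetworkFromBinaryFile('model.tflite')  # placeholder path

    graph_id = 0
    input_name = parser.GetSubgraphInputTensorNames(graph_id)[0]
    output_name = parser.GetSubgraphOutputTensorNames(graph_id)[0]
    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_name)
    output_binding_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)

    runtime = ann.IRuntime(ann.CreationOptions())
    opt_network, _ = ann.Optimize(network, [ann.BackendId('CpuRef')],
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    net_id, _ = runtime.LoadNetwork(opt_network)

    input_data = np.zeros((1, 224, 224, 3), dtype=np.float32)  # placeholder input
    input_tensors = ann.make_input_tensors([input_binding_info], [input_data])
    output_tensors = ann.make_output_tensors([output_binding_info])

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    results = ann.workload_tensors_to_ndarray(output_tensors)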
diff --git a/python/pyarmnn/src/pyarmnn/_utilities/__init__.py b/python/pyarmnn/src/pyarmnn/_utilities/__init__.py
new file mode 100644
index 0000000000..e60fae0880
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_utilities/__init__.py
@@ -0,0 +1,4 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+from .profiling_helper import ProfilerData, get_profiling_data
diff --git a/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py b/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py
new file mode 100644
index 0000000000..d10c28915e
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py
@@ -0,0 +1,95 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import json
+from collections import namedtuple
+
+ProfilerData = namedtuple('ProfilerData', ['inference_data', 'per_workload_execution_data'])
+ProfilerData.__doc__ = """Container to hold the profiling inference data, and the profiling data per workload.
+
+Contains:
+ inference_data (dict): holds end-to-end inference performance data. Keys:
+ 'time_unit' - timer units.
+ 'execution_time' - list of total inference execution times for each inference run.
+    per_workload_execution_data (dict): holds per-operation performance data; each key is an operation name.
+        Each operation has:
+ 'time_unit' - timer units.
+ 'execution_time' - list of total execution times for each inference run.
+ 'backend' - backend used for this operation.
+
+Example:
+
+ >>> data = get_profiling_data(profiler)
+ >>> print(data)
+    ProfilerData(inference_data={'time_unit': 'us',
+ 'execution_time': [8901372.972]},
+ per_workload_execution_data={'CopyMemGeneric_Execute_#3': {'time_unit': 'us',
+ 'execution_time': [28.941],
+ 'backend': 'Unknown'},
+ 'RefConvolution2dWorkload_Execute_#5': {'time_unit': 'us',
+ 'execution_time': [126838.071],
+ 'backend': 'CpuRef'},
+ 'RefDepthwiseConvolution2dWorkload_Execute_#6': {'time_unit': 'us',
+ 'execution_time': [49886.208],
+ 'backend': 'CpuRef'}
+ ...etc
+ }
+ )
+"""
+
+
+def get_profiling_data(profiler: 'IProfiler') -> ProfilerData:
+ """Reads IProfiler object passed in, extracts the relevant data
+ and returns it in a ProfilerData container.
+
+ Args:
+        profiler (IProfiler): The IProfiler object to be parsed.
+
+ Returns:
+ ProfilerData: A container containing the relevant data extracted from the Profiler output.
+ """
+
+ top_level_dict = json.loads(profiler.as_json())
+ armnn_data = top_level_dict["ArmNN"]
+ inference_measurements = armnn_data["inference_measurements_#1"]
+ execution_data = inference_measurements["Execute_#2"]
+
+ workload_data = {}
+ inference_data = {}
+ for exec_key, exec_value in execution_data.items():
+ # Check all items with a type.
+ if "type" in exec_value and exec_value["type"] == "Event":
+ for event_key, event_value in exec_value.items():
+ if event_key.startswith("Wall clock time_#") and event_value["type"] == "Measurement":
+ time_data = __get_wall_clock_times__(event_value)
+ time_data["backend"] = __get_backend(exec_key)
+ workload_data[exec_key] = time_data
+ # This is the total inference time map
+ if exec_key.startswith("Wall clock time_#") and exec_value["type"] == "Measurement":
+ time_data = __get_wall_clock_times__(exec_value)
+ inference_data.update(time_data)
+ return ProfilerData(inference_data=inference_data, per_workload_execution_data=workload_data)
+
+
+def __get_wall_clock_times__(wall_clock_item):
+    # Extract the timer unit and the raw list of per-run execution times.
+    return {"time_unit": wall_clock_item["unit"],
+            "execution_time": list(wall_clock_item["raw"])}
+
+
+def __get_backend(exec_key):
+ if "ref" in exec_key.lower():
+ return "CpuRef"
+ elif "neon" in exec_key.lower():
+ return "CpuAcc"
+ elif "cl" in exec_key.lower():
+ return "GpuAcc"
+ elif "npu" in exec_key.lower():
+ return "NpuAcc"
+ else:
+ return "Unknown"
+
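
A sketch of driving the helper above. It assumes IRuntime exposes a GetProfiler accessor for a loaded network (see the runtime module in this patch) and that profiling was enabled before the workloads were enqueued:

    import pyarmnn as ann

    def print_profiling_summary(runtime, net_id):
        # GetProfiler is assumed to return the IProfiler for the loaded network.
        profiler = runtime.GetProfiler(net_id)
        data = ann.get_profiling_data(profiler)

        unit = data.inference_data['time_unit']
        for run_time in data.inference_data['execution_time']:
            print('inference took {} {}'.format(run_time, unit))

        for op_name, op_data in data.per_workload_execution_data.items():
            print('{} on {}: {} {}'.format(op_name, op_data['backend'],
                                           op_data['execution_time'], op_data['time_unit']))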
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
new file mode 100644
index 0000000000..2bcb888819
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -0,0 +1,26 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+version_info = (19, 11, 0)
+
+__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
+
+if __dev_version_env:
+ __dev_version = "dev0"
+ try:
+ __dev_version = "dev{}".format(int(__dev_version_env))
+ except ValueError:
+ __dev_version = str(__dev_version_env)
+
+ version_info = (*version_info, __dev_version)
+
+__version__ = '.'.join(str(c) for c in version_info)
+__arm_ml_version__ = '2{:03d}{:02d}{:02d}'.format(version_info[0], version_info[1], version_info[2])
+
+
+def check_armnn_version(installed_armnn_version, expected_armnn_version=__arm_ml_version__):
+    expected_armnn_version = expected_armnn_version[:-2]  # drop the patch version (last two digits)
+    installed_armnn_version = installed_armnn_version[:-2]  # drop the patch version (last two digits)
+ assert expected_armnn_version == installed_armnn_version, \
+ "Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version)
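
For reference, this is how the strings above compose for the (19, 11, 0) version_info in this file:

    version_info = (19, 11, 0)

    '.'.join(str(c) for c in version_info)
    # -> '19.11.0' (__version__)

    '2{:03d}{:02d}{:02d}'.format(*version_info)
    # -> '20191100' (__arm_ml_version__: a leading '2' plus zero-padded major, minor, patch)

    # check_armnn_version then compares the strings with the last two digits
    # (the patch part) removed: '20191100'[:-2] == '201911'.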
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn.i b/python/pyarmnn/src/pyarmnn/swig/armnn.i
new file mode 100644
index 0000000000..48e0f2edbb
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn.i
@@ -0,0 +1,27 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn
+%{
+#define SWIG_FILE_WITH_INIT
+#include "armnn/Types.hpp"
+%}
+
+//typemap definitions and other common stuff
+%include "standard_header.i"
+
+//armnn api submodules
+%include "modules/armnn_backend.i"
+%include "modules/armnn_types.i"
+%include "modules/armnn_descriptors.i"
+%include "modules/armnn_lstmparam.i"
+%include "modules/armnn_network.i"
+%include "modules/armnn_profiler.i"
+%include "modules/armnn_runtime.i"
+%include "modules/armnn_tensor.i"
+%include "modules/armnn_types_utils.i"
+
+// Clear exception typemap.
+%exception;
+
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i
new file mode 100644
index 0000000000..fa1a71fd9f
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_caffeparser.i
@@ -0,0 +1,103 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn_caffeparser
+%{
+#define SWIG_FILE_WITH_INIT
+#include "armnnCaffeParser/ICaffeParser.hpp"
+#include "armnn/INetwork.hpp"
+%}
+
+//typemap definitions and other common stuff
+%include "standard_header.i"
+
+namespace std {
+ %template(BindingPointInfo) pair<int, armnn::TensorInfo>;
+ %template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
+ %template(StringVector) vector<string>;
+}
+
+namespace armnnCaffeParser
+{
+
+%feature("docstring",
+"
+Interface for creating a parser object using Caffe (http://caffe.berkeleyvision.org/) caffemodel files.
+
+Parsers are used to automatically construct Arm NN graphs from model files.
+
+") ICaffeParser;
+
+%nodefaultctor ICaffeParser;
+class ICaffeParser
+{
+public:
+ // Documentation
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
+
+ Args:
+ name (str): Name of the input.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`)
+ ") GetNetworkInputBindingInfo;
+
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
+
+ Args:
+ name (str): Name of the output.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`)
+ ") GetNetworkOutputBindingInfo;
+
+ std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(const std::string& name);
+ std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(const std::string& name);
+};
+
+%extend ICaffeParser {
+    // This is not a substitution of the default constructor of the Armnn class. It tells swig to create a custom
+    // __init__ method for the ICaffeParser python object that uses the static factory method to do the job.
+
+ ICaffeParser() {
+ return armnnCaffeParser::ICaffeParser::CreateRaw();
+ }
+
+    // The following does not replace a real destructor of the Armnn class.
+    // It creates a function that is called when the swig object goes out of scope to clean up resources,
+    // so the user doesn't need to call ICaffeParser::Destroy themselves.
+    // `$self` is a pointer to the extracted ArmNN ICaffeParser object.
+
+ ~ICaffeParser() {
+ armnnCaffeParser::ICaffeParser::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+ Create the network from a Caffe caffemodel binary file on disk.
+
+ Args:
+        graphFile (str): Path to the caffe model to be parsed.
+        inputShapes (dict): A dict containing the input name as a key and `TensorShape` as a value for the network.
+ requestedOutputs (list): A list of the output tensor names.
+
+ Returns:
+ INetwork: INetwork object for the parsed Caffe model.
+ ") CreateNetworkFromBinaryFile;
+
+ %newobject CreateNetworkFromBinaryFile;
+ armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile,
+ const std::map<std::string, armnn::TensorShape>& inputShapes,
+ const std::vector<std::string>& requestedOutputs) {
+ return $self->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs).release();
+ }
+}
+}
+
+// Clear exception typemap.
+%exception;
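
A sketched use of the ICaffeParser wrapper above (the model path, layer names and input shape are placeholders):

    import pyarmnn as ann

    parser = ann.ICaffeParser()
    network = parser.CreateNetworkFromBinaryFile(
        'model.caffemodel',                           # placeholder path
        {'data': ann.TensorShape((1, 3, 224, 224))},  # input name -> shape
        ['prob'])                                     # requested output names

    input_binding_info = parser.GetNetworkInputBindingInfo('data')
    output_binding_info = parser.GetNetworkOutputBindingInfo('prob')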
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_onnxparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_onnxparser.i
new file mode 100644
index 0000000000..e72a425374
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_onnxparser.i
@@ -0,0 +1,96 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn_onnxparser
+%{
+#define SWIG_FILE_WITH_INIT
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#include "armnn/INetwork.hpp"
+%}
+
+//typemap definitions and other common stuff
+%include "standard_header.i"
+
+namespace std {
+ %template(BindingPointInfo) pair<int, armnn::TensorInfo>;
+ %template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
+ %template(StringVector) vector<string>;
+}
+
+namespace armnnOnnxParser
+{
+%feature("docstring",
+"
+Interface for creating a parser object using ONNX (https://onnx.ai/) onnx files.
+
+Parsers are used to automatically construct Arm NN graphs from model files.
+
+") IOnnxParser;
+
+%nodefaultctor IOnnxParser;
+class IOnnxParser
+{
+public:
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
+
+ Args:
+        name (str): Name of the input node.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`)
+ ") GetNetworkInputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(const std::string& name);
+
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
+
+ Args:
+        name (str): Name of the output node.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`)
+ ") GetNetworkOutputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(const std::string& name);
+};
+
+%extend IOnnxParser {
+    // This is not a substitution of the default constructor of the Armnn class. It tells swig to create a custom
+    // __init__ method for the IOnnxParser python object that uses the static factory method to do the job.
+ IOnnxParser() {
+ return armnnOnnxParser::IOnnxParser::CreateRaw();
+ }
+
+    // The following does not replace a real destructor of the Armnn class.
+    // It creates a function that is called when the swig object goes out of scope to clean up resources,
+    // so the user doesn't need to call IOnnxParser::Destroy themselves.
+    // `$self` is a pointer to the extracted ArmNN IOnnxParser object.
+ ~IOnnxParser() {
+ armnnOnnxParser::IOnnxParser::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+ Create the network from a binary file on disk.
+
+ Args:
+ graphFile (str): Path to the onnx model to be parsed.
+
+ Returns:
+ INetwork: Parsed network.
+
+ Raises:
+ RuntimeError: If model file was not found.
+ ") CreateNetworkFromBinaryFile;
+ %newobject CreateNetworkFromBinaryFile;
+ armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
+ return $self->CreateNetworkFromBinaryFile(graphFile).release();
+ }
+}
+
+}
+// Clear exception typemap.
+%exception;
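
A sketched use of the IOnnxParser wrapper above (the model path and node names are placeholders):

    import pyarmnn as ann

    parser = ann.IOnnxParser()
    network = parser.CreateNetworkFromBinaryFile('model.onnx')  # placeholder path

    input_binding_info = parser.GetNetworkInputBindingInfo('input')     # placeholder node name
    output_binding_info = parser.GetNetworkOutputBindingInfo('output')  # placeholder node name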
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
new file mode 100644
index 0000000000..fbe5fd7720
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
@@ -0,0 +1,132 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn_tfliteparser
+%{
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "armnn/Types.hpp"
+#include "armnn/INetwork.hpp"
+%}
+
+//typemap definitions and other common stuff
+%include "standard_header.i"
+
+namespace std {
+ %template(BindingPointInfo) pair<int, armnn::TensorInfo>;
+ %template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
+ %template(StringVector) vector<string>;
+}
+
+namespace armnnTfLiteParser
+{
+%feature("docstring",
+"
+Interface for creating a parser object using TfLite (https://www.tensorflow.org/lite) tflite files.
+
+Parsers are used to automatically construct Arm NN graphs from model files.
+
+") ITfLiteParser;
+%nodefaultctor ITfLiteParser;
+class ITfLiteParser
+{
+public:
+ %feature("docstring",
+ "
+    Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
+
+    Args:
+ subgraphId (int): The subgraph id.
+ name (str): Name of the input.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`).
+ ") GetNetworkInputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(size_t subgraphId, const std::string& name);
+
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
+
+ Args:
+        subgraphId (int): The subgraph id.
+ name (str): Name of the output.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`).
+ ") GetNetworkOutputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(size_t subgraphId, const std::string& name);
+
+ %feature("docstring",
+ "
+    Return the number of subgraphs in the parsed model.
+
+    Returns:
+ int: The number of subgraphs.
+ ") GetSubgraphCount;
+ size_t GetSubgraphCount();
+
+ %feature("docstring",
+ "
+ Return the input tensor names for a given subgraph.
+
+ Args:
+ subgraphId (int): The subgraph id.
+
+ Returns:
+        list: A list of the input tensor names for the given subgraph.
+ ") GetSubgraphInputTensorNames;
+ std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId);
+
+ %feature("docstring",
+ "
+ Return the output tensor names for a given subgraph.
+
+ Args:
+        subgraphId (int): The subgraph id.
+
+ Returns:
+        list: A list of the output tensor names for the given subgraph.
+ ") GetSubgraphOutputTensorNames;
+ std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId);
+};
+
+%extend ITfLiteParser {
+// This is not a substitution of the default constructor of the Armnn class. It tells swig to create a custom
+// __init__ method for the ITfLiteParser python object that uses the static factory method to do the job.
+
+ ITfLiteParser() {
+ return armnnTfLiteParser::ITfLiteParser::CreateRaw();
+ }
+
+// The following does not replace a real destructor of the Armnn class.
+// It creates a function that is called when the swig object goes out of scope to clean up resources,
+// so the user doesn't need to call ITfLiteParser::Destroy themselves.
+// `$self` is a pointer to the extracted ArmNN ITfLiteParser object.
+
+ ~ITfLiteParser() {
+ armnnTfLiteParser::ITfLiteParser::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+ Create the network from a flatbuffers binary file.
+
+ Args:
+ graphFile (str): Path to the tflite model to be parsed.
+
+ Returns:
+ INetwork: Parsed network.
+
+ Raises:
+ RuntimeError: If model file was not found.
+ ") CreateNetworkFromBinaryFile;
+
+ %newobject CreateNetworkFromBinaryFile;
+ armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
+ return $self->CreateNetworkFromBinaryFile(graphFile).release();
+ }
+
+}
+
+}
+// Clear exception typemap.
+%exception;
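
A sketched use of the ITfLiteParser wrapper above, including the subgraph introspection methods (the model path is a placeholder; most tflite models contain a single subgraph with id 0):

    import pyarmnn as ann

    parser = ann.ITfLiteParser()
    network = parser.CreateNetworkFromBinaryFile('model.tflite')  # placeholder path

    subgraph_id = 0
    assert subgraph_id < parser.GetSubgraphCount()
    input_names = parser.GetSubgraphInputTensorNames(subgraph_id)
    output_names = parser.GetSubgraphOutputTensorNames(subgraph_id)

    input_binding_info = parser.GetNetworkInputBindingInfo(subgraph_id, input_names[0])
    output_binding_info = parser.GetNetworkOutputBindingInfo(subgraph_id, output_names[0])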
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_tfparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_tfparser.i
new file mode 100644
index 0000000000..3438492d26
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_tfparser.i
@@ -0,0 +1,102 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn_tfparser
+%{
+#define SWIG_FILE_WITH_INIT
+#include "armnnTfParser/ITfParser.hpp"
+#include "armnn/INetwork.hpp"
+%}
+
+//typemap definitions and other common stuff
+%include "standard_header.i"
+
+namespace std {
+ %template(BindingPointInfo) pair<int, armnn::TensorInfo>;
+ %template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
+ %template(StringVector) vector<string>;
+}
+
+namespace armnnTfParser
+{
+%feature("docstring",
+"
+Interface for creating a parser object using TensorFlow (https://www.tensorflow.org/) frozen pb files.
+
+Parsers are used to automatically construct Arm NN graphs from model files.
+
+") ITfParser;
+%nodefaultctor ITfParser;
+class ITfParser
+{
+public:
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
+
+ Args:
+ name (str): Name of the input.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`).
+ ") GetNetworkInputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(const std::string& name);
+
+ %feature("docstring",
+ "
+ Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
+
+ Args:
+ name (str): Name of the output.
+
+ Returns:
+ tuple: (`int`, `TensorInfo`).
+ ") GetNetworkOutputBindingInfo;
+ std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(const std::string& name);
+};
+
+%extend ITfParser {
+    // This is not a substitution of the default constructor of the Armnn class. It tells swig to create a custom
+    // __init__ method for the ITfParser python object that uses the static factory method to do the job.
+
+ ITfParser() {
+ return armnnTfParser::ITfParser::CreateRaw();
+ }
+
+    // The following does not replace a real destructor of the Armnn class.
+    // It creates a function that is called when the swig object goes out of scope to clean up resources,
+    // so the user doesn't need to call ITfParser::Destroy themselves.
+    // `$self` is a pointer to the extracted ArmNN ITfParser object.
+
+ ~ITfParser() {
+ armnnTfParser::ITfParser::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+    Create the network from a TensorFlow protobuf (.pb) binary file.
+
+ Args:
+ graphFile (str): Path to the tf model to be parsed.
+        inputShapes (dict): A dict containing the input name as a key and TensorShape as a value.
+ requestedOutputs (list of str): A list of the output tensor names.
+
+ Returns:
+ INetwork: Parsed network.
+
+ Raises:
+ RuntimeError: If model file was not found.
+ ") CreateNetworkFromBinaryFile;
+ %newobject CreateNetworkFromBinaryFile;
+ armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile,
+ const std::map<std::string, armnn::TensorShape>& inputShapes,
+ const std::vector<std::string>& requestedOutputs) {
+ return $self->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs).release();
+ }
+
+}
+
+}
+// Clear exception typemap.
+%exception;
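
A sketched use of the ITfParser wrapper above (the model path, tensor names and input shape are placeholders):

    import pyarmnn as ann

    parser = ann.ITfParser()
    network = parser.CreateNetworkFromBinaryFile(
        'frozen_model.pb',                             # placeholder path
        {'input': ann.TensorShape((1, 224, 224, 3))},  # input name -> shape
        ['output'])                                    # requested output names

    input_binding_info = parser.GetNetworkInputBindingInfo('input')
    output_binding_info = parser.GetNetworkOutputBindingInfo('output')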
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_version.i b/python/pyarmnn/src/pyarmnn/swig/armnn_version.i
new file mode 100644
index 0000000000..b8e760d435
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_version.i
@@ -0,0 +1,58 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%module pyarmnn_version
+
+%include "std_string.i"
+
+%{
+#define SWIG_FILE_WITH_INIT
+#include "armnn/Version.hpp"
+%}
+
+%{
+ std::string GetVersion()
+ {
+ return ARMNN_VERSION;
+ };
+
+ std::string GetMajorVersion()
+ {
+ return STRINGIFY_VALUE(ARMNN_MAJOR_VERSION);
+ };
+
+ std::string GetMinorVersion()
+ {
+ return STRINGIFY_VALUE(ARMNN_MINOR_VERSION);
+ };
+%}
+%feature("docstring",
+"
+ Returns Arm NN library full version: MAJOR + MINOR + INCREMENTAL.
+
+ Returns:
+ str: Full version of Arm NN installed.
+
+") GetVersion;
+std::string GetVersion();
+
+%feature("docstring",
+"
+ Returns Arm NN library major version. The year of the release.
+
+ Returns:
+ str: Major version of Arm NN installed.
+
+") GetMajorVersion;
+std::string GetMajorVersion();
+
+%feature("docstring",
+"
+ Returns Arm NN library minor version. Month of the year of the release.
+
+ Returns:
+ str: Minor version of Arm NN installed.
+
+") GetMinorVersion;
+std::string GetMinorVersion();
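
Assuming these functions are re-exported at package level, they can be used to inspect the installed library, e.g.:

    import pyarmnn as ann

    print(ann.GetVersion())       # full version string, e.g. '20191100'
    print(ann.GetMajorVersion())  # year of the release, e.g. '19'
    print(ann.GetMinorVersion())  # month of the release, e.g. '11'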
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_backend.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_backend.i
new file mode 100644
index 0000000000..4d13150a19
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_backend.i
@@ -0,0 +1,66 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+%{
+#include "armnn/BackendId.hpp"
+%}
+
+namespace std {
+ %template(BackendIdVector) vector<armnn::BackendId>;
+ %template(BackendIdSet) unordered_set<armnn::BackendId>;
+}
+
+namespace armnn
+{
+
+class BackendId
+{
+public:
+ %feature("docstring",
+ "
+ Creates backend id instance.
+ Supported backend ids: 'CpuRef', 'CpuAcc', 'GpuAcc', 'NpuAcc'.
+
+ Args:
+ id (str): Computation backend identification.
+ ") BackendId;
+
+ BackendId(const std::string& id);
+
+ %feature("docstring",
+ "
+    Checks if the backend id is the CPU reference implementation ('CpuRef').
+
+    Returns:
+        bool: True if the backend is the CPU reference implementation, False otherwise.
+
+ ") IsCpuRef;
+ bool IsCpuRef();
+
+ %feature("docstring",
+ "
+ Returns backend identification.
+
+ >>> backendId = BackendId('CpuRef')
+ >>> assert 'CpuRef' == str(backendId)
+ >>> assert 'CpuRef' == backendId.Get()
+
+ Returns:
+ str: Backend identification.
+
+ ") Get;
+ const std::string& Get();
+};
+
+%extend BackendId {
+
+ std::string __str__() {
+ return $self->Get();
+ }
+
+}
+
+using BackendIdVector = std::vector<armnn::BackendId>;
+using BackendIdSet = std::unordered_set<armnn::BackendId>;
+}
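
A sketched use of BackendId (the preferred-backend list is the typical input to the optimizer declared elsewhere in this patch):

    import pyarmnn as ann

    cpu_ref = ann.BackendId('CpuRef')
    print(str(cpu_ref))        # 'CpuRef', via the __str__ extension above
    print(cpu_ref.IsCpuRef())  # True

    # Backend preference lists are plain Python lists of BackendId objects.
    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]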
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
new file mode 100644
index 0000000000..eb2c8f6278
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
@@ -0,0 +1,1000 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/Descriptors.hpp"
+#include "armnn/Types.hpp"
+%}
+
+namespace std {
+ %template() vector<unsigned int>;
+ %template() vector<int>;
+ %template() vector<pair<unsigned int, unsigned int>>;
+ %template(TensorShapeVector) vector<armnn::TensorShape>;
+}
+
+%include "typemaps/vectors.i"
+
+%typemap(out) const uint32_t*
+%{
+{
+ auto len = arg1->GetNumViews();
+ $result = PyList_New(len);
+ if (!$result) {
+ Py_XDECREF($result);
+ return PyErr_NoMemory();
+ }
+ for (unsigned int i = 0; i < len; ++i) {
+
+ PyList_SetItem($result, i, PyLong_FromUnsignedLong($1[i]));
+ }
+}
+%}
+
+namespace armnn
+{
+
+%list_to_vector( std::vector<unsigned int> );
+%list_to_vector( std::vector<int> );
+%list_to_vector( std::vector<std::pair<unsigned int, unsigned int>> );
+
+%feature("docstring",
+ "
+ A configuration for the Activation layer. See `INetwork.AddActivationLayer()`.
+
+ Contains:
+ m_Function (ActivationFunction): The activation function to use
+ (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
+ Default: ActivationFunction_Sigmoid.
+ m_A (float): Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH). Default: 0.
+ m_B (float): Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH). Default: 0.
+
+ ") ActivationDescriptor;
+struct ActivationDescriptor
+{
+ ActivationDescriptor();
+
+ ActivationFunction m_Function;
+ float m_A;
+ float m_B;
+
+ bool operator ==(const ActivationDescriptor &rhs) const;
+};
+
+
+%feature("docstring",
+ "
+ A descriptor for the ArgMinMax layer. See `INetwork.AddArgMinMaxLayer()`.
+
+ Contains:
+        m_Function (int): Specifies whether the function is to find Min or Max, with ArgMinMaxFunction_Min or ArgMinMaxFunction_Max.
+ Default: ArgMinMaxFunction_Min.
+ m_Axis (int): Axis to reduce across the input tensor. Default: -1.
+
+ ") ArgMinMaxDescriptor;
+struct ArgMinMaxDescriptor
+{
+ ArgMinMaxDescriptor();
+
+ ArgMinMaxFunction m_Function;
+ int m_Axis;
+
+ bool operator ==(const ArgMinMaxDescriptor &rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the BatchNormalization layer. See `INetwork.AddBatchNormalizationLayer()`.
+
+ Contains:
+ m_Eps (float): Value to add to the variance. Used to avoid dividing by zero. Default: 0.0001f.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") BatchNormalizationDescriptor;
+struct BatchNormalizationDescriptor
+{
+ BatchNormalizationDescriptor();
+
+ float m_Eps;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const BatchNormalizationDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the BatchToSpaceNd layer. See `INetwork.AddBatchToSpaceNdLayer()`.
+
+ Contains:
+        m_BlockShape (list of int): Underlying C++ data type is std::vector<unsigned int>. Block shape values. Default: [1, 1].
+
+ m_Crops (list of tuple): The values to crop from the input dimension. Default: [(0, 0), (0, 0)].
+
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") BatchToSpaceNdDescriptor;
+struct BatchToSpaceNdDescriptor
+{
+ BatchToSpaceNdDescriptor();
+ BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
+ std::vector<std::pair<unsigned int, unsigned int>> crops);
+
+ std::vector<unsigned int> m_BlockShape;
+ std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const BatchToSpaceNdDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Comparison layer. See `INetwork.AddComparisonLayer()`.
+
+ Contains:
+ m_Operation (ComparisonOperation): Specifies the comparison operation to execute.
+ ") ComparisonDescriptor;
+struct ComparisonDescriptor
+{
+ ComparisonDescriptor();
+
+ ComparisonDescriptor(ComparisonOperation operation);
+
+ bool operator ==(const ComparisonDescriptor &rhs) const;
+
+ armnn::ComparisonOperation m_Operation;
+};
+
+%feature("docstring",
+ "
+ Creates a configuration/descriptor for a Concatenation layer. See `INetwork.AddConcatLayer()`.
+    The number of views must be equal to the number of inputs, and their order must match, e.g. the first view corresponds to the first input, the second view to the second input, etc.
+
+ Contains:
+        numViews (int): Number of views; the value must be equal to the number of inputs to the layer.
+ numDimensions (int): Number of dimensions. Default value is 4.
+
+ ") ConcatDescriptor;
+struct ConcatDescriptor
+{
+ ConcatDescriptor();
+
+ ConcatDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
+
+ %feature("docstring",
+ "
+ Get the number of views.
+ Returns:
+ int: Number of views.
+ ") GetNumViews;
+ uint32_t GetNumViews() const;
+
+ %feature("docstring",
+ "
+ Get the number of dimensions.
+ Returns:
+ int: Number of dimensions.
+ ") GetNumDimensions;
+ uint32_t GetNumDimensions() const;
+
+ %feature("docstring",
+ "
+ Get the view origin input by index.
+
+    Each view matches the input order, e.g. the first view corresponds to the first input, the second view to the second input, etc.
+
+ Args:
+ idx (int): Index to get view from.
+
+ Returns:
+ list: View origin (shape) specified by the int value `idx` as a list of ints.
+ ") GetViewOrigin;
+
+ const uint32_t* GetViewOrigin(uint32_t idx) const;
+
+ %feature("docstring",
+ "
+ Set the concatenation dimension.
+ Args:
+ concatAxis (int): Concatenation axis index.
+ ") SetConcatAxis;
+ void SetConcatAxis(unsigned int concatAxis);
+
+ %feature("docstring",
+ "
+ Get the concatenation dimension.
+ Returns:
+ int: Concatenation axis index.
+ ") GetConcatAxis;
+ unsigned int GetConcatAxis() const;
+
+ bool operator ==(const ConcatDescriptor& rhs) const;
+};
+%extend ConcatDescriptor{
+ %feature("docstring",
+ "
+ Set the coordinates of a specific origin view input.
+
+ Args:
+ view (int): Origin view index.
+ coord (int): Coordinate of the origin view to set.
+ value (int): Value to set.
+ Raises:
+ RuntimeError: If the `view` is greater than or equal to GetNumViews().
+ RuntimeError: If the `coord` is greater than or equal to GetNumDimensions().
+ ") SetViewOriginCoord;
+ void SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value) {
+ armnn::Status status = $self->SetViewOriginCoord(view, coord, value);
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to set view origin coordinates.");
+ }
+ };
+}
+
+%feature("docstring",
+ "
+ A descriptor for the Convolution2d layer. See `INetwork.AddConvolution2dLayer()`.
+
+ Contains:
+ m_PadLeft (int): Underlying C++ data type is `uint32_t`. Padding left value in the width dimension. Default: 0.
+ m_PadRight (int): Underlying C++ data type is `uint32_t`. Padding right value in the width dimension. Default: 0.
+ m_PadTop (int): Underlying C++ data type is `uint32_t`. Padding top value in the height dimension. Default: 0.
+ m_PadBottom (int): Underlying C++ data type is `uint32_t`. Padding bottom value in the height dimension. Default: 0.
+ m_StrideX (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the width dimension. Default: 0.
+ m_StrideY (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the height dimension. Default: 0.
+ m_DilationX (int): Underlying C++ data type is `uint32_t`. Dilation along x axis. Default: 1.
+ m_DilationY (int): Underlying C++ data type is `uint32_t`. Dilation along y axis. Default: 1.
+ m_BiasEnabled (bool): Enable/disable bias. Default: false.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") Convolution2dDescriptor;
+struct Convolution2dDescriptor
+{
+ Convolution2dDescriptor();
+
+ uint32_t m_PadLeft;
+ uint32_t m_PadRight;
+ uint32_t m_PadTop;
+ uint32_t m_PadBottom;
+ uint32_t m_StrideX;
+ uint32_t m_StrideY;
+ uint32_t m_DilationX;
+ uint32_t m_DilationY;
+ bool m_BiasEnabled;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const Convolution2dDescriptor& rhs) const;
+};
+
+
+%feature("docstring",
+ "
+ A descriptor for the DepthToSpace layer. See `INetwork.AddDepthToSpaceLayer()`.
+
+ Contains:
+ m_BlockSize (int): Underlying C++ type is `unsigned int`. Scalar specifying the input block size. It must be >= 1. Default: 1.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NHWC.
+
+ ") DepthToSpaceDescriptor;
+struct DepthToSpaceDescriptor
+{
+ DepthToSpaceDescriptor();
+ DepthToSpaceDescriptor(unsigned int blockSize, DataLayout dataLayout);
+
+ unsigned int m_BlockSize;
+ DataLayout m_DataLayout;
+};
+
+
+%feature("docstring",
+ "
+ A descriptor for the DepthwiseConvolution2d layer. See `INetwork.AddDepthwiseConvolution2dLayer()`.
+
+ Contains:
+ m_PadLeft (int): Underlying C++ data type is `uint32_t`. Padding left value in the width dimension. Default: 0.
+ m_PadRight (int): Underlying C++ data type is `uint32_t`. Padding right value in the width dimension. Default: 0.
+ m_PadTop (int): Underlying C++ data type is `uint32_t`. Padding top value in the height dimension. Default: 0.
+ m_PadBottom (int): Underlying C++ data type is `uint32_t`. Padding bottom value in the height dimension. Default: 0.
+ m_StrideX (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the width dimension. Default: 0.
+ m_StrideY (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the height dimension. Default: 0.
+ m_DilationX (int): Underlying C++ data type is `uint32_t`. Dilation along x axis. Default: 1.
+ m_DilationY (int): Underlying C++ data type is `uint32_t`. Dilation along y axis. Default: 1.
+ m_BiasEnabled (bool): Enable/disable bias. Default: false.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") DepthwiseConvolution2dDescriptor;
+struct DepthwiseConvolution2dDescriptor
+{
+ DepthwiseConvolution2dDescriptor();
+
+ uint32_t m_PadLeft;
+ uint32_t m_PadRight;
+ uint32_t m_PadTop;
+ uint32_t m_PadBottom;
+ uint32_t m_StrideX;
+ uint32_t m_StrideY;
+ uint32_t m_DilationX;
+ uint32_t m_DilationY;
+ bool m_BiasEnabled;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the DetectionPostProcess layer. See `INetwork.AddDetectionPostProcessLayer()`.
+
+ This layer is a custom layer used to process the output from SSD MobilenetV1.
+
+ Contains:
+ m_MaxDetections (int): Underlying C++ data type is `uint32_t`. Maximum numbers of detections. Default: 0.
+ m_MaxClassesPerDetection (int): Underlying C++ data type is `uint32_t`. Maximum numbers of classes per detection, used in Fast NMS. Default: 1.
+ m_DetectionsPerClass (int): Underlying C++ data type is `uint32_t`. Detections per classes, used in Regular NMS. Default: 1.
+ m_NmsScoreThreshold (float): Non maximum suppression score threshold. Default: 0.
+ m_NmsIouThreshold (float): Intersection over union threshold. Default: 0.
+ m_NumClasses (int): Underlying C++ data type is `uint32_t`. Number of classes. Default: 0.
+ m_UseRegularNms (bool): Use Regular Non maximum suppression. Default: false.
+ m_ScaleX (float): Center size encoding scale x. Default: 0.
+ m_ScaleY (float): Center size encoding scale y. Default: 0.
+ m_ScaleW (float): Center size encoding scale weight. Default: 0.
+ m_ScaleH (float): Center size encoding scale height. Default: 0.
+
+ ") DetectionPostProcessDescriptor;
+struct DetectionPostProcessDescriptor
+{
+ DetectionPostProcessDescriptor();
+
+ uint32_t m_MaxDetections;
+ uint32_t m_MaxClassesPerDetection;
+ uint32_t m_DetectionsPerClass;
+ float m_NmsScoreThreshold;
+ float m_NmsIouThreshold;
+ uint32_t m_NumClasses;
+ bool m_UseRegularNms;
+ float m_ScaleX;
+ float m_ScaleY;
+ float m_ScaleW;
+ float m_ScaleH;
+
+ bool operator ==(const DetectionPostProcessDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+    A descriptor for the FakeQuantization layer.
+
+ Contains:
+ m_Min (float): Minimum value for quantization range. Default: -6.0.
+ m_Max (float): Maximum value for quantization range. Default: 6.0.
+
+ ") FakeQuantizationDescriptor;
+struct FakeQuantizationDescriptor
+{
+ FakeQuantizationDescriptor();
+
+ float m_Min;
+ float m_Max;
+
+ bool operator ==(const FakeQuantizationDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the FullyConnected layer. See `INetwork.AddFullyConnectedLayer()`.
+
+ Contains:
+ m_BiasEnabled (bool): Enable/disable bias. Default: false.
+ m_TransposeWeightMatrix (bool): Enable/disable transpose weight matrix. Default: false.
+
+ ") FullyConnectedDescriptor;
+struct FullyConnectedDescriptor
+{
+ FullyConnectedDescriptor();
+
+ bool m_BiasEnabled;
+ bool m_TransposeWeightMatrix;
+
+ bool operator ==(const FullyConnectedDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for InstanceNormalization layer. See `INetwork.AddInstanceNormalizationLayer()`.
+
+ Contains:
+        m_Gamma (float): Gamma, the scale scalar value applied for the normalized tensor. Default: 1.0.
+        m_Beta (float): Beta, the offset scalar value applied for the normalized tensor. Default: 0.0.
+        m_Eps (float): Epsilon, small scalar value added to variance to avoid dividing by zero. Default: 1e-12f.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") InstanceNormalizationDescriptor;
+struct InstanceNormalizationDescriptor
+{
+ InstanceNormalizationDescriptor();
+
+ float m_Gamma;
+ float m_Beta;
+ float m_Eps;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const InstanceNormalizationDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the LSTM layer. See `INetwork.AddLstmLayer()`.
+
+ Contains:
+ m_ActivationFunc (int): Underlying C++ data type is `uint32_t`. The activation function to use. 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
+ Default: 1.
+ m_ClippingThresCell (float): Clipping threshold value for the cell state. Default: 0.0.
+ m_ClippingThresProj (float): Clipping threshold value for the projection. Default: 0.0.
+ m_CifgEnabled (bool): Enable/disable cifg (coupled input & forget gate). Default: true.
+ m_PeepholeEnabled (bool): Enable/disable peephole. Default: false.
+ m_ProjectionEnabled (bool): Enable/disable the projection layer. Default: false.
+ m_LayerNormEnabled (bool): Enable/disable layer normalization. Default: false.
+
+ ") LstmDescriptor;
+struct LstmDescriptor
+{
+ LstmDescriptor();
+
+ uint32_t m_ActivationFunc;
+ float m_ClippingThresCell;
+ float m_ClippingThresProj;
+ bool m_CifgEnabled;
+ bool m_PeepholeEnabled;
+ bool m_ProjectionEnabled;
+ bool m_LayerNormEnabled;
+
+ bool operator ==(const LstmDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A Descriptor for the L2Normalization layer. See `INetwork.AddL2NormalizationLayer()`.
+
+ Contains:
+        m_Eps (float): Used to avoid dividing by zero. Default: 1e-12f.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") L2NormalizationDescriptor;
+struct L2NormalizationDescriptor
+{
+ L2NormalizationDescriptor();
+
+ float m_Eps;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const L2NormalizationDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Mean layer. See `INetwork.AddMeanLayer()`.
+
+ Contains:
+        m_Axis (list of int): Underlying C++ data type is std::vector<unsigned int>. Values for the dimensions to reduce.
+ m_KeepDims (bool): Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept. Default: False.
+
+ ") MeanDescriptor;
+struct MeanDescriptor
+{
+ MeanDescriptor();
+ MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims);
+
+ std::vector<unsigned int> m_Axis;
+ bool m_KeepDims;
+
+ bool operator ==(const MeanDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Normalization layer. See `INetwork.AddNormalizationLayer()`.
+
+ Contains:
+        m_NormChannelType (int): Normalization channel algorithm to use (NormalizationAlgorithmChannel_Across, NormalizationAlgorithmChannel_Within).
+                                 Default: NormalizationAlgorithmChannel_Across.
+ m_NormMethodType (int): Normalization method algorithm to use (NormalizationAlgorithmMethod_LocalBrightness, NormalizationAlgorithmMethod_LocalContrast).
+ Default: NormalizationAlgorithmMethod_LocalBrightness.
+ m_NormSize (int): Underlying C++ data type is `uint32_t`. Depth radius value. Default: 0.
+ m_Alpha (float): Alpha value for the normalization equation. Default: 0.0.
+ m_Beta (float): Beta value for the normalization equation. Default: 0.0.
+ m_K (float): Kappa value used for the across channel normalization equation. Default: 0.0.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") NormalizationDescriptor;
+struct NormalizationDescriptor
+{
+ NormalizationDescriptor();
+
+ NormalizationAlgorithmChannel m_NormChannelType;
+ NormalizationAlgorithmMethod m_NormMethodType;
+ uint32_t m_NormSize;
+ float m_Alpha;
+ float m_Beta;
+ float m_K;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const NormalizationDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Pad layer. See `INetwork.AddPadLayer()`.
+
+ Contains:
+        m_PadList (list of tuple): Specifies the padding for each input dimension.
+ The first tuple value is the number of values to add before the tensor in the dimension.
+ The second tuple value is the number of values to add after the tensor in the dimension.
+ The number of pairs should match the number of dimensions in the input tensor.
+        m_PadValue (float): Optional value to use for padding. Default: 0.
+
+ ") PadDescriptor;
+struct PadDescriptor
+{
+ PadDescriptor();
+ PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0);
+
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+ float m_PadValue;
+
+ bool operator ==(const PadDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Permute layer. See `INetwork.AddPermuteLayer()`.
+
+ Contains:
+ m_DimMappings (PermutationVector): Indicates how to translate tensor elements from a given source into the target destination,
+ when source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
+
+ ") PermuteDescriptor;
+struct PermuteDescriptor
+{
+ PermuteDescriptor();
+ PermuteDescriptor(const PermutationVector& dimMappings);
+
+ PermutationVector m_DimMappings;
+
+ bool operator ==(const PermuteDescriptor &rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Pooling2d layer. See `INetwork.AddPooling2dLayer()`.
+
+ Contains:
+ m_PoolType (int): The pooling algorithm to use (`PoolingAlgorithm_Max`, `PoolingAlgorithm_Average`, `PoolingAlgorithm_L2`). Default: `PoolingAlgorithm_Max`.
+ m_PadLeft (int): Underlying C++ data type is `uint32_t`. Padding left value in the width dimension. Default: 0.
+ m_PadRight (int): Underlying C++ data type is `uint32_t`. Padding right value in the width dimension. Default: 0.
+ m_PadTop (int): Underlying C++ data type is `uint32_t`. Padding top value in the height dimension. Default: 0.
+ m_PadBottom (int): Underlying C++ data type is `uint32_t`. Padding bottom value in the height dimension. Default: 0.
+ m_PoolWidth (int): Underlying C++ data type is `uint32_t`. Pooling width value. Default: 0.
+ m_PoolHeight (int): Underlying C++ data type is `uint32_t`. Pooling height value. Default: 0.
+ m_StrideX (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the width dimension. Default: 0.
+ m_StrideY (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the height dimension. Default: 0.
+ m_OutputShapeRounding (int): The rounding method for the output shape. (OutputShapeRounding_Floor, OutputShapeRounding_Ceiling).
+ Default: OutputShapeRounding_Floor.
+ m_PaddingMethod (int): The padding method to be used. (PaddingMethod_Exclude, PaddingMethod_IgnoreValue).
+ Default: PaddingMethod_Exclude.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") Pooling2dDescriptor;
+struct Pooling2dDescriptor
+{
+ Pooling2dDescriptor();
+
+ PoolingAlgorithm m_PoolType;
+ uint32_t m_PadLeft;
+ uint32_t m_PadRight;
+ uint32_t m_PadTop;
+ uint32_t m_PadBottom;
+ uint32_t m_PoolWidth;
+ uint32_t m_PoolHeight;
+ uint32_t m_StrideX;
+ uint32_t m_StrideY;
+ OutputShapeRounding m_OutputShapeRounding;
+ PaddingMethod m_PaddingMethod;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const Pooling2dDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Reshape layer. See `INetwork.AddReshapeLayer()`.
+
+ Contains:
+ m_TargetShape (TensorShape): Target shape value.
+
+ ") ReshapeDescriptor;
+struct ReshapeDescriptor
+{
+ ReshapeDescriptor();
+ ReshapeDescriptor(const armnn::TensorShape& shape);
+
+ armnn::TensorShape m_TargetShape;
+
+ bool operator ==(const ReshapeDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Resize layer. See `INetwork.AddResizeLayer()`.
+
+ Contains:
+ m_TargetWidth (int): Underlying C++ data type is `uint32_t`. Target width value. Default: 0.
+ m_TargetHeight (int): Underlying C++ data type is `uint32_t`. Target height value. Default: 0.
+ m_Method (int): The Interpolation method to use (ResizeMethod_Bilinear, ResizeMethod_NearestNeighbor).
+ Default: ResizeMethod_NearestNeighbor.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") ResizeDescriptor;
+struct ResizeDescriptor
+{
+ ResizeDescriptor();
+
+ uint32_t m_TargetWidth;
+ uint32_t m_TargetHeight;
+ ResizeMethod m_Method;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const ResizeDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Slice layer. See `INetwork.AddSliceLayer()`.
+
+ Contains:
+ m_Begin (list of int): Underlying C++ data type is std::vector<unsigned int>. Beginning indices of the slice in each dimension.
+ m_Size (list of int): Underlying C++ data type is std::vector<unsigned int>. Size of the slice in each dimension.
+
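+    A minimal usage sketch slicing a 2x2 region starting at the origin (illustrative;
+    assumes the generated bindings are imported as `ann`):
+
+    >>> slice_desc = ann.SliceDescriptor([0, 0], [2, 2])
+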
+ ") SliceDescriptor;
+struct SliceDescriptor
+{
+ SliceDescriptor();
+ SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size);
+
+ std::vector<unsigned int> m_Begin;
+ std::vector<unsigned int> m_Size;
+
+ bool operator ==(const SliceDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Space To Batch N-dimensions layer. See `INetwork.AddSpaceToBatchNdLayer()`.
+
+ Contains:
+ m_BlockShape (list of int): Underlying C++ data type is std::vector<unsigned int>. Block shape values. Default: [1, 1].
+        m_PadList (list of tuple): Underlying C++ data type is std::vector<std::pair<unsigned int, unsigned int>>.
+                                Specifies the padding values for the input dimensions:
+                                [heightPad - (top, bottom), widthPad - (left, right)].
+                                Default: [(0, 0), (0, 0)].
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+ ") SpaceToBatchNdDescriptor;
+struct SpaceToBatchNdDescriptor
+{
+ SpaceToBatchNdDescriptor();
+ SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList);
+
+ std::vector<unsigned int> m_BlockShape;
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const SpaceToBatchNdDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the SpaceToDepth layer. See `INetwork.AddSpaceToDepthLayer()`.
+
+ Contains:
+ m_BlockSize (int): Underlying C++ type is `unsigned int`. Scalar specifying the input block size. It must be >= 1. Default: 1.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NHWC.
+
+ ") SpaceToDepthDescriptor;
+struct SpaceToDepthDescriptor
+{
+ SpaceToDepthDescriptor();
+ SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout);
+
+ unsigned int m_BlockSize;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const SpaceToDepthDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for a Splitter layer. See `INetwork.AddSplitterLayer()`.
+
+ Args:
+        numViews (int): Number of views; the value must equal the number of outputs of the layer.
+ numDimensions (int): Number of dimensions. Default value is 4.
+
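+    A minimal configuration sketch for two views over 4 dimensions (illustrative;
+    assumes the generated bindings are imported as `ann`):
+
+    >>> splitter_desc = ann.SplitterDescriptor(2, 4)
+    >>> splitter_desc.SetViewOriginCoord(1, 0, 1)  # second view starts at index 1 on axis 0
+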
+ ") SplitterDescriptor;
+struct SplitterDescriptor
+{
+
+ SplitterDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
+
+ SplitterDescriptor();
+
+ %feature("docstring",
+ "
+ Get the number of views.
+ Returns:
+ int: number of views.
+ ") GetNumViews;
+ uint32_t GetNumViews() const;
+
+ %feature("docstring",
+ "
+ Get the number of dimensions.
+
+ Returns:
+ int: Number of dimensions.
+
+ ") GetNumDimensions;
+ uint32_t GetNumDimensions() const;
+
+ %feature("docstring",
+ "
+    Get the output view origin (shape) by index. The order matches the outputs,
+    e.g. the first view corresponds to the first output, the second view to the second output, etc.
+ Args:
+ idx (int): Index.
+ Returns:
+ list: View origin (shape) as a list of ints.
+ ") GetViewOrigin;
+
+ const uint32_t* GetViewOrigin(uint32_t idx) const;
+
+ %feature("docstring",
+ "
+ Get the view sizes by index.
+ Args:
+ idx (int): Index.
+ Returns:
+ list: Sizes for the specified index as a list of ints.
+ ") GetViewSizes;
+ const uint32_t* GetViewSizes(uint32_t idx) const;
+
+
+ %feature("docstring",
+ "
+ Get the view origins that describe how the splitting process is configured.
+
+    The number of views equals the number of outputs, and their order matches.
+ Returns:
+ OriginsDescriptor: A descriptor for the origins view.
+ ") GetOrigins;
+ const ConcatDescriptor GetOrigins() const;
+
+ bool operator ==(const SplitterDescriptor& rhs) const;
+};
+
+%extend SplitterDescriptor{
+ %feature("docstring",
+ "
+ Set the value of a specific origin view input coordinate.
+
+    Args:
+ view (int): Origin view index.
+ coord (int): Coordinate of the origin view to set.
+ value (int): Value to set.
+ Raises:
+        RuntimeError: If `view` is greater than or equal to `GetNumViews()`
+                      or `coord` is greater than or equal to `GetNumDimensions()`.
+ ") SetViewOriginCoord;
+ void SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value) {
+ armnn::Status status = $self->SetViewOriginCoord(view, coord, value);
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to set view origin coordinates.");
+ }
+ };
+
+ %feature("docstring",
+ "
+ Set the size of the views.
+
+ Args:
+ view (int): View index.
+        coord (int): Coordinate of the view size to set.
+ value (int): Value to set.
+ Raises:
+        RuntimeError: If `view` is greater than or equal to `GetNumViews()`
+                      or `coord` is greater than or equal to `GetNumDimensions()`.
+ ") SetViewSize;
+ void SetViewSize(uint32_t view, uint32_t coord, uint32_t value) {
+ armnn::Status status = $self->SetViewSize(view, coord, value);
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to set view size.");
+ }
+ }
+}
+
+%feature("docstring",
+ "
+ A descriptor for the Stack layer. See `INetwork.AddStackLayer()`.
+
+ Contains:
+ m_Axis (int): Underlying C++ type is `unsigned int`. 0-based axis along which to stack the input tensors. Default: 0.
+        m_NumInputs (int): Underlying C++ type is `unsigned int`. Number of input tensors. Default: 0.
+ m_InputShape (TensorShape): Required shape of all input tensors.
+
+ ") StackDescriptor;
+struct StackDescriptor
+{
+ StackDescriptor();
+ StackDescriptor(uint32_t axis, uint32_t numInputs, const armnn::TensorShape& inputShape);
+
+ uint32_t m_Axis;
+ uint32_t m_NumInputs;
+ armnn::TensorShape m_InputShape;
+
+ bool operator ==(const StackDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the StandIn layer. See `INetwork.AddStandInLayer()`.
+
+ Contains:
+ m_NumInputs (int): Underlying C++ type is `unsigned int`. Number of input tensors. Default: 0.
+ m_NumOutputs (int): Underlying C++ type is `unsigned int`. Number of output tensors. Default: 0.
+
+ ") StandInDescriptor;
+struct StandInDescriptor
+{
+ StandInDescriptor();
+
+ StandInDescriptor(uint32_t numInputs, uint32_t numOutputs);
+
+ uint32_t m_NumInputs = 0;
+ uint32_t m_NumOutputs = 0;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the StridedSlice layer. See `INetwork.AddStridedSliceLayer()`.
+
+ Contains:
+ m_Begin (list of int): Underlying C++ data type is `std::vector<int>`. Begin values for the input that will be sliced.
+
+ m_End (list of int): Underlying C++ data type is `std::vector<int>`. End values for the input that will be sliced.
+
+ m_Stride (list of int): Underlying C++ data type is `std::vector<int>`. Stride values for the input that will be sliced.
+
+ m_BeginMask (int): Underlying C++ data type is `int32_t`. Begin mask value. If set, then the begin is disregarded and
+ the fullest range is used for the dimension. Default: 0.
+
+        m_EndMask (int): Underlying C++ data type is `int32_t`. End mask value. If set, then the end is disregarded and
+                     the fullest range is used for the dimension. Default: 0.
+
+ m_ShrinkAxisMask (int): Underlying C++ data type is `int32_t`. Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1. Default: 0.
+
+ m_EllipsisMask (int): Underlying C++ data type is `int32_t`. Ellipsis mask value. Default: 0.
+
+        m_NewAxisMask (int): Underlying C++ data type is `int32_t`. New axis mask value. If set, begin, end and stride are disregarded and
+                             a new 1-sized dimension is inserted at this location of the output tensor. Default: 0.
+
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
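+    A minimal usage sketch taking every other element along the second axis of a
+    4D input (illustrative; assumes the generated bindings are imported as `ann`):
+
+    >>> strided_desc = ann.StridedSliceDescriptor([0, 0, 0, 0], [1, 4, 4, 1], [1, 2, 1, 1])
+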
+ ") StridedSliceDescriptor;
+struct StridedSliceDescriptor
+{
+ StridedSliceDescriptor();
+ StridedSliceDescriptor(const std::vector<int> begin,
+ const std::vector<int> end,
+ const std::vector<int> stride);
+
+ int GetStartForAxis(const armnn::TensorShape& inputShape, unsigned int axis) const;
+ int GetStopForAxis(const armnn::TensorShape& inputShape, unsigned int axis, int startForAxis) const;
+
+ std::vector<int> m_Begin;
+ std::vector<int> m_End;
+ std::vector<int> m_Stride;
+
+ int32_t m_BeginMask;
+ int32_t m_EndMask;
+ int32_t m_ShrinkAxisMask;
+ int32_t m_EllipsisMask;
+ int32_t m_NewAxisMask;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const StridedSliceDescriptor& rhs) const;
+};
+
+%feature("docstring",
+ "
+ A descriptor for the Softmax layer. See `INetwork.AddSoftmaxLayer()`.
+
+ Contains:
+ m_Beta (float): Exponentiation value.
+ m_Axis (int): Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
+ ") SoftmaxDescriptor;
+struct SoftmaxDescriptor
+{
+ SoftmaxDescriptor();
+
+ float m_Beta;
+ int m_Axis;
+
+ bool operator ==(const SoftmaxDescriptor& rhs) const;
+};
+
+
+%feature("docstring",
+ "
+ A descriptor for the TransposeConvolution2d layer. See `INetwork.AddTransposeConvolution2dLayer()`.
+
+ Contains:
+ m_PadLeft (int): Underlying C++ data type is `uint32_t`. Padding left value in the width dimension. Default: 0.
+ m_PadRight (int): Underlying C++ data type is `uint32_t`. Padding right value in the width dimension. Default: 0.
+ m_PadTop (int): Underlying C++ data type is `uint32_t`. Padding top value in the height dimension. Default: 0.
+ m_PadBottom (int): Underlying C++ data type is `uint32_t`. Padding bottom value in the height dimension. Default: 0.
+ m_StrideX (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the width dimension. Default: 0.
+ m_StrideY (int): Underlying C++ data type is `uint32_t`. Stride value when proceeding through input for the height dimension. Default: 0.
+ m_BiasEnabled (bool): Enable/disable bias. Default: false.
+ m_DataLayout (int): The data layout to be used (DataLayout_NCHW, DataLayout_NHWC). Default: DataLayout_NCHW.
+
+ ") TransposeConvolution2dDescriptor;
+struct TransposeConvolution2dDescriptor
+{
+ TransposeConvolution2dDescriptor();
+
+ uint32_t m_PadLeft;
+ uint32_t m_PadRight;
+ uint32_t m_PadTop;
+ uint32_t m_PadBottom;
+ uint32_t m_StrideX;
+ uint32_t m_StrideY;
+ bool m_BiasEnabled;
+ DataLayout m_DataLayout;
+
+ bool operator ==(const TransposeConvolution2dDescriptor& rhs) const;
+};
+
+
+using ConcatDescriptor = OriginsDescriptor;
+using LogSoftmaxDescriptor = SoftmaxDescriptor;
+using SplitterDescriptor = ViewsDescriptor;
+
+%list_to_vector_clear(std::vector<unsigned int>);
+%list_to_vector_clear(std::vector<int>);
+%list_to_vector_clear(std::vector<std::pair<unsigned int, unsigned int>>);
+}
+
+%{
+ armnn::ConcatDescriptor CreateDescriptorForConcatenation(std::vector<armnn::TensorShape> shapes,
+ unsigned int concatenationDimension)
+ {
+ return armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatenationDimension);
+ };
+%}
+
+%feature("docstring",
+ "
+ Create a descriptor for Concatenation layer.
+ Args:
+ shapes (list of TensorShape): Input shapes.
+ concatenationDimension (unsigned int): Concatenation axis.
+
+ Returns:
+ ConcatDescriptor: A descriptor object for a concatenation layer.
+ ") CreateDescriptorForConcatenation;
+armnn::ConcatDescriptor CreateDescriptorForConcatenation(std::vector<armnn::TensorShape> shapes,
+ unsigned int concatenationDimension);
+
+%typemap(out) const uint32_t*;
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_lstmparam.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_lstmparam.i
new file mode 100644
index 0000000000..a0e993c7ac
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_lstmparam.i
@@ -0,0 +1,97 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/LstmParams.hpp"
+%}
+
+namespace armnn
+{
+
+%feature("docstring",
+ "
+ Long Short-Term Memory layer input parameters.
+
+ See `INetwork.AddLstmLayer()`.
+ Operation described by the following equations:
+
+ \[i_t=\sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) \\\\
+ f_t=\sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) \\\\
+ C_t=clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) \\\\
+ o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) \\\\
+ h_t = clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})\ if\ there\ is\ a\ projection; \\\\
+ h_t = o_t \odot g(C_t)\ otherwise. \]
+ Where:
+ \(x_t\) - input;
+ \(i_t\) - input gate;
+ \(f_t\) - forget gate;
+ \(C_t\) - cell state;
+ \(o_t\) - output;
+ \(h_t\) - output state;
+ \(\sigma\) - logistic sigmoid function;
+ \(g\) - cell input and cell output activation function, see `LstmDescriptor.m_ActivationFunc`;
+ \(t_{cell}\) - threshold for clipping the cell state, see `LstmDescriptor.m_ClippingThresCell`;
+ \(t_{proj}\) - threshold for clipping the projected output, see `LstmDescriptor.m_ClippingThresProj`;
+
+ Contains:
+ m_InputToInputWeights (ConstTensor): \(W_{xi}\), input-to-input weight matrix.
+ m_InputToForgetWeights (ConstTensor): \(W_{xf}\), input-to-forget weight matrix.
+ m_InputToCellWeights (ConstTensor): \(W_{xc}\), input-to-cell weight matrix.
+ m_InputToOutputWeights (ConstTensor): \(W_{xo}\), input-to-output weight matrix.
+
+ m_RecurrentToInputWeights (ConstTensor): \(W_{hi}\), recurrent-to-input weight matrix.
+ m_RecurrentToForgetWeights (ConstTensor): \(W_{hf}\), recurrent-to-forget weight matrix.
+ m_RecurrentToCellWeights (ConstTensor): \(W_{hc}\), recurrent-to-cell weight matrix.
+ m_RecurrentToOutputWeights (ConstTensor): \(W_{ho}\), recurrent-to-output weight matrix.
+
+ m_CellToInputWeights (ConstTensor): \(W_{ci}\), cell-to-input weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
+ m_CellToForgetWeights (ConstTensor): \(W_{cf}\), cell-to-forget weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
+ m_CellToOutputWeights (ConstTensor): \(W_{co}\), cell-to-output weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
+
+ m_InputGateBias (ConstTensor): \(b_i\), input gate bias.
+ m_ForgetGateBias (ConstTensor): \(b_f\), forget gate bias.
+ m_CellBias (ConstTensor): \(b_c\), cell bias.
+ m_OutputGateBias (ConstTensor): \(b_o\), output gate bias.
+
+ m_ProjectionWeights (ConstTensor): \(W_{proj}\), projection weight matrix.
+ Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True.
+ m_ProjectionBias (ConstTensor): \(b_{proj}\), projection bias.
+ Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True.
+        m_InputLayerNormWeights (ConstTensor): Normalisation weights for the input,
+            has effect if `LstmDescriptor.m_LayerNormEnabled` is set to True.
+        m_ForgetLayerNormWeights (ConstTensor): Normalisation weights for the forget gate,
+            has effect if `LstmDescriptor.m_LayerNormEnabled` is set to True.
+        m_CellLayerNormWeights (ConstTensor): Normalisation weights for the current cell,
+            has effect if `LstmDescriptor.m_LayerNormEnabled` is set to True.
+        m_OutputLayerNormWeights (ConstTensor): Normalisation weights for the output gate,
+            has effect if `LstmDescriptor.m_LayerNormEnabled` is set to True.
+
+ ") LstmInputParams;
+struct LstmInputParams
+{
+ LstmInputParams();
+
+ const armnn::ConstTensor* m_InputToInputWeights;
+ const armnn::ConstTensor* m_InputToForgetWeights;
+ const armnn::ConstTensor* m_InputToCellWeights;
+ const armnn::ConstTensor* m_InputToOutputWeights;
+ const armnn::ConstTensor* m_RecurrentToInputWeights;
+ const armnn::ConstTensor* m_RecurrentToForgetWeights;
+ const armnn::ConstTensor* m_RecurrentToCellWeights;
+ const armnn::ConstTensor* m_RecurrentToOutputWeights;
+ const armnn::ConstTensor* m_CellToInputWeights;
+ const armnn::ConstTensor* m_CellToForgetWeights;
+ const armnn::ConstTensor* m_CellToOutputWeights;
+ const armnn::ConstTensor* m_InputGateBias;
+ const armnn::ConstTensor* m_ForgetGateBias;
+ const armnn::ConstTensor* m_CellBias;
+ const armnn::ConstTensor* m_OutputGateBias;
+ const armnn::ConstTensor* m_ProjectionWeights;
+ const armnn::ConstTensor* m_ProjectionBias;
+ const armnn::ConstTensor* m_InputLayerNormWeights;
+ const armnn::ConstTensor* m_ForgetLayerNormWeights;
+ const armnn::ConstTensor* m_CellLayerNormWeights;
+ const armnn::ConstTensor* m_OutputLayerNormWeights;
+};
+}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
new file mode 100644
index 0000000000..90454858da
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -0,0 +1,1159 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/INetwork.hpp"
+#include "armnn/BackendId.hpp"
+#include "armnn/Types.hpp"
+#include "armnn/Optional.hpp"
+#include <fstream>
+%}
+
+%include <typemaps/network_optimize.i>
+
+namespace armnn
+{
+%feature("docstring",
+"
+Struct for holding options relating to the Arm NN optimizer. See `Optimize`.
+
+Contains:
+    m_Debug (bool): Add debug data (e.g. by inserting debug layers) to make troubleshooting easier. Default: False.
+    m_ReduceFp32ToFp16 (bool): Reduce Fp32 network data to Fp16 for faster processing where supported. Default: False.
+
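+A minimal usage sketch (illustrative; assumes the generated bindings are imported as `ann`):
+
+>>> opts = ann.OptimizerOptions(True, False)  # reduceFp32ToFp16=True, debug=False
+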
+") OptimizerOptions;
+struct OptimizerOptions
+{
+ OptimizerOptions();
+
+ OptimizerOptions(bool reduceFp32ToFp16, bool debug);
+
+ bool m_ReduceFp32ToFp16;
+ bool m_Debug;
+};
+
+%feature("docstring",
+"
+An input connection slot for a layer. Slot lifecycle is managed by the layer.
+
+The input slot can be connected to an output slot of the preceding layer in the graph.
+Only one connection to the input slot is allowed.
+
+") IInputSlot;
+%nodefaultctor IInputSlot;
+%nodefaultdtor IInputSlot;
+class IInputSlot
+{
+public:
+ %feature("docstring",
+ "
+    Returns the output slot of the preceding layer that is connected to this input slot.
+
+ Returns:
+ IOutputSlot: Borrowed reference to an output connection slot for a preceding layer.
+
+ ") GetConnection;
+
+ armnn::IOutputSlot* GetConnection();
+};
+
+%feature("docstring",
+"
+An output connection slot for a layer. Slot lifecycle is managed by the layer.
+
+The output slot may be connected to 1 or more input slots of subsequent layers in the graph.
+") IOutputSlot;
+%nodefaultctor IOutputSlot;
+%nodefaultdtor IOutputSlot;
+class IOutputSlot
+{
+public:
+
+ %feature("docstring",
+ "
+ Returns the total number of connected input slots.
+
+ The same result could be obtained by calling `len()`:
+
+ >>> output_slot = ...
+ >>> size = len(output_slot)
+ >>> assert size == output_slot.GetNumConnections()
+
+ Returns:
+ int: Number of connected input slots.
+ ") GetNumConnections;
+ unsigned int GetNumConnections();
+
+
+ %feature("docstring",
+ "
+ Retrieves connected input slot by index.
+
+ The same result could be obtained by using square brackets:
+
+ >>> output_slot = ...
+ >>> connected_input_slot = output_slot[0]
+
+ Args:
+ index (int): Slot index.
+
+ Returns:
+ IInputSlot: Borrowed reference to connected input slot with given index.
+
+ Raises:
+ RuntimeError: If index out of bounds.
+ ") GetConnection;
+ armnn::IInputSlot* GetConnection(unsigned int index);
+
+ %feature("docstring",
+ "
+ Sets tensor info for output slot.
+ Operation does not change TensorInfo ownership.
+ Args:
+ tensorInfo (TensorInfo): Output tensor info.
+
+ ") SetTensorInfo;
+ void SetTensorInfo(const armnn::TensorInfo& tensorInfo);
+
+ %feature("docstring",
+ "
+ Gets tensor info for output slot.
+
+    Returns:
+        TensorInfo: Output tensor info.
+
+ ") GetTensorInfo;
+ const armnn::TensorInfo& GetTensorInfo();
+
+ %feature("docstring",
+ "
+ Checks if tensor info was set previously.
+
+ Returns:
+        bool: True if output tensor info was set, False otherwise.
+
+ ") IsTensorInfoSet;
+ bool IsTensorInfoSet();
+
+ %feature("docstring",
+ "
+    Connects this output slot with the given input slot.
+    The input slot is updated with this output connection.
+
+ Args:
+        destination (IInputSlot): Input slot to connect to.
+
+ Returns:
+ int: Total number of connections.
+
+ Raises:
+ RuntimeError: If input slot was already connected.
+
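+    A minimal connection sketch (illustrative; assumes `layer_a` and `layer_b` were
+    returned by `INetwork` add-layer calls):
+
+    >>> layer_a.GetOutputSlot(0).Connect(layer_b.GetInputSlot(0))
+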
+ ") Connect;
+ int Connect(IInputSlot& destination);
+
+ %feature("docstring",
+ "
+ Disconnects this output slot from given input slot.
+
+ Args:
+ slot (IInputSlot): Input slot to disconnect from.
+
+ ") Disconnect;
+ void Disconnect(IInputSlot& slot);
+
+ %feature("docstring",
+ "
+ Calculates the index of this slot for the layer.
+
+ Returns:
+ int: Slot index.
+
+ ") CalculateIndexOnOwner;
+ unsigned int CalculateIndexOnOwner();
+
+ %feature("docstring",
+ "
+    Returns the unique id of the layer that owns this slot. Same value as `IConnectableLayer.GetGuid`.
+
+ Returns:
+ int: Layer id.
+
+ ") GetOwningLayerGuid;
+ unsigned int GetOwningLayerGuid();
+
+};
+
+%extend IOutputSlot {
+
+ armnn::IInputSlot* __getitem__(unsigned int index) {
+ return $self->GetConnection(index);
+ }
+
+ unsigned int __len__() const {
+ return $self->GetNumConnections();
+ }
+
+}
+
+%feature("docstring",
+"
+Interface for a layer that is connectable to other layers via `IInputSlot` and `IOutputSlot`.
+The object implementing this interface is returned by `INetwork` when calling `add*Layer` methods.
+
+") IConnectableLayer;
+%nodefaultctor IConnectableLayer;
+%nodefaultdtor IConnectableLayer;
+class IConnectableLayer
+{
+public:
+ %feature("docstring",
+ "
+    Returns the name of the layer. The name attribute is optional for a layer, so a
+    `None` value may be returned.
+
+ Returns:
+ str: Layer name or `None`.
+
+ ") GetName;
+ const char* GetName();
+
+ %feature("docstring",
+ "
+ Gets the number of input slots for the layer.
+
+ Returns:
+ int: Number of input slots.
+
+ ") GetNumInputSlots;
+ unsigned int GetNumInputSlots();
+
+ %feature("docstring",
+ "
+ Gets the number of output slots for the layer.
+
+ Returns:
+ int: Number of output slots.
+
+ ") GetNumOutputSlots;
+ unsigned int GetNumOutputSlots();
+
+ %feature("docstring",
+ "
+ Gets the input slot by index.
+
+ Args:
+ index (int): Slot index.
+
+ Returns:
+ IInputSlot: Borrowed reference to input slot.
+
+ ") GetInputSlot;
+ armnn::IInputSlot& GetInputSlot(unsigned int index);
+
+ %feature("docstring",
+ "
+ Gets the output slot by index.
+
+ Args:
+ index (int): Slot index.
+
+ Returns:
+ IOutputSlot: Borrowed reference to output slot.
+
+ ") GetOutputSlot;
+ armnn::IOutputSlot& GetOutputSlot(unsigned int index);
+
+
+ %feature("docstring",
+ "
+ Gets the unique layer id (within one process).
+ Guid is generated and assigned automatically when the layer is created.
+
+ Returns:
+ int: The unique layer id.
+
+ ") GetGuid;
+ unsigned int GetGuid();
+};
+
+%feature("docstring",
+ "
+ Interface for a network object. Network objects contain the whole computation graph, made up of different layers connected together.
+
+ INetwork objects can be constructed manually or obtained by using parsers. INetwork objects are used to create optimized networks, see `Optimize`.
+
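+    A minimal graph-building sketch (illustrative; assumes the generated bindings are
+    imported as `ann`):
+
+    >>> network = ann.INetwork()
+    >>> input_layer = network.AddInputLayer(0, 'input')
+    >>> softmax_layer = network.AddSoftmaxLayer(ann.SoftmaxDescriptor(), 'softmax')
+    >>> output_layer = network.AddOutputLayer(0, 'output')
+    >>> input_layer.GetOutputSlot(0).Connect(softmax_layer.GetInputSlot(0))
+    >>> softmax_layer.GetOutputSlot(0).Connect(output_layer.GetInputSlot(0))
+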
+ ") INetwork;
+%nodefaultctor INetwork;
+%nodefaultdtor INetwork;
+class INetwork
+{
+public:
+
+ %feature("docstring",
+ "
+ Adds an input layer to the network. Input layers are placed at the start of a network and used for feeding input data during inference.
+
+ Args:
+        id (int): User generated id to uniquely identify a particular input. The same id needs to be specified
+                  when passing the inputs to `IRuntime.EnqueueWorkload()`.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddInputLayer;
+ armnn::IConnectableLayer* AddInputLayer(int id, const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds an addition layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddAdditionLayer;
+ armnn::IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+    Adds an output layer to the network. An output layer is the final layer in the network.
+
+ Args:
+        id (int): User generated id to uniquely identify a particular output. The same id needs to be specified
+                  when passing the outputs to `IRuntime.EnqueueWorkload()`.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddOutputLayer;
+ armnn::IConnectableLayer* AddOutputLayer(int id, const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Add an Absolute layer to the network. Calculates the absolute value of its inputs.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddAbsLayer;
+ armnn::IConnectableLayer* AddAbsLayer(const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds an Activation layer to the network. Type of activation is decided by activationDescriptor.
+
+ Args:
+ activationDescriptor (ActivationDescriptor): ActivationDescriptor to configure the activation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddActivationLayer;
+ armnn::IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+ const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds an ArgMinMax layer to the network.
+
+ Args:
+ desc (ArgMinMaxDescriptor): Parameters for the ArgMinMax layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddArgMinMaxLayer;
+ armnn::IConnectableLayer* AddArgMinMaxLayer(const armnn::ArgMinMaxDescriptor& desc,
+ const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds a Batch Normalization layer to the network.
+
+ Args:
+ mean (ConstTensor): Pre-calculated mean for each channel.
+ variance (ConstTensor): Pre-calculated variance for each channel.
+ beta (ConstTensor): Per-channel additive factor.
+ gamma (ConstTensor): Per-channel multiplicative factor.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddBatchNormalizationLayer;
+ armnn::IConnectableLayer* AddBatchNormalizationLayer(const armnn::BatchNormalizationDescriptor& desc,
+ const armnn::ConstTensor& mean,
+ const armnn::ConstTensor& variance,
+ const armnn::ConstTensor& beta,
+ const armnn::ConstTensor& gamma,
+ const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds a Batch To Space ND layer to the network.
+
+ Args:
+ batchToSpaceNdDescriptor (BatchToSpaceNdDescriptor): Configuration parameters for the layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddBatchToSpaceNdLayer;
+ armnn::IConnectableLayer* AddBatchToSpaceNdLayer(const armnn::BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Comparison layer to the network.
+
+ Args:
+ comparisonDescriptor (ComparisonDescriptor): Configuration parameters for the layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddComparisonLayer;
+ armnn::IConnectableLayer* AddComparisonLayer(const armnn::ComparisonDescriptor& comparisonDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Concatenation layer to the network.
+
+ Args:
+ concatDescriptor (ConcatDescriptor): Parameters to configure the Concatenation layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddConcatLayer;
+ armnn::IConnectableLayer* AddConcatLayer(const armnn::ConcatDescriptor& concatDescriptor,
+ const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds a layer with no inputs and a single output, which always corresponds to the passed in constant tensor.
+
+ Args:
+ input (ConstTensor): Tensor to be provided as the only output of the layer. The layer will maintain
+ its own copy of the tensor data, meaning the memory referenced by input can
+ be freed or reused after this function is called.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddConstantLayer;
+ armnn::IConnectableLayer* AddConstantLayer(const armnn::ConstTensor& input,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Depth To Space layer to the network.
+
+ Args:
+ depthToSpaceDescriptor (DepthToSpaceDescriptor): Parameters for the depth to space operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddDepthToSpaceLayer;
+ armnn::IConnectableLayer* AddDepthToSpaceLayer(const armnn::DepthToSpaceDescriptor& depthToSpaceDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Dequantize layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddDequantizeLayer;
+ armnn::IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds a Detection PostProcess layer to the network. Detection PostProcess is a custom layer for SSD MobilenetV1.
+
+ Args:
+ descriptor (DetectionPostProcessDescriptor): Description of the Detection PostProcess layer.
+ anchors (ConstTensor): Tensor for anchors.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddDetectionPostProcessLayer;
+ armnn::IConnectableLayer* AddDetectionPostProcessLayer(
+ const armnn::DetectionPostProcessDescriptor& descriptor,
+ const armnn::ConstTensor& anchors,
+ const char* name = nullptr);
+
+
+ %feature("docstring",
+ "
+ Adds a Division layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddDivisionLayer;
+ armnn::IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Floor layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddFloorLayer;
+ armnn::IConnectableLayer* AddFloorLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+    Adds a Gather layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddGatherLayer;
+ armnn::IConnectableLayer* AddGatherLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds an Instance Normalization layer to the network.
+
+ Args:
+ desc (InstanceNormalizationDescriptor): Parameters for the instance normalization operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddInstanceNormalizationLayer;
+ armnn::IConnectableLayer* AddInstanceNormalizationLayer(const armnn::InstanceNormalizationDescriptor& desc,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Log Softmax layer to the network.
+
+ Args:
+        logSoftmaxDescriptor (SoftmaxDescriptor): Parameters to configure the log softmax.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddLogSoftmaxLayer;
+ armnn::IConnectableLayer* AddLogSoftmaxLayer(const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds an L2 Normalization layer to the network.
+ Normalization is performed along dimension 1, but requires a 4d input.
+
+ Args:
+ desc (L2NormalizationDescriptor): Parameters for the L2 normalization operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddL2NormalizationLayer;
+ armnn::IConnectableLayer* AddL2NormalizationLayer(const armnn::L2NormalizationDescriptor& desc,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Add a Long Short-Term Memory layer to the network.
+
+ Args:
+ descriptor (LstmDescriptor): Parameters for the Lstm operation.
+ params (LstmInputParams): Weights and biases for the LSTM cell.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddLstmLayer;
+ armnn::IConnectableLayer* AddLstmLayer(const armnn::LstmDescriptor& descriptor,
+ const armnn::LstmInputParams& params,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Add a Maximum layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddMaximumLayer;
+ armnn::IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Mean layer to the network.
+
+ Args:
+        meanDescriptor (MeanDescriptor): Parameters for the mean operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddMeanLayer;
+ armnn::IConnectableLayer* AddMeanLayer(const armnn::MeanDescriptor& meanDescriptor, const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Merge layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddMergeLayer;
+ armnn::IConnectableLayer* AddMergeLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Minimum layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddMinimumLayer;
+ armnn::IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Multiplication layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddMultiplicationLayer;
+ armnn::IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Normalization layer to the network.
+
+ Args:
+ normalizationDescriptor (NormalizationDescriptor): Parameters to configure the normalization.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddNormalizationLayer;
+ armnn::IConnectableLayer* AddNormalizationLayer(const armnn::NormalizationDescriptor& normalizationDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Pad layer to the network.
+
+ Args:
+ padDescriptor (PadDescriptor): Padding configuration for the layer. See `PadDescriptor` for more details.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddPadLayer;
+ armnn::IConnectableLayer* AddPadLayer(const armnn::PadDescriptor& padDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Permute layer to the network.
+
+ Args:
+ permuteDescriptor (PermuteDescriptor): Configuration of the permutation layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddPermuteLayer;
+ armnn::IConnectableLayer* AddPermuteLayer(const armnn::PermuteDescriptor& permuteDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Pooling layer to the network. Type of pooling is decided by the configuration.
+
+ Args:
+ pooling2dDescriptor (Pooling2dDescriptor): Configuration for the pooling layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddPooling2dLayer;
+ armnn::IConnectableLayer* AddPooling2dLayer(const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a PReLU layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddPreluLayer;
+ armnn::IConnectableLayer* AddPreluLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Quantize layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddQuantizeLayer;
+ armnn::IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Quantized Long Short-Term Memory layer to the network.
+
+ Args:
+ params (QuantizedLstmInputParams): The weights and biases for the Quantized LSTM cell.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddQuantizedLstmLayer;
+ armnn::IConnectableLayer* AddQuantizedLstmLayer(const armnn::QuantizedLstmInputParams& params,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Reshape layer to the network.
+
+ Args:
+ reshapeDescriptor (ReshapeDescriptor): Parameters for the reshape operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddReshapeLayer;
+ armnn::IConnectableLayer* AddReshapeLayer(const armnn::ReshapeDescriptor& reshapeDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Resize layer to the network.
+
+ Args:
+ resizeDescriptor (ResizeDescriptor): Configuration for the resize layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddResizeLayer;
+ armnn::IConnectableLayer* AddResizeLayer(const armnn::ResizeDescriptor& resizeDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+    Adds a Reciprocal of Square Root (Rsqrt) layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddRsqrtLayer;
+ armnn::IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Slice layer to the network.
+
+ Args:
+ sliceDescriptor (SliceDescriptor): Descriptor to configure the slice operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSliceLayer;
+ armnn::IConnectableLayer* AddSliceLayer(const armnn::SliceDescriptor& sliceDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Softmax layer to the network.
+
+ If the data type is `DataType_QuantisedAsymm8`, then the output quantization parameters
+ must have a scale of 1/256 and an offset of 0.
+
+ Args:
+ softmaxDescriptor (SoftmaxDescriptor): Configuration for the softmax layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSoftmaxLayer;
+ armnn::IConnectableLayer* AddSoftmaxLayer(const armnn::SoftmaxDescriptor& softmaxDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Space To Batch layer to the network.
+
+ Args:
+ spaceToBatchNdDescriptor (SpaceToBatchNdDescriptor): Configuration for the space to batch layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSpaceToBatchNdLayer;
+ armnn::IConnectableLayer* AddSpaceToBatchNdLayer(const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a space to depth layer to the network.
+
+ Args:
+ spaceToDepthDescriptor (SpaceToDepthDescriptor): Parameters for the space to depth operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSpaceToDepthLayer;
+ armnn::IConnectableLayer* AddSpaceToDepthLayer(const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Splitter layer to the network.
+
+ Args:
+ splitterDescriptor (SplitterDescriptor): Parameters to configure the splitter layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSplitterLayer;
+ armnn::IConnectableLayer* AddSplitterLayer(const armnn::SplitterDescriptor& splitterDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Stack layer to the network.
+
+ Args:
+ descriptor (StackDescriptor): Descriptor to configure the stack layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddStackLayer;
+ armnn::IConnectableLayer* AddStackLayer(const armnn::StackDescriptor& descriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a StandIn layer to the network.
+
+ Args:
+ descriptor (StandInDescriptor): Parameters to configure the standIn layer.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddStandInLayer;
+ armnn::IConnectableLayer* AddStandInLayer(const armnn::StandInDescriptor& descriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Strided Slice layer to the network.
+
+ Args:
+ stridedSliceDescriptor (StridedSliceDescriptor): Parameters for the strided slice operation.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddStridedSliceLayer;
+ armnn::IConnectableLayer* AddStridedSliceLayer(const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
+ const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Subtraction layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSubtractionLayer;
+ armnn::IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
+
+ %feature("docstring",
+ "
+ Adds a Switch layer to the network.
+
+ Args:
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddSwitchLayer;
+ armnn::IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
+
+};
+
+%extend INetwork {
+
+ INetwork() {
+ return armnn::INetwork::CreateRaw();
+ }
+
+ ~INetwork() {
+ armnn::INetwork::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+ Adds a Fully Connected layer to the network. Also known as a Linear or Dense layer.
+
+ Args:
+ fullyConnectedDescriptor (FullyConnectedDescriptor): Description of the fully connected layer.
+ weights (ConstTensor): Tensor for the weights data.
+ biases (ConstTensor): Optional tensor for the bias data.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
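+
+    A hypothetical call sketch (assumes `weights` is a `ConstTensor` built elsewhere;
+    passing `None` for `biases` omits the bias):
+
+    >>> fc_desc = ann.FullyConnectedDescriptor()
+    >>> fc_layer = network.AddFullyConnectedLayer(fc_desc, weights, None, 'fc')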
+ ") AddFullyConnectedLayer;
+ armnn::IConnectableLayer* AddFullyConnectedLayer(const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
+ const armnn::ConstTensor& weights,
+ armnn::ConstTensor* biases = nullptr,
+ const char* name = nullptr) {
+
+ if (biases) {
+ return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(*biases), name);
+ } else {
+ return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(), name);
+ }
+
+ }
+
+ %feature("docstring",
+ "
+ Adds a 2D Transpose Convolution layer to the network.
+
+ Args:
+ descriptor (TransposeConvolution2dDescriptor): Descriptor containing all parameters to configure this layer.
+ weights (ConstTensor): Tensor for the weights data.
+ biases (ConstTensor): Optional tensor for the bias data.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddTransposeConvolution2dLayer;
+ armnn::IConnectableLayer* AddTransposeConvolution2dLayer(const armnn::TransposeConvolution2dDescriptor& descriptor,
+ const armnn::ConstTensor& weights,
+ armnn::ConstTensor* biases = nullptr,
+ const char* name = nullptr){
+
+ if (biases) {
+ return $self->AddTransposeConvolution2dLayer(descriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(*biases), name);
+ } else {
+ return $self->AddTransposeConvolution2dLayer(descriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(), name);
+ }
+ }
+
+
+ %feature("docstring",
+ "
+ Adds a 2D Convolution layer to the network.
+
+ Args:
+ convolution2dDescriptor (Convolution2dDescriptor): Description of the 2D convolution layer.
+ weights (ConstTensor): Tensor for the weights data.
+ biases (ConstTensor): Optional tensor for the bias data. If specified, must match the output tensor shape.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddConvolution2dLayer;
+ armnn::IConnectableLayer* AddConvolution2dLayer(const armnn::Convolution2dDescriptor& convolution2dDescriptor,
+ const armnn::ConstTensor& weights,
+ armnn::ConstTensor* biases = nullptr,
+ const char* name = nullptr) {
+
+ if (biases) {
+ return $self->AddConvolution2dLayer(convolution2dDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(*biases), name);
+ } else {
+ return $self->AddConvolution2dLayer(convolution2dDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(), name);
+ }
+ }
+
+ %feature("docstring",
+ "
+ Adds a 2D Depthwise Convolution layer to the network.
+
+ Args:
+ convolution2dDescriptor (DepthwiseConvolution2dDescriptor): Description of the 2D depthwise convolution layer.
+ weights (ConstTensor): Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
+ biases (ConstTensor): Optional tensor for the bias data. If specified, must match the output tensor shape.
+ name (str): Optional name for the layer.
+
+ Returns:
+ IConnectableLayer: Interface for configuring the layer.
+ ") AddDepthwiseConvolution2dLayer;
+
+ armnn::IConnectableLayer* AddDepthwiseConvolution2dLayer(
+ const armnn::DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+ const armnn::ConstTensor& weights,
+ const armnn::ConstTensor* biases = nullptr,
+ const char* name = nullptr) {
+
+ if (biases) {
+ return $self->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(*biases), name);
+ } else {
+ return $self->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
+ armnn::Optional<armnn::ConstTensor>(), name);
+ }
+ }
+}
+
+%feature("docstring",
+ "
+    Interface class for an optimized network object. Optimized networks are obtained after running `Optimize` on
+    an `INetwork` object.
+    Optimized networks are loaded into an `IRuntime` via `LoadNetwork` and then executed with `EnqueueWorkload`.
+ ") IOptimizedNetwork;
+%nodefaultctor IOptimizedNetwork;
+%nodefaultdtor IOptimizedNetwork;
+class IOptimizedNetwork
+{
+};
+
+%extend IOptimizedNetwork {
+
+ ~IOptimizedNetwork() {
+ armnn::IOptimizedNetwork::Destroy($self);
+ }
+
+ %feature("docstring",
+ "
+ Saves optimized network graph as dot file.
+
+ Args:
+ fileName (str): File path to save to.
+ Raises:
+        RuntimeError: If serialization fails.
+ ") SerializeToDot;
+
+ void SerializeToDot(const std::string& fileName) {
+ std::ofstream dot;
+ dot.open(fileName);
+ if(!dot.is_open())
+ {
+ throw armnn::Exception("Failed to open dot file");
+ } else {
+ armnn::Status status = $self->SerializeToDot(dot);
+ dot.close();
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to serialize to dot");
+ }
+ }
+ };
+}
+}
+
+%{
+ std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>> Optimize(const armnn::INetwork* network,
+ const std::vector<armnn::BackendId>& backendPreferences,
+ const armnn::IDeviceSpec& deviceSpec,
+ const armnn::OptimizerOptions& options = armnn::OptimizerOptions())
+ {
+ std::vector<std::string> errorMessages;
+ armnn::IOptimizedNetwork* optimizedNetwork = armnn::Optimize(*network, backendPreferences, deviceSpec,
+ options, armnn::Optional<std::vector<std::string> &>(errorMessages)).release();
+
+ if(!optimizedNetwork)
+ {
+ std::string errorString;
+
+ for (auto error : errorMessages) {
+ errorString.append(error);
+ }
+
+ throw armnn::Exception(errorString);
+ }
+
+ return std::make_pair(optimizedNetwork, errorMessages);
+ };
+%}
+
+%feature("docstring",
+ "
+ Create an optimized version of the given network.
+ Args:
+ network (INetwork): INetwork description of the network to be optimized.
+ backendPreferences (list): The choice of the backend ordered by user preferences. See `BackendId`.
+ deviceSpec (IDeviceSpec): DeviceSpec object as queried from the runtime. See `IRuntime.GetDeviceSpec`.
+ options (OptimizerOptions): Object with optimizer configuration options.
+
+ Returns:
+ tuple: (`IOptimizedNetwork`, a tuple of failures or warnings).
+
+ Raises:
+        RuntimeError: If the optimization process fails.
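+
+    A typical call sketch (illustrative; assumes `network` is an `INetwork`, `runtime`
+    is an `IRuntime`, and `BackendId` accepts a backend name string):
+
+    >>> preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+    >>> opt_network, messages = ann.Optimize(network, preferred_backends,
+    ...                                      runtime.GetDeviceSpec(), ann.OptimizerOptions())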
+ ") Optimize;
+
+%optimize_typemap_out;
+std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>> Optimize(const armnn::INetwork* network,
+ const std::vector<armnn::BackendId>& backendPreferences,
+ const armnn::IDeviceSpec& deviceSpec,
+ const armnn::OptimizerOptions& options = OptimizerOptions());
+%clear_optimize_typemap_out;
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_profiler.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_profiler.i
new file mode 100644
index 0000000000..929a7a0006
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_profiler.i
@@ -0,0 +1,82 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/IProfiler.hpp"
+%}
+
+namespace armnn
+{
+
+%feature("docstring",
+"
+Interface for profiling Arm NN. See `IRuntime.GetProfiler`.
+
+IProfiler object allows you to enable profiling and get various profiling results.
+
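+A minimal usage sketch (illustrative; assumes `runtime` is an `IRuntime` and `net_id`
+is the id of a loaded network):
+
+>>> profiler = runtime.GetProfiler(net_id)
+>>> profiler.EnableProfiling(True)
+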
+") IProfiler;
+%nodefaultctor IProfiler;
+%nodefaultdtor IProfiler;
+class IProfiler
+{
+public:
+
+ %feature("docstring",
+ "
+ Sets the profiler to start/stop profiling.
+
+ Args:
+ enableProfiling (bool): Flag to enable/disable profiling.
+
+ ") EnableProfiling;
+
+ void EnableProfiling(bool enableProfiling);
+
+ %feature("docstring",
+ "
+ Checks if profiling is enabled.
+
+ Returns:
+ bool: If profiling is enabled or not.
+
+ ") IsProfilingEnabled;
+
+ bool IsProfilingEnabled();
+};
+
+%extend IProfiler {
+
+ %feature("docstring",
+ "
+ Gets the string value of the profiling events analysis log.
+
+ Returns:
+ str: The profiling events analysis log.
+
+ ") event_log;
+
+ std::string event_log()
+ {
+ std::ostringstream oss;
+ $self->AnalyzeEventsAndWriteResults(oss);
+ return oss.str();
+ }
+
+ %feature("docstring",
+ "
+    Gets the profiling log as a JSON string.
+
+ Returns:
+ str: Profiling log as JSON formatted string.
+
+ ") as_json;
+
+ std::string as_json()
+ {
+ std::ostringstream oss;
+ $self->Print(oss);
+ return oss.str();
+ }
+}
+}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
new file mode 100644
index 0000000000..bbeda51d89
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
@@ -0,0 +1,254 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/IRuntime.hpp"
+#include <iostream>
+#include <ostream>
+#include <sstream>
+%}
+
+namespace std {
+ %template() pair<int, string>;
+ %template(IntPair) pair<int, int>;
+ %template(ConstTensorPair) pair<int, armnn::ConstTensor>;
+ %template(TensorPair) pair<int, armnn::Tensor>;
+
+ %template(InputTensorsVector) vector<pair<int, armnn::ConstTensor>>;
+ %template(OutputTensorsVector) vector<pair<int, armnn::Tensor>>;
+}
+
+%include <std_shared_ptr.i>
+
+%shared_ptr(IGpuAccTunedParameters);
+
+#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS
+
+%{
+typedef armnn::IRuntime::CreationOptions CreationOptions;
+%}
+
+struct CreationOptions
+{
+ %feature("docstring",
+ "
+    Structure for holding creation options. For the majority of cases it is fine to leave the values at their defaults.
+
+ Contains:
+ m_GpuAccTunedParameters (IGpuAccTunedParameters): If set, uses the GpuAcc tuned parameters from the given object
+ when executing GPU workloads. It will also be updated with new
+ tuned parameters if it is configured to do so.
+
+ m_EnableGpuProfiling (bool): Setting this flag will allow the user to obtain GPU profiling information from
+ the runtime.
+
+ m_DynamicBackendsPath (string): Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS
+ compiler directive. Only a single path is allowed for the override.
+
+ ") CreationOptions;
+
+ CreationOptions();
+ std::shared_ptr<armnn::IGpuAccTunedParameters> m_GpuAccTunedParameters;
+ bool m_EnableGpuProfiling;
+ std::string m_DynamicBackendsPath;
+};
+
+namespace armnn
+{
+
+struct INetworkProperties
+{
+ %feature("docstring",
+ "
+ Structure for holding network properties.
+
+ Contains:
+ m_ImportEnabled (bool): Enable import.
+
+ m_ExportEnabled (bool): Enable export.
+
+ ") INetworkProperties;
+ INetworkProperties(bool importEnabled = false, bool exportEnabled = false);
+
+ const bool m_ImportEnabled;
+ const bool m_ExportEnabled;
+};
+
+%feature("docstring",
+"
+Interface for runtime objects.
+
+Runtime objects are responsible for performing inference on an `IOptimizedNetwork`.
+
+Args:
+ options (CreationOptions): CreationOptions data struct.
+
+") IRuntime;
+%nodefaultctor IRuntime;
+class IRuntime
+{
+public:
+
+ %ignore
+ armnn::IRuntime::UnloadNetwork(NetworkId networkId);
+
+ %ignore
+ armnn::IRuntime::EnqueueWorkload(NetworkId networkId,
+ const std::vector<std::pair<int, armnn::ConstTensor>>& inputTensors,
+ const std::vector<std::pair<int, armnn::Tensor>>& outputTensors);
+
+ %feature("docstring",
+ "
+    Get information relating to a network's input tensor.
+
+ Args:
+ networkId (int): Unique ID of the network being run.
+ layerId (int): Unique ID of the input layer.
+
+ Returns:
+        TensorInfo: Information relating to the input tensor of a network.
+ ") GetInputTensorInfo;
+ armnn::TensorInfo GetInputTensorInfo(int networkId, int layerId);
+
+ %feature("docstring",
+ "
+    Get information relating to a network's output tensor.
+
+ Args:
+ networkId (int): Unique ID of the network being run.
+ layerId (int): Unique ID of the output layer.
+
+ Returns:
+        TensorInfo: Information relating to the output tensor of a network.
+ ") GetOutputTensorInfo;
+ armnn::TensorInfo GetOutputTensorInfo(int networkId, int layerId);
+
+ %feature("docstring",
+ "
+    Get information relating to the supported compute backends on the current device.
+
+    Returns:
+        IDeviceSpec: Device spec information detailing all supported backends on the current platform.
+ ") GetDeviceSpec;
+ const IDeviceSpec& GetDeviceSpec();
+};
+
+%extend IRuntime {
+ //tell python to disown the IOptimizedNetwork pointer
+ //because IRuntime takes ownership
+ %typemap(in) armnn::IOptimizedNetwork* {
+ if (!SWIG_IsOK(SWIG_ConvertPtr($input, (void **) &$1, $1_descriptor, SWIG_POINTER_DISOWN))) {
+ SWIG_exception_fail(SWIG_TypeError, "in method '$symname', argument 2 of type armnn::IOptimizedNetwork*");
+ }
+ }
+
+ %feature("docstring",
+ "
+ Loads a complete network into the IRuntime.
+ The runtime takes ownership of the network once passed in.
+ Args:
+ network (IOptimizedNetwork): An optimized network to load into the IRuntime.
+ networkProperties (INetworkProperties): Properties that allows the user to opt-in to import/export behavior. Default: None.
+ Returns:
+        tuple: (int, str) Network id and non-fatal failure or warning messages.
+    Raises:
+        RuntimeError: If the process fails.
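+
+    A minimal call sketch (illustrative; assumes `opt_network` was returned by `Optimize`):
+
+    >>> net_id, messages = runtime.LoadNetwork(opt_network)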
+ ") LoadNetwork;
+
+ std::pair<int, std::string> LoadNetwork(armnn::IOptimizedNetwork* network,
+ const INetworkProperties* networkProperties = nullptr)
+ {
+ armnn::IOptimizedNetworkPtr netPtr(network, &armnn::IOptimizedNetwork::Destroy);
+ armnn::NetworkId networkIdOut;
+ std::string errorString;
+ armnn::Status status;
+
+ if (networkProperties) {
+ status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString, *networkProperties);
+ } else {
+ status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString);
+ }
+
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception(errorString);
+ }
+
+ auto net_id_int = static_cast<int>(networkIdOut);
+ return std::make_pair(net_id_int, errorString);
+ };
+
+ %typemap(in) armnn::IOptimizedNetwork*;
+ %feature("docstring",
+ "
+ Calling this function will perform an inference on your network.
+
+ Args:
+ networkId (int): Unique ID of the network to run.
+ inputTensors (list): A list of tuples (int, ConstTensor), see `make_input_tensors`.
+ outputTensors (list): A list of tuples (int, Tensor), see `make_output_tensors`.
+
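+    Raises:
+        RuntimeError: If the workload fails to enqueue.
+
+    Examples:
+        A minimal sketch (assumes `net_id`, `input_tensors` and `output_tensors` were
+        produced via `LoadNetwork`, `make_input_tensors` and `make_output_tensors`):
+
+            runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+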
+ ") EnqueueWorkload;
+ void EnqueueWorkload(int networkId, const std::vector<std::pair<int, armnn::ConstTensor>>& inputTensors,
+ const std::vector<std::pair<int, armnn::Tensor>>& outputTensors) {
+ armnn::Status status = $self->EnqueueWorkload(networkId, inputTensors, outputTensors);
+
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to enqueue workload for network.");
+ }
+ };
+
+ %feature("docstring",
+ "
+ Unload a currently loaded network from the runtime.
+
+ Args:
+ networkId (int): Unique ID of the network to unload.
+
+ ") UnloadNetwork;
+ void UnloadNetwork(int networkId) {
+ armnn::Status status = $self->UnloadNetwork(networkId);
+ if(status == armnn::Status::Failure)
+ {
+ throw armnn::Exception("Failed to unload network.");
+ }
+ };
+
+ %feature("docstring",
+ "
+    Returns the IProfiler instance registered against the working thread and stored on the loaded network.
+    Be aware that if the runtime has unloaded the network, or if the runtime is destroyed,
+    the IProfiler instance will also be destroyed, and using it will cause a segmentation fault.
+
+ Args:
+ networkId (int): The ID of the loaded network you want to profile.
+
+ Returns:
+ IProfiler: IProfiler instance the given loaded network has stored.
+
+ Raises:
+ RuntimeError: If no profiler is found.
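+
+    Examples:
+        A minimal sketch (assumes `net_id` was returned by `LoadNetwork` and the
+        network is still loaded):
+
+            profiler = runtime.GetProfiler(net_id)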
+ ") GetProfiler;
+
+ armnn::IProfiler* GetProfiler(int networkId) {
+ std::shared_ptr<armnn::IProfiler> profiler = $self->GetProfiler(networkId);
+ if (nullptr == profiler) {
+ throw armnn::Exception("Failed to get profiler");
+ }
+ return profiler.get();
+ };
+
+ ~IRuntime() {
+ armnn::IRuntime::Destroy($self);
+ }
+
+ IRuntime(const CreationOptions& options) {
+ return armnn::IRuntime::CreateRaw(options);
+ }
+
+}
+
+}
+
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
new file mode 100644
index 0000000000..efa9a16352
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
@@ -0,0 +1,313 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/Tensor.hpp"
+%}
+
+%include <typemaps/tensor_memory.i>
+%include <typemaps/tensor_shape.i>
+
+namespace armnn
+{
+
+%feature("docstring",
+"
+Class for holding the shape information of an Arm NN tensor.
+
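+Examples:
+    A minimal sketch (assumes pyarmnn is imported as `ann`):
+
+        shape = ann.TensorShape((1, 3, 227, 227))
+        assert shape.GetNumDimensions() == 4
+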
+") TensorShape;
+class TensorShape
+{
+public:
+ %tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes);
+ TensorShape(unsigned int numDimensions, const unsigned int* dimensionSizes);
+ %clear_tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes);
+
+ %feature("docstring",
+ "
+ Returns the number of dimensions in this TensorShape.
+
+ Returns:
+ int: The number of dimensions in this TensorShape.
+
+ ") GetNumDimensions;
+ unsigned int GetNumDimensions() const;
+
+ %feature("docstring",
+ "
+ Returns the total number of elements for a tensor with this TensorShape.
+
+ Returns:
+ int: The total number of elements for a tensor with this TensorShape.
+
+ ") GetNumElements;
+ unsigned int GetNumElements() const;
+
+};
+
+%extend TensorShape {
+
+ unsigned int __getitem__(unsigned int i) const {
+ return $self->operator[](i);
+ }
+ void __setitem__(unsigned int i, unsigned int val) {
+ $self->operator[](i) = val;
+ }
+
+ std::string __str__() {
+ std::string dim = "NumDimensions: " + std::to_string($self->GetNumDimensions());
+ std::string elm = "NumElements: " + std::to_string($self->GetNumElements());
+
+ std::string shapeStr = "TensorShape{Shape(";
+
+        auto numDimensions = $self->GetNumDimensions();
+        for (unsigned int i = 0; i < numDimensions; i++) {
+            shapeStr += std::to_string($self->operator[](i));
+
+            // Append a separator after every dimension except the last.
+            if (i + 1 < numDimensions) {
+                shapeStr += ", ";
+            }
+        }
+ shapeStr = shapeStr + "), " + dim + ", " + elm + "}";
+ return shapeStr;
+ }
+
+}
+
+
+%feature("docstring",
+"
+Class for holding the information of an Arm NN tensor, such as quantization parameters, data type and shape.
+
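+Examples:
+    A minimal sketch (assumes pyarmnn is imported as `ann`):
+
+        info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+        assert info.GetNumElements() == 6
+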
+") TensorInfo;
+class TensorInfo
+{
+public:
+ TensorInfo();
+
+ TensorInfo(const TensorInfo& other);
+
+ TensorInfo(const TensorShape& shape, DataType dataType,
+ float quantizationScale = 0.0f, int32_t quantizationOffset = 0);
+
+ %feature("docstring",
+ "
+ Get the tensor shape.
+
+    Returns:
+ TensorShape: Current shape of the tensor.
+
+ ") GetShape;
+ TensorShape& GetShape();
+
+ %feature("docstring",
+ "
+    Set the tensor shape. The new shape must have the same number of elements as the current tensor.
+
+ Args:
+ newShape (TensorShape): New tensor shape to reshape to.
+
+ ") SetShape;
+ void SetShape(const TensorShape& newShape);
+
+ %feature("docstring",
+ "
+ Returns the number of dimensions in this Tensor.
+
+ Returns:
+ int: The number of dimensions in this Tensor.
+
+ ") GetNumDimensions;
+ unsigned int GetNumDimensions() const;
+
+ %feature("docstring",
+ "
+ Returns the total number of elements for this Tensor.
+
+ Returns:
+ int: The total number of elements for this Tensor.
+
+ ") GetNumElements;
+ unsigned int GetNumElements() const;
+
+ %feature("docstring",
+ "
+ Get the tensor datatype.
+
+ Returns:
+ DataType: Current tensor DataType.
+
+ ") GetDataType;
+ DataType GetDataType() const;
+
+ %feature("docstring",
+ "
+ Set the tensor datatype.
+
+ Args:
+ type (DataType): DataType to set the tensor to.
+
+ ") SetDataType;
+ void SetDataType(DataType type);
+
+ %feature("docstring",
+ "
+    Get the value of the tensor's quantization scale.
+
+ Returns:
+ float: Tensor quantization scale value.
+
+ ") GetQuantizationScale;
+ float GetQuantizationScale() const;
+
+ %feature("docstring",
+ "
+    Get the value of the tensor's quantization offset.
+
+ Returns:
+ int: Tensor quantization offset value.
+
+ ") GetQuantizationOffset;
+ int32_t GetQuantizationOffset() const;
+
+ %feature("docstring",
+ "
+    Set the value of the tensor's quantization scale.
+
+ Args:
+ scale (float): Scale value to set.
+
+ ") SetQuantizationScale;
+ void SetQuantizationScale(float scale);
+
+ %feature("docstring",
+ "
+    Set the value of the tensor's quantization offset.
+
+ Args:
+ offset (int): Offset value to set.
+
+ ") SetQuantizationOffset;
+ void SetQuantizationOffset(int32_t offset);
+
+ %feature("docstring",
+ "
+ Returns true if the tensor is a quantized data type.
+
+ Returns:
+ bool: True if the tensor is a quantized data type.
+
+ ") IsQuantized;
+ bool IsQuantized() const;
+
+
+
+ %feature("docstring",
+ "
+    Check that the types are the same and, if quantized, that the quantization parameters are the same.
+
+ Returns:
+ bool: True if matched, else False.
+
+ ") IsTypeSpaceMatch;
+ bool IsTypeSpaceMatch(const TensorInfo& other) const;
+
+ %feature("docstring",
+ "
+ Get the number of bytes needed for this tensor.
+
+ Returns:
+ int: Number of bytes consumed by this tensor.
+
+ ") GetNumBytes;
+ unsigned int GetNumBytes() const;
+
+};
+
+%extend TensorInfo {
+
+ std::string __str__() {
+ const std::string tmp = "TensorInfo{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
+ + ", IsQuantized: " + std::to_string($self->IsQuantized())
+ + ", QuantizationScale: " + std::to_string( $self->GetQuantizationScale())
+ + ", QuantizationOffset: " + std::to_string($self->GetQuantizationOffset())
+ + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
+ + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
+ return tmp;
+ }
+
+}
+
+class Tensor
+{
+public:
+ ~Tensor();
+ Tensor();
+ Tensor(const Tensor& other);
+
+ %mutable_memory(void* memory);
+ Tensor(const TensorInfo& info, void* memory);
+ %clear_mutable_memory(void* memory);
+
+ const TensorInfo& GetInfo() const;
+ const TensorShape& GetShape() const;
+
+ DataType GetDataType() const;
+ unsigned int GetNumDimensions() const;
+ unsigned int GetNumBytes() const;
+ unsigned int GetNumElements() const;
+
+    /* We want to disable getting the memory area from here, forcing use of get_memory_area() in the public API.
+    void* GetMemoryArea() const;*/
+};
+
+%extend Tensor {
+
+ std::string __str__() {
+ const std::string tmp = "Tensor{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
+ + ", NumBytes: " + std::to_string($self->GetNumBytes())
+ + ", NumDimensions: " + std::to_string( $self->GetNumDimensions())
+ + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
+ return tmp;
+ }
+}
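+
+// Python usage sketch (illustrative; the public pyarmnn API wraps this class and
+// allocates the backing memory for it, as exercised in test_const_tensor.py):
+//     info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+//     tensor = ann.Tensor(info)
+//     tensor.get_memory_area()[0] = 1.0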
+
+class ConstTensor
+{
+public:
+ ~ConstTensor();
+ ConstTensor();
+ ConstTensor(const Tensor& other);
+ ConstTensor(const ConstTensor& other);
+
+ %const_memory(const void* memory);
+ ConstTensor(const TensorInfo& info, const void* memory);
+ %clear_const_memory(const void* memory);
+
+ const TensorInfo& GetInfo() const;
+ const TensorShape& GetShape() const;
+
+ DataType GetDataType() const;
+ unsigned int GetNumDimensions() const;
+ unsigned int GetNumBytes() const;
+ unsigned int GetNumElements() const;
+
+    /* We want to disable getting the memory area from here, forcing use of get_memory_area() in the public API.
+    void* GetMemoryArea() const;*/
+};
+
+%extend ConstTensor {
+
+ std::string __str__() {
+ const std::string tmp = "ConstTensor{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
+ + ", NumBytes: " + std::to_string($self->GetNumBytes())
+ + ", NumDimensions: " + std::to_string( $self->GetNumDimensions())
+ + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
+ return tmp;
+ }
+}
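+
+// Python usage sketch (illustrative; mirrors test_const_tensor.py — the backing data
+// is any numpy array whose byte size matches the TensorInfo):
+//     import numpy as np
+//     data = np.ones((2, 3), dtype=np.float32)
+//     const_tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32), data)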
+
+}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
new file mode 100644
index 0000000000..50afda9fd3
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
@@ -0,0 +1,136 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/Types.hpp"
+%}
+
+%include <typemaps/permutation_vector.i>
+
+
+namespace armnn
+{
+
+%feature("docstring",
+"
+Vector used to permute a tensor.
+
+For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
+which is to be passed as an input to Arm NN, each source dimension is mapped to the corresponding
+Arm NN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
+to the location of the Arm NN Height dimension (1 -> 2). Similar arguments are made for the Width and
+Channels (2 -> 3 and 3 -> 1). This will lead to m_DimMappings pointing to the following array:
+[ 0, 2, 3, 1 ].
+
+Note that the mapping should be reversed if considering the case of Arm NN 4-d outputs (Batch Element,
+Channels, Height, Width) being written to a destination with the format mentioned above. We now have
+0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following m_DimMappings contents:
+[ 0, 3, 1, 2 ].
+
+Args:
+ dimMappings (list): Indicates how to translate tensor elements from a given source into the target destination,
+ when source and target potentially have different memory layouts.
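+
+Examples:
+    A minimal sketch of the mappings described above (assumes pyarmnn is imported as `ann`):
+
+        nhwc_to_armnn = ann.PermutationVector((0, 2, 3, 1))
+        armnn_to_nhwc = ann.PermutationVector((0, 3, 1, 2))
+        assert nhwc_to_armnn.IsInverse(armnn_to_nhwc)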
+") PermutationVector;
+
+class PermutationVector
+{
+public:
+ using ValueType = unsigned int;
+ using SizeType = unsigned int;
+
+ %permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);
+ PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);
+ %clear_permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);
+
+
+ %feature("docstring",
+ "
+ Get the PermutationVector size.
+
+    Returns:
+ SizeType: Current size of the PermutationVector.
+
+ ") GetSize;
+ SizeType GetSize();
+
+ %feature("docstring",
+ "
+    Checks if the specified permutation vector is the inverse of this one.
+
+    Returns:
+        bool: True if the specified permutation vector is the inverse of this one.
+
+ ") IsInverse;
+ bool IsInverse(const PermutationVector& other);
+};
+
+%extend PermutationVector {
+
+ unsigned int __getitem__(unsigned int i) const {
+ return $self->operator[](i);
+ }
+
+ bool __eq__(PermutationVector other) {
+ int size = $self->GetSize();
+ int otherSize = other.GetSize();
+ if(size != otherSize)
+ {
+ return false;
+ }
+        // Compare every element; equality holds only if all of them match.
+        for(int i = 0; i < size; ++i){
+            if($self->operator[](i) != other[i])
+            {
+                return false;
+            }
+        }
+        return true;
+ }
+}
+
+}
+%feature("docstring",
+"
+Interface for device specifications. Main use is to get information relating to what compute capability the device being used has.
+") IDeviceSpec;
+
+
+%feature("docstring",
+"
+Returns the backends supported by this compute device.
+
+Returns:
+    set: This device's supported backends.
+
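+Examples:
+    A minimal sketch (assumes `runtime` is an existing IRuntime):
+
+        device_spec = runtime.GetDeviceSpec()
+        backends = device_spec.GetSupportedBackends()
+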
+") GetSupportedBackends;
+
+%ignore ProfilingGuid;
+%ignore PermutationVector;
+%include "armnn/Types.hpp"
+
+%extend armnn::IDeviceSpec {
+
+
+    std::string __str__() {
+
+        std::string deviceStr = "IDeviceSpec { supportedBackends: [";
+
+        auto backends = $self->GetSupportedBackends();
+        auto remaining = backends.size();
+        for (auto p = backends.begin(); p != backends.end(); ++p) {
+
+            deviceStr += p->Get();
+
+            // Append a separator after every backend except the last.
+            if (--remaining > 0) {
+                deviceStr += ", ";
+            }
+        }
+        deviceStr = deviceStr + "]}";
+
+        return deviceStr;
+    }
+
+}
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types_utils.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types_utils.i
new file mode 100644
index 0000000000..c11d9927c9
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types_utils.i
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%{
+#include "armnn/TypesUtils.hpp"
+%}
+
+namespace armnn
+{
+ constexpr unsigned int GetDataTypeSize(DataType dataType);
+
+ constexpr const char* GetDataTypeName(DataType dataType);
+
+ template<typename QuantizedType>
+ QuantizedType Quantize(float value, float scale, int32_t offset);
+ %template(Quantize_uint8_t) Quantize<uint8_t>;
+ %template(Quantize_int16_t) Quantize<int16_t>;
+ %template(Quantize_int32_t) Quantize<int32_t>;
+
+ template <typename QuantizedType>
+ float Dequantize(QuantizedType value, float scale, int32_t offset);
+ %template(Dequantize_uint8_t) Dequantize<uint8_t>;
+ %template(Dequantize_int16_t) Dequantize<int16_t>;
+ %template(Dequantize_int32_t) Dequantize<int32_t>;
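+
+    // Python usage sketch (illustrative; assumes pyarmnn is imported as `ann`):
+    //     q = ann.Quantize_uint8_t(0.5, 1.0 / 255, 0)   # float -> uint8, roughly 128
+    //     f = ann.Dequantize_uint8_t(q, 1.0 / 255, 0)   # uint8 -> float, roughly 0.5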
+}
diff --git a/python/pyarmnn/src/pyarmnn/swig/standard_header.i b/python/pyarmnn/src/pyarmnn/swig/standard_header.i
new file mode 100644
index 0000000000..c412dc3bff
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/standard_header.i
@@ -0,0 +1,53 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%include "stl.i"
+%include "cstring.i"
+%include "std_string.i"
+%include "std_vector.i"
+%include "std_unordered_set.i"
+%include "std_pair.i"
+%include "stdint.i"
+%include "carrays.i"
+%include "exception.i"
+%include "typemaps.i"
+%include "std_iostream.i"
+
+%ignore *::operator=;
+%ignore *::operator[];
+
+
+// Define an exception typemap to wrap Arm NN exceptions into Python exceptions.
+
+%exception{
+ try {
+ $action
+ } catch (armnn::Exception &e) {
+ SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
+ }
+};
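+
+// Python-side effect (illustrative): an armnn::Exception surfaces as a RuntimeError:
+//     try:
+//         runtime.UnloadNetwork(unknown_network_id)
+//     except RuntimeError as e:
+//         print(e)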
+
+%exception __getitem__ {
+ try {
+ $action
+ } catch (armnn::InvalidArgumentException &e) {
+ SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
+ } catch (const std::out_of_range &e) {
+ SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
+ } catch (const std::exception &e) {
+ SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
+ }
+};
+
+%exception __setitem__ {
+ try {
+ $action
+ } catch (armnn::InvalidArgumentException &e) {
+ SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
+ } catch (const std::out_of_range &e) {
+ SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
+ } catch (const std::exception &e) {
+ SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
+ }
+};
diff --git a/python/pyarmnn/src/pyarmnn/swig/typemaps/network_optimize.i b/python/pyarmnn/src/pyarmnn/swig/typemaps/network_optimize.i
new file mode 100644
index 0000000000..05df82bdd1
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/typemaps/network_optimize.i
@@ -0,0 +1,41 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%define %optimize_typemap_out
+ %typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>) {
+ PyObject * network = SWIG_NewPointerObj(SWIG_as_voidptr($1.first), SWIGTYPE_p_armnn__IOptimizedNetwork, SWIG_POINTER_OWN);
+ $result = PyTuple_New(2);
+
+ // Convert vector to fixed-size tuple
+ std::vector<std::string> strings = $1.second;
+ Py_ssize_t size = strings.size();
+
+ // New reference. Need to Py_DECREF
+ PyObject* errMsgTuple = PyTuple_New(size);
+
+ if (!errMsgTuple) {
+ Py_XDECREF(errMsgTuple);
+ return PyErr_NoMemory();
+ }
+
+ for (Py_ssize_t i = 0; i < size; i++) {
+ // New reference. Need to Py_DECREF
+ PyObject *string = PyString_FromString(strings[i].c_str());
+
+ if (!string) {
+ Py_XDECREF(string);
+ return PyErr_NoMemory();
+ }
+ PyTuple_SetItem(errMsgTuple, i, string);
+ }
+
+ // Create result tuple
+ PyTuple_SetItem($result, 0, network);
+ PyTuple_SetItem($result, 1, errMsgTuple);
+ }
+%enddef
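+
+// Python-side effect (illustrative; mirrors test_caffe_parser.py): Optimize returns a
+// tuple of (IOptimizedNetwork, tuple of error/warning strings):
+//     opt_network, messages = ann.Optimize(network, preferred_backends,
+//                                          runtime.GetDeviceSpec(), ann.OptimizerOptions())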
+
+%define %clear_optimize_typemap_out
+ %typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>)
+%enddef
diff --git a/python/pyarmnn/src/pyarmnn/swig/typemaps/permutation_vector.i b/python/pyarmnn/src/pyarmnn/swig/typemaps/permutation_vector.i
new file mode 100644
index 0000000000..daa9663fb1
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/typemaps/permutation_vector.i
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%define %permutation_vector_typemap(TYPE1, TYPE2)
+ %typemap(in) (TYPE1, TYPE2) {
+ if (PyTuple_Check($input)) {
+ PyObject* seq = $input;
+
+ $2 = PySequence_Fast_GET_SIZE(seq);
+ $1 = (unsigned int*)PyMem_RawMalloc($2*sizeof(unsigned int));
+
+
+ if(!$1) {
+ PyErr_NoMemory();
+ SWIG_fail;
+ }
+ int size = (int)$2;
+ for(int i=0; i < size; i++) {
+ PyObject *longItem;
+ // Borrowed reference. No need to Py_DECREF
+ PyObject *item = PySequence_Fast_GET_ITEM(seq, i);
+ if(!item) {
+ PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple");
+ SWIG_fail;
+ }
+ // New reference. Need to Py_DECREF
+ longItem = PyNumber_Long(item);
+ if(!longItem) {
+ Py_XDECREF(longItem);
+ PyErr_SetString(PyExc_TypeError, "All elements must be numbers");
+ SWIG_fail;
+ }
+ $1[i] = (unsigned int)PyLong_AsUnsignedLong(longItem);
+ Py_XDECREF(longItem);
+ }
+
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Argument is not a tuple");
+ SWIG_fail;
+ }
+ }
+
+ %typemap(freearg) (TYPE1, TYPE2) {
+ PyMem_RawFree($1);
+ }
+%enddef
+
+%define %clear_permutation_vector_typemap(TYPE1, TYPE2)
+ %typemap(in) (TYPE1, TYPE2);
+ %typemap(freearg) (TYPE1, TYPE2);
+%enddef
diff --git a/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_memory.i b/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_memory.i
new file mode 100644
index 0000000000..de38a63b97
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_memory.i
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%define %mutable_memory(TYPEMAP)
+ %typemap(in) (TYPEMAP) {
+ int res; void *buf = 0;
+ Py_buffer view;
+ res = PyObject_GetBuffer($input, &view, PyBUF_WRITABLE);
+ buf = view.buf;
+ PyBuffer_Release(&view);
+ if (res < 0) {
+ PyErr_Clear();
+ %argument_fail(res, "(TYPEMAP)", $symname, $argnum);
+ }
+ $1 = buf;
+ }
+
+ %typemap(typecheck) (TYPEMAP) {
+ $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0;
+ }
+%enddef
+
+%define %clear_mutable_memory(TYPEMAP)
+ %typemap(in) (TYPEMAP);
+ %typemap(typecheck) (TYPEMAP);
+%enddef
+
+%define %const_memory(TYPEMAP)
+ %typemap(in) (TYPEMAP) {
+ int res; void *buf = 0;
+ Py_buffer view;
+ res = PyObject_GetBuffer($input, &view, PyBUF_CONTIG_RO);
+ buf = view.buf;
+ PyBuffer_Release(&view);
+ if (res < 0) {
+ PyErr_Clear();
+ %argument_fail(res, "(TYPEMAP)", $symname, $argnum);
+ }
+ $1 = buf;
+ }
+
+ %typemap(typecheck) (TYPEMAP) {
+ $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0;
+ }
+%enddef
+
+%define %clear_const_memory(TYPEMAP)
+ %typemap(in) (TYPEMAP);
+ %typemap(typecheck) (TYPEMAP);
+%enddef
+
diff --git a/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_shape.i b/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_shape.i
new file mode 100644
index 0000000000..3e7c98f4c6
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/typemaps/tensor_shape.i
@@ -0,0 +1,51 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%define %tensor_shape_typemap(TYPE1, TYPE2)
+ %typemap(in) (TYPE1, TYPE2) {
+ if (PyTuple_Check($input)) {
+ PyObject* seq = $input;
+
+ $1 = PySequence_Fast_GET_SIZE(seq);
+ $2 = (unsigned int*)PyMem_RawMalloc($1*sizeof(unsigned int));
+
+ if(!$2) {
+ PyErr_NoMemory();
+ SWIG_fail;
+ }
+ int size = (int)$1;
+ for(int i=0; i < size; i++) {
+ PyObject *longItem;
+ // Borrowed reference. No need to Py_DECREF
+ PyObject *item = PySequence_Fast_GET_ITEM(seq, i);
+ if(!item) {
+ PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple");
+ SWIG_fail;
+ }
+ // New reference. Need to Py_DECREF
+ longItem = PyNumber_Long(item);
+ if(!longItem) {
+ Py_XDECREF(longItem);
+ PyErr_SetString(PyExc_TypeError, "All elements must be numbers");
+ SWIG_fail;
+ }
+ $2[i] = (unsigned int)PyLong_AsUnsignedLong(longItem);
+ Py_XDECREF(longItem);
+ }
+
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Argument is not a tuple");
+ SWIG_fail;
+ }
+ }
+
+ %typemap(freearg) (TYPE1, TYPE2) {
+ PyMem_RawFree($2);
+ }
+%enddef
+
+%define %clear_tensor_shape_typemap(TYPE1, TYPE2)
+ %typemap(in) (TYPE1, TYPE2);
+ %typemap(freearg) (TYPE1, TYPE2);
+%enddef
diff --git a/python/pyarmnn/src/pyarmnn/swig/typemaps/vectors.i b/python/pyarmnn/src/pyarmnn/swig/typemaps/vectors.i
new file mode 100644
index 0000000000..1566bb0c3b
--- /dev/null
+++ b/python/pyarmnn/src/pyarmnn/swig/typemaps/vectors.i
@@ -0,0 +1,235 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+%inline %{
+//-------------------------from_python_to_cpp-----------------------------
+ int from_python_to_cpp(PyObject *obj, long* val) {
+ return SWIG_AsVal_long(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, int* val) {
+ return SWIG_AsVal_int(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, unsigned int* val) {
+ return SWIG_AsVal_unsigned_SS_int(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, unsigned short* val) {
+ return SWIG_AsVal_unsigned_SS_short(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, float* val) {
+ return SWIG_AsVal_float(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, double* val) {
+ return SWIG_AsVal_double(obj, val);
+ }
+#ifdef SWIG_LONG_LONG_AVAILABLE
+ int from_python_to_cpp(PyObject *obj, unsigned long long* val) {
+ return SWIG_AsVal_unsigned_SS_long_SS_long(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, long long* val) {
+ return SWIG_AsVal_long_SS_long(obj, val);
+ }
+#endif
+
+ int from_python_to_cpp(PyObject *obj, unsigned long* val) {
+ return SWIG_AsVal_unsigned_SS_long(obj, val);
+ }
+
+ int from_python_to_cpp(PyObject *obj, short* val) {
+ return SWIG_AsVal_short(obj, val);
+ }
+//-------------------------from_cpp_to_python-----------------------------
+ PyObject* from_cpp_to_python(long& val){
+ return PyLong_FromLong(val);
+ }
+
+ PyObject* from_cpp_to_python(unsigned long& val){
+ return PyLong_FromUnsignedLong(val);
+ }
+#ifdef SWIG_LONG_LONG_AVAILABLE
+ PyObject* from_cpp_to_python(long long& val){
+ return PyLong_FromLongLong(val);
+ }
+
+ PyObject* from_cpp_to_python(unsigned long long& val){
+ return PyLong_FromUnsignedLongLong(val);
+ }
+#endif
+
+ PyObject* from_cpp_to_python(int& val){
+ return PyLong_FromLong(static_cast<long>(val));
+ }
+
+ PyObject* from_cpp_to_python(unsigned int& val){
+ return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
+ }
+
+ PyObject* from_cpp_to_python(unsigned short& val){
+ return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
+ }
+
+ PyObject* from_cpp_to_python(float& val){
+ return PyFloat_FromDouble(static_cast<double>(val));
+ }
+
+ PyObject* from_cpp_to_python(double& val){
+ return PyFloat_FromDouble(val);
+ }
+
+ template<class U, class V>
+ PyObject* from_cpp_to_python(std::pair<U, V>& pair){
+
+ PyObject* first = from_cpp_to_python(pair.first);
+ PyObject* second = from_cpp_to_python(pair.second);
+
+ PyObject* localTuple = PyTuple_New(2);
+
+ if (!localTuple) {
+ Py_XDECREF(localTuple);
+ return PyErr_NoMemory();
+ }
+
+ PyTuple_SetItem(localTuple, 0, first);
+ PyTuple_SetItem(localTuple, 1, second);
+
+ return localTuple;
+ }
+
+ template<class K, class V>
+ static int from_python_to_cpp(PyObject* tuple, std::pair<K,V>* out) {
+
+ if (PyTuple_Check(tuple)) {
+
+ auto size = PyTuple_Size(tuple);
+
+ if (size != 2) {
+ return SWIG_ValueError;
+ }
+
+ PyObject* firstPy = PyTuple_GetItem(tuple, 0);
+ PyObject* secondPy = PyTuple_GetItem(tuple, 1);
+
+ if (!SWIG_IsOK(from_python_to_cpp(firstPy, &out->first))) {
+ return SWIG_TypeError;
+ }
+
+ if (!SWIG_IsOK(from_python_to_cpp(secondPy, &out->second))) {
+ return SWIG_TypeError;
+ }
+
+ } else {
+ return SWIG_TypeError;
+ }
+
+ return SWIG_OK;
+ }
+//---------------std::vector <-> python list ---------------------
+ template<class T>
+ static PyObject* from_vector_to_python(std::vector<T>* input) {
+ Py_ssize_t size = input->size();
+ PyObject* localList = PyList_New(size);
+
+ if (!localList) {
+ Py_XDECREF(localList);
+ return PyErr_NoMemory();
+ }
+
+ for(Py_ssize_t i = 0; i < size; ++i) {
+
+ PyObject* obj = from_cpp_to_python(input->at(i));
+
+ PyList_SET_ITEM(localList, i, obj);
+ }
+ return localList;
+ }
+
+ template<class T>
+ int from_python_to_vector(PyObject* seq, std::vector<T>& out) {
+ Py_ssize_t size = PySequence_Fast_GET_SIZE(seq);
+
+ for(Py_ssize_t i=0; i < size; i++) {
+ PyObject *item = PySequence_Fast_GET_ITEM(seq, i);
+ if(!item) {
+ PyErr_SetString(PyExc_TypeError, "Failed to read data from given sequence");
+
+ return SWIG_NullReferenceError;
+ }
+
+ T element;
+ int res = from_python_to_cpp(item, &element);
+ if (!SWIG_IsOK(res)) {
+ PyObject* itemRepr = PyObject_Repr(item);
+ PyObject* itemStrObj = PyUnicode_AsEncodedString(itemRepr, "utf-8", "replace");
+ const char* itemStr = PyBytes_AS_STRING(itemStrObj);
+
+ auto pythonType = Py_TYPE(item)->tp_name;
+
+ PyErr_Format(PyExc_TypeError, "Failed to convert python input value %s of type '%s' to C type '%s'", itemStr, pythonType, typeid(T).name());
+ Py_XDECREF(itemStrObj);
+ Py_XDECREF(itemRepr);
+                // `seq` is a borrowed reference owned by the caller; it must not be decref'd here.
+ return SWIG_TypeError;
+ }
+ out.push_back(element);
+ }
+ return SWIG_OK;
+ }
+
+%}
+
+%define %list_to_vector(TYPEMAP...)
+
+// this typemap works for struct argument set
+ %typemap(in) TYPEMAP* (TYPEMAP tmp) {
+ if (PySequence_Check($input)) {
+
+ if (from_python_to_vector($input, tmp) < 0) {
+ SWIG_fail;
+ }
+
+ $1 = &tmp;
+
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method.");
+ SWIG_fail;
+ }
+ }
+
+// this typemap works for constructor
+ %typemap(in) TYPEMAP {
+ if (PySequence_Check($input)) {
+ if (from_python_to_vector($input, $1) < 0){
+ SWIG_fail;
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method.");
+ SWIG_fail;
+ }
+ }
+
+// this typemap works for struct argument get
+
+ %typemap(out) TYPEMAP* {
+ $result = from_vector_to_python($1);
+ }
+
+// this typemap works for overloaded methods and ctors
+ %typemap(typecheck) (TYPEMAP) {
+ $1 = PySequence_Check($input) ? 1 : 0;
+ }
+
+%enddef
+
+%define %list_to_vector_clear(TYPEMAP...)
+ %typemap(in) (TYPEMAP);
+ %typemap(in) TYPEMAP* (TYPEMAP tmp);
+ %typemap(typecheck) (TYPEMAP);
+ %typemap(out) TYPEMAP*;
+%enddef
+
diff --git a/python/pyarmnn/swig_generate.py b/python/pyarmnn/swig_generate.py
new file mode 100755
index 0000000000..56cabbc6d8
--- /dev/null
+++ b/python/pyarmnn/swig_generate.py
@@ -0,0 +1,64 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""
+This script executes SWIG commands to generate armnn and armnn version wrappers.
+This script cannot be moved to the ./scripts dir because it uses the find_includes function from the setup.py script.
+Both scripts must be in the same folder.
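+
+Usage (illustrative, run from the python/pyarmnn directory):
+    python swig_generate.py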
+"""
+import os
+import re
+import subprocess
+from pathlib import Path
+
+from setup import find_includes
+
+__current_dir = Path(__file__).parent.absolute()
+
+
+def check_swig_version(version: str):
+ proc = subprocess.Popen(["swig -version"],
+ stdout=subprocess.PIPE, shell=True)
+ result = proc.communicate()[0].decode("utf-8")
+
+ pattern = re.compile(r"(?<=Version ).+(?=$)", re.MULTILINE)
+ match = pattern.search(result)
+
+ if match:
+ version_string = match.group(0).strip()
+ print(f"Swig version = {version_string}")
+ return version_string.startswith(version)
+ else:
+ print(f"Failed to find version string in 'swig -version':\n {result}")
+ return False
+
+
+def generate_wrap(name, extr_includes):
+ print(f'\nGenerating wrappers for {name}\n')
+
+ code = os.system(f"swig -v -c++ -python"
+ f" -Wall"
+ f" -o {__current_dir}/src/pyarmnn/_generated/{name}_wrap.cpp "
+ f"-outdir {__current_dir}/src/pyarmnn/_generated "
+ f"{extr_includes} "
+ f"-I{__current_dir}/src/pyarmnn/swig "
+ f"{__current_dir}/src/pyarmnn/swig/{name}.i")
+
+ if code != 0:
+ raise RuntimeError(f"Failed to generate {name} ext.")
+
+
+if __name__ == "__main__":
+    if not check_swig_version('4.'):
+        raise RuntimeError("Wrong SWIG version was found. Expected SWIG version is 4.x.x")
+
+ armnn_includes = find_includes()
+
+ generate_wrap('armnn_version', f"-I{'-I'.join(armnn_includes)} ")
+ generate_wrap('armnn', f"-I{'-I'.join(armnn_includes)} ")
+
+ generate_wrap('armnn_caffeparser', f"-I{'-I'.join(armnn_includes)} ")
+ generate_wrap('armnn_onnxparser', f"-I{'-I'.join(armnn_includes)} ")
+ generate_wrap('armnn_tfparser', f"-I{'-I'.join(armnn_includes)} ")
+ generate_wrap('armnn_tfliteparser', f"-I{'-I'.join(armnn_includes)} ")
+
+
diff --git a/python/pyarmnn/test/test_caffe_parser.py b/python/pyarmnn/test/test_caffe_parser.py
new file mode 100644
index 0000000000..6780f64b9b
--- /dev/null
+++ b/python/pyarmnn/test/test_caffe_parser.py
@@ -0,0 +1,133 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import pyarmnn as ann
+import numpy as np
+
+
+@pytest.fixture()
+def parser(shared_data_folder):
+ """
+    Parse and set up the test network (squeezenet) to be used for the tests below
+ """
+
+ # Create caffe parser
+ parser = ann.ICaffeParser()
+
+ # Specify path to model
+ path_to_model = os.path.join(shared_data_folder, 'squeezenet_v1.1_armnn.caffemodel')
+
+ # Specify the tensor shape relative to the input [1, 3, 227, 227]
+ tensor_shape = {'data': ann.TensorShape((1, 3, 227, 227))}
+
+ # Specify the requested_outputs
+ requested_outputs = ["prob"]
+
+    # Parse caffe binary & create network
+ parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs)
+
+ yield parser
+
+
+def test_caffe_parser_swig_destroy():
+ assert ann.ICaffeParser.__swig_destroy__, "There is a swig python destructor defined"
+ assert ann.ICaffeParser.__swig_destroy__.__name__ == "delete_ICaffeParser"
+
+
+def test_check_caffe_parser_swig_ownership(parser):
+ # Check to see that SWIG has ownership for parser. This instructs SWIG to take
+ # ownership of the return value. This allows the value to be automatically
+ # garbage-collected when it is no longer in use
+ assert parser.thisown
+
+
+def test_get_network_input_binding_info(parser):
+ input_binding_info = parser.GetNetworkInputBindingInfo("data")
+
+ tensor = input_binding_info[1]
+ assert tensor.GetDataType() == 1
+ assert tensor.GetNumDimensions() == 4
+ assert tensor.GetNumElements() == 154587
+
+
+def test_get_network_output_binding_info(parser):
+ output_binding_info1 = parser.GetNetworkOutputBindingInfo("prob")
+
+ # Check the tensor info retrieved from GetNetworkOutputBindingInfo
+ tensor1 = output_binding_info1[1]
+
+ assert tensor1.GetDataType() == 1
+ assert tensor1.GetNumDimensions() == 4
+ assert tensor1.GetNumElements() == 1000
+
+
+@pytest.mark.skip("Skipped. Currently there is a bug in armnn (RecordByRecordCaffeParser). To be enabled it once fixed.")
+def test_filenotfound_exception(shared_data_folder):
+ parser = ann.ICaffeParser()
+
+ # path to model
+ path_to_model = os.path.join(shared_data_folder, 'some_unknown_network.caffemodel')
+
+ # generic tensor shape [1, 1, 1, 1]
+ tensor_shape = {'data': ann.TensorShape((1, 1, 1, 1))}
+
+ # requested_outputs
+ requested_outputs = [""]
+
+ with pytest.raises(RuntimeError) as err:
+ parser.CreateNetworkFromBinaryFile(path_to_model, tensor_shape, requested_outputs)
+
+ # Only check for part of the exception since the exception returns
+ # absolute path which will change on different machines.
+ assert 'Failed to open graph file' in str(err.value)
+
+
+def test_caffe_parser_end_to_end(shared_data_folder):
+    parser = ann.ICaffeParser()
+
+ # Load the network specifying the inputs and outputs
+ input_name = "data"
+ tensor_shape = {input_name: ann.TensorShape((1, 3, 227, 227))}
+ requested_outputs = ["prob"]
+
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'squeezenet_v1.1_armnn.caffemodel'),
+ tensor_shape, requested_outputs)
+
+ # Specify preferred backend
+ preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+
+ input_binding_info = parser.GetNetworkInputBindingInfo(input_name)
+
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ assert 0 == len(messages)
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+
+ assert "" == messages
+
+    # Load test image data stored in squeezenet_v1_1_input.npy
+ input_tensor_data = np.load(os.path.join(shared_data_folder, 'caffe_parser/squeezenet_v1_1_input.npy'))
+ input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
+
+    # Load output binding info and create the output tensors
+ outputs_binding_info = []
+ for output_name in requested_outputs:
+ outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(output_name))
+ output_tensors = ann.make_output_tensors(outputs_binding_info)
+
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
+
+ # Load golden output file to compare the output results with
+ expected_output = np.load(os.path.join(shared_data_folder, 'caffe_parser/squeezenet_v1_1_output.npy'))
+
+ # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
+ np.testing.assert_almost_equal(output_vectors, expected_output, 4)
diff --git a/python/pyarmnn/test/test_const_tensor.py b/python/pyarmnn/test/test_const_tensor.py
new file mode 100644
index 0000000000..b0c42b8b6c
--- /dev/null
+++ b/python/pyarmnn/test/test_const_tensor.py
@@ -0,0 +1,199 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pytest
+import numpy as np
+
+import pyarmnn as ann
+
+
+def _get_tensor_info(dt):
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt)
+
+ return tensor_info
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 4)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 4)).astype(np.float16)),
+ (ann.DataType_QuantisedAsymm8, np.random.randint(1, size=(2, 4)).astype(np.uint8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 4)).astype(np.int32)),
+ (ann.DataType_QuantisedSymm16, np.random.randint(1, size=(2, 4)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'int32', 'int16'])
+def test_const_tensor_too_many_elements(dt, data):
+ tensor_info = _get_tensor_info(dt)
+ num_bytes = tensor_info.GetNumBytes()
+
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(tensor_info, data)
+
+ assert 'ConstTensor requires {} bytes, {} provided.'.format(num_bytes, data.nbytes) in str(err.value)
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 2)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 2)).astype(np.float16)),
+ (ann.DataType_QuantisedAsymm8, np.random.randint(1, size=(2, 2)).astype(np.uint8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 2)).astype(np.int32)),
+ (ann.DataType_QuantisedSymm16, np.random.randint(1, size=(2, 2)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'int32', 'int16'])
+def test_const_tensor_too_little_elements(dt, data):
+ tensor_info = _get_tensor_info(dt)
+ num_bytes = tensor_info.GetNumBytes()
+
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(tensor_info, data)
+
+ assert 'ConstTensor requires {} bytes, {} provided.'.format(num_bytes, data.nbytes) in str(err.value)
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.float16)),
+ (ann.DataType_QuantisedAsymm8, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.uint8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int32)),
+ (ann.DataType_QuantisedSymm16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'int32', 'int16'])
+def test_const_tensor_multi_dimensional_input(dt, data):
+ tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt), data)
+
+ assert data.size == tensor.GetNumElements()
+ assert data.nbytes == tensor.GetNumBytes()
+ assert dt == tensor.GetDataType()
+ assert tensor.get_memory_area().data
+
+
+def test_create_const_tensor_from_tensor():
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor = ann.Tensor(tensor_info)
+ copied_tensor = ann.ConstTensor(tensor)
+
+ assert copied_tensor != tensor, "Different objects"
+ assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
+ assert copied_tensor.get_memory_area().data == tensor.get_memory_area().data, "Same memory area"
+ assert copied_tensor.GetNumElements() == tensor.GetNumElements()
+ assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
+ assert copied_tensor.GetDataType() == tensor.GetDataType()
+
+
+def test_const_tensor_from_tensor_has_memory_area_access_after_deletion_of_original_tensor():
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor = ann.Tensor(tensor_info)
+
+ tensor.get_memory_area()[0] = 100
+
+ copied_mem = tensor.get_memory_area().copy()
+
+ assert 100 == copied_mem[0], "Memory was copied correctly"
+
+ copied_tensor = ann.ConstTensor(tensor)
+
+ tensor.get_memory_area()[0] = 200
+
+ assert 200 == tensor.get_memory_area()[0], "Tensor and copied Tensor point to the same memory"
+ assert 200 == copied_tensor.get_memory_area()[0], "Tensor and copied Tensor point to the same memory"
+
+ assert 100 == copied_mem[0], "Copied test memory not affected"
+
+ copied_mem[0] = 200 # modify test memory to equal copied Tensor
+
+ del tensor
+    np.testing.assert_array_equal(copied_tensor.get_memory_area(), copied_mem,
+                                  err_msg="After initial tensor was deleted, copied Tensor still has "
+                                          "its memory as expected")
+
+
+def test_create_const_tensor_incorrect_args():
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor('something', 'something')
+
+ expected_error_message = "Incorrect number of arguments or type of arguments provided to create Const Tensor."
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ # -1 not in data type enum
+ (-1, np.random.randint(1, size=(2, 3)).astype(np.float32)),
+ ], ids=['unknown'])
+def test_const_tensor_unsupported_datatype(dt, data):
+ tensor_info = _get_tensor_info(dt)
+
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(tensor_info, data)
+
+ assert 'The data type provided for this Tensor is not supported: -1' in str(err.value)
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, [[1, 1, 1], [1, 1, 1]]),
+ (ann.DataType_Float16, [[1, 1, 1], [1, 1, 1]]),
+ (ann.DataType_QuantisedAsymm8, [[1, 1, 1], [1, 1, 1]])
+ ], ids=['float32', 'float16', 'unsigned int8'])
+def test_const_tensor_incorrect_input_datatype(dt, data):
+ tensor_info = _get_tensor_info(dt)
+
+ with pytest.raises(TypeError) as err:
+ ann.ConstTensor(tensor_info, data)
+
+ assert 'Data must be provided as a numpy array.' in str(err.value)
+
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.float16)),
+ (ann.DataType_QuantisedAsymm8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
+ (ann.DataType_QuantisedSymm16, np.random.randint(1, size=(2, 3)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'int32', 'int16'])
+class TestNumpyDataTypes:
+
+ def test_copy_const_tensor(self, dt, data):
+ tensor_info = _get_tensor_info(dt)
+ tensor = ann.ConstTensor(tensor_info, data)
+ copied_tensor = ann.ConstTensor(tensor)
+
+ assert copied_tensor != tensor, "Different objects"
+ assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
+ assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area"
+ assert copied_tensor.GetNumElements() == tensor.GetNumElements()
+ assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
+ assert copied_tensor.GetDataType() == tensor.GetDataType()
+
+ def test_const_tensor__str__(self, dt, data):
+ tensor_info = _get_tensor_info(dt)
+ d_type = tensor_info.GetDataType()
+ num_dimensions = tensor_info.GetNumDimensions()
+ num_bytes = tensor_info.GetNumBytes()
+ num_elements = tensor_info.GetNumElements()
+ tensor = ann.ConstTensor(tensor_info, data)
+
+ assert str(tensor) == "ConstTensor{{DataType: {}, NumBytes: {}, NumDimensions: " \
+ "{}, NumElements: {}}}".format(d_type, num_bytes, num_dimensions, num_elements)
+
+ def test_const_tensor_with_info(self, dt, data):
+ tensor_info = _get_tensor_info(dt)
+ elements = tensor_info.GetNumElements()
+ num_bytes = tensor_info.GetNumBytes()
+ d_type = dt
+
+ tensor = ann.ConstTensor(tensor_info, data)
+
+ assert tensor_info != tensor.GetInfo(), "Different objects"
+ assert elements == tensor.GetNumElements()
+ assert num_bytes == tensor.GetNumBytes()
+ assert d_type == tensor.GetDataType()
+
+ def test_immutable_memory(self, dt, data):
+ tensor_info = _get_tensor_info(dt)
+
+ tensor = ann.ConstTensor(tensor_info, data)
+
+ with pytest.raises(ValueError) as err:
+ tensor.get_memory_area()[0] = 0
+
+ assert 'is read-only' in str(err.value)
diff --git a/python/pyarmnn/test/test_descriptors.py b/python/pyarmnn/test/test_descriptors.py
new file mode 100644
index 0000000000..edca7ed024
--- /dev/null
+++ b/python/pyarmnn/test/test_descriptors.py
@@ -0,0 +1,528 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import inspect
+
+import pytest
+
+import pyarmnn as ann
+import numpy as np
+import pyarmnn._generated.pyarmnn as generated
+
+
+def test_activation_descriptor_default_values():
+ desc = ann.ActivationDescriptor()
+ assert desc.m_Function == ann.ActivationFunction_Sigmoid
+ assert desc.m_A == 0
+ assert desc.m_B == 0
+
+
+def test_argminmax_descriptor_default_values():
+ desc = ann.ArgMinMaxDescriptor()
+ assert desc.m_Function == ann.ArgMinMaxFunction_Min
+ assert desc.m_Axis == -1
+
+
+def test_batchnormalization_descriptor_default_values():
+ desc = ann.BatchNormalizationDescriptor()
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+    assert np.allclose(0.0001, desc.m_Eps)
+
+
+def test_batchtospacend_descriptor_default_values():
+ desc = ann.BatchToSpaceNdDescriptor()
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+ assert [1, 1] == desc.m_BlockShape
+ assert [(0, 0), (0, 0)] == desc.m_Crops
+
+
+def test_batchtospacend_descriptor_assignment():
+ desc = ann.BatchToSpaceNdDescriptor()
+ desc.m_BlockShape = (1, 2, 3)
+
+    crops = [(1, 2), (3, 4)]
+    original_size = len(crops)
+    desc.m_Crops = crops
+
+    # Assigning to the descriptor must not mutate the original list
+    assert original_size == len(crops)
+ desc.m_DataLayout = ann.DataLayout_NHWC
+ assert ann.DataLayout_NHWC == desc.m_DataLayout
+ assert [1, 2, 3] == desc.m_BlockShape
+ assert [(1, 2), (3, 4)] == desc.m_Crops
+
+
+@pytest.mark.parametrize("input_shape, value, vtype", [([-1], -1, 'int'), (("one", "two"), "'one'", 'str'),
+ ([1.33, 4.55], 1.33, 'float'),
+ ([{1: "one"}], "{1: 'one'}", 'dict')], ids=lambda x: str(x))
+def test_batchtospacend_descriptor_rubbish_assignment_shape(input_shape, value, vtype):
+ desc = ann.BatchToSpaceNdDescriptor()
+ with pytest.raises(TypeError) as err:
+ desc.m_BlockShape = input_shape
+
+ assert "Failed to convert python input value {} of type '{}' to C type 'j'".format(value, vtype) in str(err.value)
+
+
+@pytest.mark.parametrize("input_crops, value, vtype", [([(1, 2), (3, 4, 5)], '(3, 4, 5)', 'tuple'),
+ ([(1, 'one')], "(1, 'one')", 'tuple'),
+ ([-1], -1, 'int'),
+ ([(1, (1, 2))], '(1, (1, 2))', 'tuple'),
+ ([[1, [1, 2]]], '[1, [1, 2]]', 'list')
+ ], ids=lambda x: str(x))
+def test_batchtospacend_descriptor_rubbish_assignment_crops(input_crops, value, vtype):
+ desc = ann.BatchToSpaceNdDescriptor()
+ with pytest.raises(TypeError) as err:
+ desc.m_Crops = input_crops
+
+ assert "Failed to convert python input value {} of type '{}' to C type".format(value, vtype) in str(err.value)
+
+
+def test_batchtospacend_descriptor_empty_assignment():
+ desc = ann.BatchToSpaceNdDescriptor()
+ desc.m_BlockShape = []
+ assert [] == desc.m_BlockShape
+
+
+def test_batchtospacend_descriptor_ctor():
+ desc = ann.BatchToSpaceNdDescriptor([1, 2, 3], [(4, 5), (6, 7)])
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+ assert [1, 2, 3] == desc.m_BlockShape
+ assert [(4, 5), (6, 7)] == desc.m_Crops
+
+
+def test_convolution2d_descriptor_default_values():
+ desc = ann.Convolution2dDescriptor()
+ assert desc.m_PadLeft == 0
+ assert desc.m_PadTop == 0
+ assert desc.m_PadRight == 0
+ assert desc.m_PadBottom == 0
+ assert desc.m_StrideX == 0
+ assert desc.m_StrideY == 0
+ assert desc.m_DilationX == 1
+ assert desc.m_DilationY == 1
+ assert desc.m_BiasEnabled == False
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_depthtospace_descriptor_default_values():
+ desc = ann.DepthToSpaceDescriptor()
+ assert desc.m_BlockSize == 1
+ assert desc.m_DataLayout == ann.DataLayout_NHWC
+
+
+def test_depthwise_convolution2d_descriptor_default_values():
+ desc = ann.DepthwiseConvolution2dDescriptor()
+ assert desc.m_PadLeft == 0
+ assert desc.m_PadTop == 0
+ assert desc.m_PadRight == 0
+ assert desc.m_PadBottom == 0
+ assert desc.m_StrideX == 0
+ assert desc.m_StrideY == 0
+ assert desc.m_DilationX == 1
+ assert desc.m_DilationY == 1
+ assert desc.m_BiasEnabled == False
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_detectionpostprocess_descriptor_default_values():
+ desc = ann.DetectionPostProcessDescriptor()
+ assert desc.m_MaxDetections == 0
+ assert desc.m_MaxClassesPerDetection == 1
+ assert desc.m_DetectionsPerClass == 1
+ assert desc.m_NmsScoreThreshold == 0
+ assert desc.m_NmsIouThreshold == 0
+ assert desc.m_NumClasses == 0
+ assert desc.m_UseRegularNms == False
+ assert desc.m_ScaleH == 0
+ assert desc.m_ScaleW == 0
+ assert desc.m_ScaleX == 0
+ assert desc.m_ScaleY == 0
+
+
+def test_fakequantization_descriptor_default_values():
+ desc = ann.FakeQuantizationDescriptor()
+    assert np.allclose(6, desc.m_Max)
+    assert np.allclose(-6, desc.m_Min)
+
+
+def test_fully_connected_descriptor_default_values():
+ desc = ann.FullyConnectedDescriptor()
+ assert desc.m_BiasEnabled == False
+ assert desc.m_TransposeWeightMatrix == False
+
+
+def test_instancenormalization_descriptor_default_values():
+ desc = ann.InstanceNormalizationDescriptor()
+ assert desc.m_Gamma == 1
+ assert desc.m_Beta == 0
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+    assert np.allclose(1e-12, desc.m_Eps)
+
+
+def test_lstm_descriptor_default_values():
+ desc = ann.LstmDescriptor()
+ assert desc.m_ActivationFunc == 1
+ assert desc.m_ClippingThresCell == 0
+ assert desc.m_ClippingThresProj == 0
+ assert desc.m_CifgEnabled == True
+ assert desc.m_PeepholeEnabled == False
+ assert desc.m_ProjectionEnabled == False
+ assert desc.m_LayerNormEnabled == False
+
+
+def test_l2normalization_descriptor_default_values():
+ desc = ann.L2NormalizationDescriptor()
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+    assert np.allclose(1e-12, desc.m_Eps)
+
+
+def test_mean_descriptor_default_values():
+ desc = ann.MeanDescriptor()
+ assert desc.m_KeepDims == False
+
+
+def test_normalization_descriptor_default_values():
+ desc = ann.NormalizationDescriptor()
+ assert desc.m_NormChannelType == ann.NormalizationAlgorithmChannel_Across
+ assert desc.m_NormMethodType == ann.NormalizationAlgorithmMethod_LocalBrightness
+ assert desc.m_NormSize == 0
+ assert desc.m_Alpha == 0
+ assert desc.m_Beta == 0
+ assert desc.m_K == 0
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_origin_descriptor_default_values():
+ desc = ann.ConcatDescriptor()
+ assert 0 == desc.GetNumViews()
+ assert 0 == desc.GetNumDimensions()
+ assert 1 == desc.GetConcatAxis()
+
+
+def test_origin_descriptor_incorrect_views():
+ desc = ann.ConcatDescriptor(2, 2)
+ with pytest.raises(RuntimeError) as err:
+ desc.SetViewOriginCoord(1000, 100, 1000)
+ assert "Failed to set view origin coordinates." in str(err.value)
+
+
+def test_origin_descriptor_ctor():
+ desc = ann.ConcatDescriptor(2, 2)
+ value = 5
+ for i in range(desc.GetNumViews()):
+ for j in range(desc.GetNumDimensions()):
+ desc.SetViewOriginCoord(i, j, value+i)
+ desc.SetConcatAxis(1)
+
+ assert 2 == desc.GetNumViews()
+ assert 2 == desc.GetNumDimensions()
+ assert [5, 5] == desc.GetViewOrigin(0)
+ assert [6, 6] == desc.GetViewOrigin(1)
+ assert 1 == desc.GetConcatAxis()
+
+
+def test_pad_descriptor_default_values():
+ desc = ann.PadDescriptor()
+ assert desc.m_PadValue == 0
+
+
+def test_permute_descriptor_default_values():
+ pv = ann.PermutationVector((0, 2, 3, 1))
+ desc = ann.PermuteDescriptor(pv)
+ assert desc.m_DimMappings.GetSize() == 4
+ assert desc.m_DimMappings[0] == 0
+ assert desc.m_DimMappings[1] == 2
+ assert desc.m_DimMappings[2] == 3
+ assert desc.m_DimMappings[3] == 1
+
+
+def test_pooling_descriptor_default_values():
+ desc = ann.Pooling2dDescriptor()
+ assert desc.m_PoolType == ann.PoolingAlgorithm_Max
+ assert desc.m_PadLeft == 0
+ assert desc.m_PadTop == 0
+ assert desc.m_PadRight == 0
+ assert desc.m_PadBottom == 0
+ assert desc.m_PoolHeight == 0
+ assert desc.m_PoolWidth == 0
+ assert desc.m_StrideX == 0
+ assert desc.m_StrideY == 0
+ assert desc.m_OutputShapeRounding == ann.OutputShapeRounding_Floor
+ assert desc.m_PaddingMethod == ann.PaddingMethod_Exclude
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_reshape_descriptor_default_values():
+ desc = ann.ReshapeDescriptor()
+ # check the empty Targetshape
+ assert desc.m_TargetShape.GetNumDimensions() == 0
+
+
+def test_resize_descriptor_default_values():
+ desc = ann.ResizeDescriptor()
+ assert desc.m_TargetWidth == 0
+ assert desc.m_TargetHeight == 0
+ assert desc.m_Method == ann.ResizeMethod_NearestNeighbor
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_spacetobatchnd_descriptor_default_values():
+ desc = ann.SpaceToBatchNdDescriptor()
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_spacetodepth_descriptor_default_values():
+ desc = ann.SpaceToDepthDescriptor()
+ assert desc.m_BlockSize == 1
+ assert desc.m_DataLayout == ann.DataLayout_NHWC
+
+
+def test_stack_descriptor_default_values():
+ desc = ann.StackDescriptor()
+ assert desc.m_Axis == 0
+ assert desc.m_NumInputs == 0
+ # check the empty Inputshape
+ assert desc.m_InputShape.GetNumDimensions() == 0
+
+
+def test_slice_descriptor_assignment():
+ desc = ann.SliceDescriptor()
+ desc.m_Begin = [1, 2, 3, 4, 5]
+ desc.m_Size = (1, 2, 3, 4)
+
+ assert [1, 2, 3, 4, 5] == desc.m_Begin
+ assert [1, 2, 3, 4] == desc.m_Size
+
+
+def test_slice_descriptor_ctor():
+ desc = ann.SliceDescriptor([1, 2, 3, 4, 5], (1, 2, 3, 4))
+
+ assert [1, 2, 3, 4, 5] == desc.m_Begin
+ assert [1, 2, 3, 4] == desc.m_Size
+
+
+def test_strided_slice_descriptor_default_values():
+ desc = ann.StridedSliceDescriptor()
+ desc.m_Begin = [1, 2, 3, 4, 5]
+ desc.m_End = [6, 7, 8, 9, 10]
+ desc.m_Stride = (10, 10)
+ desc.m_BeginMask = 1
+ desc.m_EndMask = 2
+ desc.m_ShrinkAxisMask = 3
+ desc.m_EllipsisMask = 4
+ desc.m_NewAxisMask = 5
+
+ assert [1, 2, 3, 4, 5] == desc.m_Begin
+ assert [6, 7, 8, 9, 10] == desc.m_End
+ assert [10, 10] == desc.m_Stride
+ assert 1 == desc.m_BeginMask
+ assert 2 == desc.m_EndMask
+ assert 3 == desc.m_ShrinkAxisMask
+ assert 4 == desc.m_EllipsisMask
+ assert 5 == desc.m_NewAxisMask
+
+
+def test_strided_slice_descriptor_ctor():
+ desc = ann.StridedSliceDescriptor([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], (10, 10))
+ desc.m_Begin = [1, 2, 3, 4, 5]
+ desc.m_End = [6, 7, 8, 9, 10]
+ desc.m_Stride = (10, 10)
+
+ assert [1, 2, 3, 4, 5] == desc.m_Begin
+ assert [6, 7, 8, 9, 10] == desc.m_End
+ assert [10, 10] == desc.m_Stride
+
+
+def test_softmax_descriptor_default_values():
+ desc = ann.SoftmaxDescriptor()
+ assert desc.m_Axis == -1
+    assert np.allclose(1.0, desc.m_Beta)
+
+
+def test_space_to_batch_nd_descriptor_default_values():
+ desc = ann.SpaceToBatchNdDescriptor()
+ assert [1, 1] == desc.m_BlockShape
+ assert [(0, 0), (0, 0)] == desc.m_PadList
+ assert ann.DataLayout_NCHW == desc.m_DataLayout
+
+
+def test_space_to_batch_nd_descriptor_assigned_values():
+ desc = ann.SpaceToBatchNdDescriptor()
+ desc.m_BlockShape = (90, 100)
+ desc.m_PadList = [(1, 2), (3, 4)]
+ assert [90, 100] == desc.m_BlockShape
+ assert [(1, 2), (3, 4)] == desc.m_PadList
+ assert ann.DataLayout_NCHW == desc.m_DataLayout
+
+
+def test_space_to_batch_nd_descriptor_ctor():
+ desc = ann.SpaceToBatchNdDescriptor((1, 2, 3), [(1, 2), (3, 4)])
+ assert [1, 2, 3] == desc.m_BlockShape
+ assert [(1, 2), (3, 4)] == desc.m_PadList
+ assert ann.DataLayout_NCHW == desc.m_DataLayout
+
+
+def test_transpose_convolution2d_descriptor_default_values():
+    desc = ann.TransposeConvolution2dDescriptor()
+ assert desc.m_PadLeft == 0
+ assert desc.m_PadTop == 0
+ assert desc.m_PadRight == 0
+ assert desc.m_PadBottom == 0
+ assert desc.m_StrideX == 0
+ assert desc.m_StrideY == 0
+ assert desc.m_BiasEnabled == False
+ assert desc.m_DataLayout == ann.DataLayout_NCHW
+
+
+def test_view_descriptor_default_values():
+ desc = ann.SplitterDescriptor()
+ assert 0 == desc.GetNumViews()
+ assert 0 == desc.GetNumDimensions()
+
+
+def test_view_descriptor_incorrect_input():
+ desc = ann.SplitterDescriptor(2, 3)
+ with pytest.raises(RuntimeError) as err:
+ desc.SetViewOriginCoord(1000, 100, 1000)
+ assert "Failed to set view origin coordinates." in str(err.value)
+
+ with pytest.raises(RuntimeError) as err:
+ desc.SetViewSize(1000, 100, 1000)
+ assert "Failed to set view size." in str(err.value)
+
+
+def test_view_descriptor_ctor():
+ desc = ann.SplitterDescriptor(2, 3)
+ value_size = 1
+ value_orig_coord = 5
+ for i in range(desc.GetNumViews()):
+ for j in range(desc.GetNumDimensions()):
+ desc.SetViewOriginCoord(i, j, value_orig_coord+i)
+ desc.SetViewSize(i, j, value_size+i)
+
+ assert 2 == desc.GetNumViews()
+ assert 3 == desc.GetNumDimensions()
+ assert [5, 5] == desc.GetViewOrigin(0)
+ assert [6, 6] == desc.GetViewOrigin(1)
+ assert [1, 1] == desc.GetViewSizes(0)
+ assert [2, 2] == desc.GetViewSizes(1)
+
+
+def test_createdescriptorforconcatenation_ctor():
+ input_shape_vector = [ann.TensorShape((2, 1)), ann.TensorShape((3, 1)), ann.TensorShape((4, 1))]
+ desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
+ assert 3 == desc.GetNumViews()
+ assert 0 == desc.GetConcatAxis()
+ assert 2 == desc.GetNumDimensions()
+    # View origins are retrievable for each input view
+    assert 2 == len(desc.GetViewOrigin(0))
+    assert 2 == len(desc.GetViewOrigin(1))
+
+
+def test_createdescriptorforconcatenation_wrong_shape_for_axis():
+ input_shape_vector = [ann.TensorShape((1, 2)), ann.TensorShape((3, 4)), ann.TensorShape((5, 6))]
+ with pytest.raises(RuntimeError) as err:
+ desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
+
+ assert "All inputs to concatenation must be the same size along all dimensions except the concatenation dimension" in str(
+ err.value)
+
+
+@pytest.mark.parametrize("input_shape_vector", [([-1, "one"]),
+ ([1.33, 4.55]),
+ ([{1: "one"}])], ids=lambda x: str(x))
+def test_createdescriptorforconcatenation_rubbish_assignment_shape_vector(input_shape_vector):
+ with pytest.raises(TypeError) as err:
+ desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
+
+ assert "in method 'CreateDescriptorForConcatenation', argument 1 of type 'std::vector< armnn::TensorShape,std::allocator< armnn::TensorShape > >'" in str(
+ err.value)
+
+
+generated_classes = inspect.getmembers(generated, inspect.isclass)
+generated_classes_names = list(map(lambda x: x[0], generated_classes))
+@pytest.mark.parametrize("desc_name", ['ActivationDescriptor',
+ 'ArgMinMaxDescriptor',
+ 'PermuteDescriptor',
+ 'SoftmaxDescriptor',
+ 'ConcatDescriptor',
+ 'SplitterDescriptor',
+ 'Pooling2dDescriptor',
+ 'FullyConnectedDescriptor',
+ 'Convolution2dDescriptor',
+ 'DepthwiseConvolution2dDescriptor',
+ 'DetectionPostProcessDescriptor',
+ 'NormalizationDescriptor',
+ 'L2NormalizationDescriptor',
+ 'BatchNormalizationDescriptor',
+ 'InstanceNormalizationDescriptor',
+ 'BatchToSpaceNdDescriptor',
+ 'FakeQuantizationDescriptor',
+ 'ResizeDescriptor',
+ 'ReshapeDescriptor',
+ 'SpaceToBatchNdDescriptor',
+ 'SpaceToDepthDescriptor',
+ 'LstmDescriptor',
+ 'MeanDescriptor',
+ 'PadDescriptor',
+ 'SliceDescriptor',
+ 'StackDescriptor',
+ 'StridedSliceDescriptor',
+ 'TransposeConvolution2dDescriptor'])
+class TestDescriptorMassChecks:
+
+ def test_desc_implemented(self, desc_name):
+ assert desc_name in generated_classes_names
+
+ def test_desc_equal(self, desc_name):
+        desc_class = next(filter(lambda x: x[0] == desc_name, generated_classes))[1]
+
+ assert desc_class() == desc_class()
+
+
diff --git a/python/pyarmnn/test/test_generated.py b/python/pyarmnn/test/test_generated.py
new file mode 100644
index 0000000000..c038b20ccb
--- /dev/null
+++ b/python/pyarmnn/test/test_generated.py
@@ -0,0 +1,52 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import inspect
+from typing import Tuple
+
+import pytest
+
+import pyarmnn._generated.pyarmnn as generated_armnn
+import pyarmnn._generated.pyarmnn_caffeparser as generated_caffe
+import pyarmnn._generated.pyarmnn_onnxparser as generated_onnx
+import pyarmnn._generated.pyarmnn_tfliteparser as generated_tflite
+import pyarmnn._generated.pyarmnn_tfparser as generated_tf
+
+swig_independent_classes = ('IBackend',
+ 'IDeviceSpec',
+ 'IConnectableLayer',
+ 'IInputSlot',
+ 'IOutputSlot',
+ 'IProfiler')
+
+
+def get_classes(swig_independent_classes: Tuple):
+    # We need to ignore some SWIG-generated classes: some are abstract and
+    # cannot be created through the SWIG wrapper, so they don't need a destructor.
+    # SWIG also generates its own metaclass, which needs to be ignored too.
+ ignored_class_names = (*swig_independent_classes, '_SwigNonDynamicMeta')
+ return list(filter(lambda x: x[0] not in ignored_class_names,
+ inspect.getmembers(generated_armnn, inspect.isclass) +
+ inspect.getmembers(generated_caffe, inspect.isclass) +
+ inspect.getmembers(generated_tflite, inspect.isclass) +
+ inspect.getmembers(generated_onnx, inspect.isclass) +
+ inspect.getmembers(generated_tf, inspect.isclass)))
+
+
+@pytest.mark.parametrize("class_instance", get_classes(swig_independent_classes), ids=lambda x: 'class={}'.format(x[0]))
+class TestPyOwnedClasses:
+
+ def test_destructors_exist_per_class(self, class_instance):
+ assert getattr(class_instance[1], '__swig_destroy__', None)
+
+ def test_owned(self, class_instance):
+ assert getattr(class_instance[1], 'thisown', None)
+
+
+@pytest.mark.parametrize("class_instance", swig_independent_classes)
+class TestPyIndependentClasses:
+
+ def test_destructors_does_not_exist_per_class(self, class_instance):
+ assert not getattr(class_instance[1], '__swig_destroy__', None)
+
+ def test_not_owned(self, class_instance):
+ assert not getattr(class_instance[1], 'thisown', None)
diff --git a/python/pyarmnn/test/test_iconnectable.py b/python/pyarmnn/test/test_iconnectable.py
new file mode 100644
index 0000000000..91a39f3b2c
--- /dev/null
+++ b/python/pyarmnn/test/test_iconnectable.py
@@ -0,0 +1,143 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pytest
+
+import pyarmnn as ann
+
+
+@pytest.fixture(scope="function")
+def network():
+ return ann.INetwork()
+
+
+class TestIInputIOutputIConnectable:
+
+ def test_input_slot(self, network):
+ # Create input, addition & output layer
+ input1 = network.AddInputLayer(0, "input1")
+ input2 = network.AddInputLayer(1, "input2")
+ add = network.AddAdditionLayer("addition")
+ output = network.AddOutputLayer(0, "output")
+
+ # Connect the input/output slots for each layer
+ input1.GetOutputSlot(0).Connect(add.GetInputSlot(0))
+ input2.GetOutputSlot(0).Connect(add.GetInputSlot(1))
+ add.GetOutputSlot(0).Connect(output.GetInputSlot(0))
+
+ # Check IInputSlot GetConnection()
+ input_slot = add.GetInputSlot(0)
+ input_slot_connection = input_slot.GetConnection()
+
+ assert isinstance(input_slot_connection, ann.IOutputSlot)
+
+ del input_slot_connection
+
+ assert input_slot.GetConnection()
+ assert isinstance(input_slot.GetConnection(), ann.IOutputSlot)
+
+ del input_slot
+
+ assert add.GetInputSlot(0)
+
+ def test_output_slot(self, network):
+
+ # Create input, addition & output layer
+ input1 = network.AddInputLayer(0, "input1")
+ input2 = network.AddInputLayer(1, "input2")
+ add = network.AddAdditionLayer("addition")
+ output = network.AddOutputLayer(0, "output")
+
+ # Connect the input/output slots for each layer
+ input1.GetOutputSlot(0).Connect(add.GetInputSlot(0))
+ input2.GetOutputSlot(0).Connect(add.GetInputSlot(1))
+ add.GetOutputSlot(0).Connect(output.GetInputSlot(0))
+
+ # Check IInputSlot GetConnection()
+ add_get_input_connection = add.GetInputSlot(0).GetConnection()
+ output_get_input_connection = output.GetInputSlot(0).GetConnection()
+
+ # Check IOutputSlot GetConnection()
+ add_get_output_connect = add.GetOutputSlot(0).GetConnection(0)
+ assert isinstance(add_get_output_connect.GetConnection(), ann.IOutputSlot)
+
+ # Test IOutputSlot GetNumConnections() & CalculateIndexOnOwner()
+ assert add_get_input_connection.GetNumConnections() == 1
+ assert len(add_get_input_connection) == 1
+ assert add_get_input_connection[0]
+ assert add_get_input_connection.CalculateIndexOnOwner() == 0
+
+ # Check GetOwningLayerGuid(). Check that it is different for add and output layer
+ assert add_get_input_connection.GetOwningLayerGuid() != output_get_input_connection.GetOwningLayerGuid()
+
+ # Set TensorInfo
+ test_tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+
+ # Check IsTensorInfoSet()
+ assert not add_get_input_connection.IsTensorInfoSet()
+ add_get_input_connection.SetTensorInfo(test_tensor_info)
+ assert add_get_input_connection.IsTensorInfoSet()
+
+ # Check GetTensorInfo()
+ output_tensor_info = add_get_input_connection.GetTensorInfo()
+ assert 2 == output_tensor_info.GetNumDimensions()
+ assert 6 == output_tensor_info.GetNumElements()
+
+ # Check Disconnect()
+        assert output_get_input_connection.GetNumConnections() == 1  # add's output slot has 1 connection (to the output layer)
+ add.GetOutputSlot(0).Disconnect(output.GetInputSlot(0)) # disconnect add.OutputSlot0 from Output.InputSlot0
+ assert output_get_input_connection.GetNumConnections() == 0
+
+    def test_output_slot__out_of_range(self, network):
+        # Create an input layer to check output slot get-item handling
+        input1 = network.AddInputLayer(0, "input1")
+
+        output_slot = input1.GetOutputSlot(0)
+        with pytest.raises(ValueError) as err:
+            output_slot[1]
+
+ assert "Invalid index 1 provided" in str(err.value)
+
+ def test_iconnectable_guid(self, network):
+
+ # Check IConnectable GetGuid()
+ # Note Guid can change based on which tests are run so
+ # checking here that each layer does not have the same guid
+ add_id = network.AddAdditionLayer().GetGuid()
+ output_id = network.AddOutputLayer(0).GetGuid()
+ assert add_id != output_id
+
+ def test_iconnectable_layer_functions(self, network):
+
+ # Create input, addition & output layer
+ input1 = network.AddInputLayer(0, "input1")
+ input2 = network.AddInputLayer(1, "input2")
+ add = network.AddAdditionLayer("addition")
+ output = network.AddOutputLayer(0, "output")
+
+ # Check GetNumInputSlots(), GetName() & GetNumOutputSlots()
+ assert input1.GetNumInputSlots() == 0
+ assert input1.GetName() == "input1"
+ assert input1.GetNumOutputSlots() == 1
+
+ assert input2.GetNumInputSlots() == 0
+ assert input2.GetName() == "input2"
+ assert input2.GetNumOutputSlots() == 1
+
+ assert add.GetNumInputSlots() == 2
+ assert add.GetName() == "addition"
+ assert add.GetNumOutputSlots() == 1
+
+ assert output.GetNumInputSlots() == 1
+ assert output.GetName() == "output"
+ assert output.GetNumOutputSlots() == 0
+
+ # Check GetOutputSlot()
+ input1_get_output = input1.GetOutputSlot(0)
+ assert input1_get_output.GetNumConnections() == 0
+ assert len(input1_get_output) == 0
+
+ # Check GetInputSlot()
+ add_get_input = add.GetInputSlot(0)
+        add_get_input.GetConnection()  # safe to call even when nothing is connected yet
+ assert isinstance(add_get_input, ann.IInputSlot)
+
diff --git a/python/pyarmnn/test/test_network.py b/python/pyarmnn/test/test_network.py
new file mode 100644
index 0000000000..5334cc50c0
--- /dev/null
+++ b/python/pyarmnn/test/test_network.py
@@ -0,0 +1,310 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+import stat
+import platform
+
+import pytest
+import pyarmnn as ann
+
+
+@pytest.fixture(scope="function")
+def get_runtime(shared_data_folder, network_file):
+    parser = ann.ITfLiteParser()
+ preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, network_file))
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ yield preferred_backends, network, runtime
+
+
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite',
+ 'ssd_mobilenetv1.tflite'
+ ],
+ ids=['inception v3', 'mobilenetssd v1'])
+def test_optimize_executes_successfully(network_file, get_runtime):
+ preferred_backends = [ann.BackendId('CpuRef')]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+    assert len(messages) == 0, 'With only CpuRef, there should be no warnings regardless of architecture.'
+ assert opt_network
+
+
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite',
+ ],
+ ids=['inception v3'])
+def test_optimize_owned_by_python(network_file, get_runtime):
+ preferred_backends = get_runtime[0]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ assert opt_network.thisown
+
+
+@pytest.mark.juno
+@pytest.mark.parametrize("network_file",
+ [
+                             'inception_v3_quant.tflite'
+ ],
+ ids=['inception v3'])
+def test_optimize_executes_successfully_for_neon_backend_only(network_file, get_runtime):
+ preferred_backends = [ann.BackendId('CpuAcc')]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ assert 0 == len(messages)
+ assert opt_network
+
+
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite'
+ ],
+ ids=['inception v3'])
+def test_optimize_fails_for_invalid_backends(network_file, get_runtime):
+ invalid_backends = [ann.BackendId('Unknown')]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ with pytest.raises(RuntimeError) as err:
+ ann.Optimize(network, invalid_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ expected_error_message = "None of the preferred backends [Unknown ] are supported."
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite'
+ ],
+ ids=['inception v3'])
+def test_optimize_fails_for_no_backends_specified(network_file, get_runtime):
+ empty_backends = []
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ with pytest.raises(RuntimeError) as err:
+ ann.Optimize(network, empty_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ expected_error_message = "Invoked Optimize with no backends specified"
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite'
+ ],
+ ids=['inception v3'])
+def test_serialize_to_dot(network_file, get_runtime, tmpdir):
+ preferred_backends = get_runtime[0]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ dot_file_path = os.path.join(tmpdir, 'ssd.dot')
+ """Check that serialized file does not exist at the start, gets created after SerializeToDot and is not empty"""
+ assert not os.path.exists(dot_file_path)
+ opt_network.SerializeToDot(dot_file_path)
+
+ assert os.path.exists(dot_file_path)
+
+    with open(dot_file_path) as res_file:
+        dot_data = res_file.read()
+        assert len(dot_data) > 1
+        assert '[label=< [1,299,299,3] >]' in dot_data
+
+
+@pytest.mark.skipif(platform.processor() != 'x86_64', reason="Platform specific test")
+@pytest.mark.parametrize("network_file",
+ [
+ 'inception_v3_quant.tflite'
+ ],
+ ids=['inception v3'])
+def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
+ preferred_backends = get_runtime[0]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ """Create file, write to it and change mode to read-only"""
+ dot_file_path = os.path.join(tmpdir, 'ssd.dot')
+ f = open(dot_file_path, "w+")
+ f.write("test")
+ f.close()
+ os.chmod(dot_file_path, stat.S_IREAD)
+ assert os.path.exists(dot_file_path)
+
+ with pytest.raises(RuntimeError) as err:
+ opt_network.SerializeToDot(dot_file_path)
+
+ expected_error_message = "Failed to open dot file"
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.juno
+@pytest.mark.parametrize("network_file",
+ [
+ 'ssd_mobilenetv1.tflite'
+ ],
+ ids=['mobilenetssd v1'])
+def test_optimize_error_tuple(network_file, get_runtime):
+ preferred_backends = get_runtime[0]
+ network = get_runtime[1]
+ runtime = get_runtime[2]
+
+ opt_network, error_messages = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+    assert isinstance(error_messages, tuple)
+ assert 'WARNING: Layer of type DetectionPostProcess is not supported on requested backend CpuAcc for input data ' \
+ 'type QAsymm8' in error_messages[0]
+
+
+@pytest.mark.parametrize("method", [
+ 'AddAbsLayer',
+ 'AddActivationLayer',
+ 'AddAdditionLayer',
+ 'AddArgMinMaxLayer',
+ 'AddBatchNormalizationLayer',
+ 'AddBatchToSpaceNdLayer',
+ 'AddComparisonLayer',
+ 'AddConcatLayer',
+ 'AddConstantLayer',
+ 'AddConvolution2dLayer',
+ 'AddDepthToSpaceLayer',
+ 'AddDepthwiseConvolution2dLayer',
+ 'AddDequantizeLayer',
+ 'AddDetectionPostProcessLayer',
+ 'AddDivisionLayer',
+ 'AddFloorLayer',
+ 'AddFullyConnectedLayer',
+ 'AddGatherLayer',
+ 'AddInputLayer',
+ 'AddInstanceNormalizationLayer',
+ 'AddLogSoftmaxLayer',
+ 'AddL2NormalizationLayer',
+ 'AddLstmLayer',
+ 'AddMaximumLayer',
+ 'AddMeanLayer',
+ 'AddMergeLayer',
+ 'AddMinimumLayer',
+ 'AddMultiplicationLayer',
+ 'AddNormalizationLayer',
+ 'AddOutputLayer',
+ 'AddPadLayer',
+ 'AddPermuteLayer',
+ 'AddPooling2dLayer',
+ 'AddPreluLayer',
+ 'AddQuantizeLayer',
+ 'AddQuantizedLstmLayer',
+ 'AddReshapeLayer',
+ 'AddResizeLayer',
+ 'AddRsqrtLayer',
+ 'AddSliceLayer',
+ 'AddSoftmaxLayer',
+ 'AddSpaceToBatchNdLayer',
+ 'AddSpaceToDepthLayer',
+ 'AddSplitterLayer',
+ 'AddStackLayer',
+ 'AddStandInLayer',
+ 'AddStridedSliceLayer',
+ 'AddSubtractionLayer',
+ 'AddSwitchLayer',
+ 'AddTransposeConvolution2dLayer'
+])
+def test_network_method_exists(method):
+ assert getattr(ann.INetwork, method, None)
+
+
+def test_fullyconnected_layer_optional_none():
+ net = ann.INetwork()
+ layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
+ weights=ann.ConstTensor())
+
+ assert layer
+
+
+def test_fullyconnected_layer_optional_provided():
+ net = ann.INetwork()
+ layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor())
+
+ assert layer
+
+
+def test_fullyconnected_layer_all_args():
+ net = ann.INetwork()
+ layer = net.AddFullyConnectedLayer(fullyConnectedDescriptor=ann.FullyConnectedDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor(),
+ name='NAME1')
+
+ assert layer
+ assert 'NAME1' == layer.GetName()
+
+
+def test_DepthwiseConvolution2d_layer_optional_none():
+ net = ann.INetwork()
+ layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
+ weights=ann.ConstTensor())
+
+ assert layer
+
+
+def test_DepthwiseConvolution2d_layer_optional_provided():
+ net = ann.INetwork()
+ layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor())
+
+ assert layer
+
+
+def test_DepthwiseConvolution2d_layer_all_args():
+ net = ann.INetwork()
+ layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor(),
+ name='NAME1')
+
+ assert layer
+ assert 'NAME1' == layer.GetName()
+
+
+def test_Convolution2d_layer_optional_none():
+ net = ann.INetwork()
+ layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
+ weights=ann.ConstTensor())
+
+ assert layer
+
+
+def test_Convolution2d_layer_optional_provided():
+ net = ann.INetwork()
+ layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor())
+
+ assert layer
+
+
+def test_Convolution2d_layer_all_args():
+ net = ann.INetwork()
+ layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
+ weights=ann.ConstTensor(),
+ biases=ann.ConstTensor(),
+ name='NAME1')
+
+ assert layer
+ assert 'NAME1' == layer.GetName()
diff --git a/python/pyarmnn/test/test_onnx_parser.py b/python/pyarmnn/test/test_onnx_parser.py
new file mode 100644
index 0000000000..fe28b27e7f
--- /dev/null
+++ b/python/pyarmnn/test/test_onnx_parser.py
@@ -0,0 +1,110 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import pyarmnn as ann
+import numpy as np
+from typing import List
+
+@pytest.fixture()
+def parser(shared_data_folder):
+ """
+    Parse and set up the test network (mobilenetv2) used by the tests below.
+ """
+
+ # create onnx parser
+ parser = ann.IOnnxParser()
+
+ # path to model
+ path_to_model = os.path.join(shared_data_folder, 'mobilenetv2-1.0.onnx')
+
+ # parse onnx binary & create network
+ parser.CreateNetworkFromBinaryFile(path_to_model)
+
+ yield parser
+
+
+def test_onnx_parser_swig_destroy():
+ assert ann.IOnnxParser.__swig_destroy__, "There is a swig python destructor defined"
+ assert ann.IOnnxParser.__swig_destroy__.__name__ == "delete_IOnnxParser"
+
+
+def test_check_onnx_parser_swig_ownership(parser):
+    # Check that SWIG has ownership of the parser: with thisown set, the
+    # underlying object is destroyed automatically once the Python value
+    # is garbage-collected
+ assert parser.thisown
+
+
+def test_onnx_parser_get_network_input_binding_info(parser):
+ input_binding_info = parser.GetNetworkInputBindingInfo("data")
+
+ tensor = input_binding_info[1]
+ assert tensor.GetDataType() == 1
+ assert tensor.GetNumDimensions() == 4
+ assert tensor.GetNumElements() == 150528
+ assert tensor.GetQuantizationOffset() == 0
+ assert tensor.GetQuantizationScale() == 0
+
+
+def test_onnx_parser_get_network_output_binding_info(parser):
+ output_binding_info = parser.GetNetworkOutputBindingInfo("mobilenetv20_output_flatten0_reshape0")
+
+ tensor = output_binding_info[1]
+ assert tensor.GetDataType() == 1
+ assert tensor.GetNumDimensions() == 2
+ assert tensor.GetNumElements() == 1000
+ assert tensor.GetQuantizationOffset() == 0
+ assert tensor.GetQuantizationScale() == 0
+
+
+def test_onnx_filenotfound_exception(shared_data_folder):
+ parser = ann.IOnnxParser()
+
+ # path to model
+ path_to_model = os.path.join(shared_data_folder, 'some_unknown_model.onnx')
+
+ # parse onnx binary & create network
+
+ with pytest.raises(RuntimeError) as err:
+ parser.CreateNetworkFromBinaryFile(path_to_model)
+
+ # Only check for part of the exception since the exception returns
+ # absolute path which will change on different machines.
+ assert 'Invalid (null) filename' in str(err.value)
+
+
+def test_onnx_parser_end_to_end(shared_data_folder):
+    parser = ann.IOnnxParser()
+
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mobilenetv2-1.0.onnx'))
+
+ # load test image data stored in data.npy
+ input_binding_info = parser.GetNetworkInputBindingInfo("data")
+ input_tensor_data = np.load(os.path.join(shared_data_folder, 'onnx_parser/mobilenetv20_data.npy')).astype(np.float32)
+
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ assert 0 == len(messages)
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+
+ assert "" == messages
+
+ input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
+ output_tensors = ann.make_output_tensors([parser.GetNetworkOutputBindingInfo("mobilenetv20_output_flatten0_reshape0")])
+
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ output = ann.workload_tensors_to_ndarray(output_tensors)
+
+ # load golden output file to compare the output results with
+ golden_output = np.load(os.path.join(shared_data_folder, 'onnx_parser/mobilenetv20_output_flatten0_reshape0.npy'))
+
+ # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
+ np.testing.assert_almost_equal(output[0], golden_output, decimal=4)
diff --git a/python/pyarmnn/test/test_profiling_utilities.py b/python/pyarmnn/test/test_profiling_utilities.py
new file mode 100644
index 0000000000..57f32e80ac
--- /dev/null
+++ b/python/pyarmnn/test/test_profiling_utilities.py
@@ -0,0 +1,63 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+
+import pyarmnn as ann
+
+
+class MockIProfiler:
+ def __init__(self, json_string):
+ self._profile_json = json_string
+
+ def as_json(self):
+ return self._profile_json
+
+
+@pytest.fixture()
+def mock_profiler(shared_data_folder):
+ path_to_file = os.path.join(shared_data_folder, 'profile_out.json')
+ with open(path_to_file, 'r') as file:
+ profiler_output = file.read()
+ return MockIProfiler(profiler_output)
+
+
+def test_inference_exec(mock_profiler):
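+    # get_profiling_data parses the profiler's JSON output into overall
+    # inference times and per-workload execution times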
+ profiling_data_obj = ann.get_profiling_data(mock_profiler)
+
+ assert (len(profiling_data_obj.inference_data) > 0)
+ assert (len(profiling_data_obj.per_workload_execution_data) > 0)
+
+ # Check each total execution time
+ assert (profiling_data_obj.inference_data["execution_time"] == [16035243.953000, 16096248.590000, 16138614.290000,
+ 16140544.388000, 16228118.274000, 16543585.760000])
+ assert (profiling_data_obj.inference_data["time_unit"] == "us")
+
+
+@pytest.mark.parametrize("exec_times, unit, backend, workload", [([1233915.166, 1221125.149,
+ 1228359.494, 1235065.662,
+ 1244369.694, 1240633.922],
+ 'us',
+ 'CpuRef',
+ 'RefConvolution2dWorkload_Execute_#25'),
+ ([270.64, 256.379,
+ 269.664, 259.449,
+ 266.65, 277.05],
+ 'us',
+ 'CpuAcc',
+ 'NeonActivationWorkload_Execute_#70'),
+ ([715.474, 729.23,
+ 711.325, 729.151,
+ 741.231, 729.702],
+ 'us',
+ 'GpuAcc',
+ 'ClConvolution2dWorkload_Execute_#80')
+ ])
+def test_profiler_workloads(mock_profiler, exec_times, unit, backend, workload):
+ profiling_data_obj = ann.get_profiling_data(mock_profiler)
+
+ work_load_exec = profiling_data_obj.per_workload_execution_data[workload]
+ assert work_load_exec["execution_time"] == exec_times
+ assert work_load_exec["time_unit"] == unit
+ assert work_load_exec["backend"] == backend
diff --git a/python/pyarmnn/test/test_quantize_and_dequantize.py b/python/pyarmnn/test/test_quantize_and_dequantize.py
new file mode 100644
index 0000000000..d0c711ac13
--- /dev/null
+++ b/python/pyarmnn/test/test_quantize_and_dequantize.py
@@ -0,0 +1,79 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pytest
+import numpy as np
+
+import pyarmnn as ann
+
+# import generated so we can test for Dequantize_* and Quantize_*
+# functions not available in the public API.
+import pyarmnn._generated.pyarmnn as gen_ann
+
+
+@pytest.mark.parametrize('method', ['Quantize_uint8_t',
+ 'Quantize_int16_t',
+ 'Quantize_int32_t',
+ 'Dequantize_uint8_t',
+ 'Dequantize_int16_t',
+ 'Dequantize_int32_t'])
+def test_quantize_exists(method):
+ assert method in dir(gen_ann) and callable(getattr(gen_ann, method))
+
+
+@pytest.mark.parametrize('dt, min_value, max_value', [('uint8', 0, 255),
+                                                      ('int16', -32768, 32767),
+                                                      ('int32', -2147483648, 2147483647)])
+def test_quantize_output(dt, min_value, max_value):
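+    # assuming the usual affine scheme, quantized = clamp(round(value / scale) + offset, lo, hi);
+    # here round(3.3274 / 0.0262) + 128 == 255, the uint8 maximum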
+ result = ann.quantize(3.3274056911468506, 0.02620004490017891, 128, dt)
+    assert type(result) is int and min_value <= result <= max_value
+
+
+@pytest.mark.parametrize('dt', ['uint8',
+ 'int16',
+ 'int32'])
+def test_dequantize_output(dt):
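+    # assuming the affine scheme in reverse, dequantized = scale * (quantized - offset);
+    # here 0.0262 * (3 - 128) ≈ -3.275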
+ result = ann.dequantize(3, 0.02620004490017891, 128, dt)
+ assert type(result) is float
+
+
+def test_quantize_unsupported_dtype():
+ with pytest.raises(ValueError) as err:
+ ann.quantize(3.3274056911468506, 0.02620004490017891, 128, 'int8')
+
+ assert 'Unexpected target datatype int8 given.' in str(err.value)
+
+
+def test_dequantize_unsupported_dtype():
+ with pytest.raises(ValueError) as err:
+ ann.dequantize(3, 0.02620004490017891, 128, 'int8')
+
+ assert 'Unexpected value datatype int8 given.' in str(err.value)
+
+
+def test_dequantize_value_range():
+ with pytest.raises(ValueError) as err:
+ ann.dequantize(-1, 0.02620004490017891, 128, 'uint8')
+
+ assert 'Value is not within range of the given datatype uint8' in str(err.value)
+
+
+@pytest.mark.parametrize('dt, data', [('uint8', np.uint8(255)),
+ ('int16', np.int16(32767)),
+ ('int32', np.int32(2147483647)),
+
+ ('uint8', np.int16(255)),
+ ('uint8', np.int32(255)),
+
+ ('int16', np.uint8(255)),
+ ('int16', np.int32(32767)),
+
+ ('int32', np.uint8(255)),
+ ('int32', np.int16(32767))
+
+ ])
+def test_dequantize_numpy_dt(dt, data):
+ result = ann.dequantize(data, 1, 0, dt)
+
+ assert type(result) is float
+
+ assert np.float32(data) == result
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
new file mode 100644
index 0000000000..c20d347785
--- /dev/null
+++ b/python/pyarmnn/test/test_runtime.py
@@ -0,0 +1,275 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import numpy as np
+from PIL import Image
+import pyarmnn as ann
+import platform
+
+
+@pytest.fixture(scope="function")
+def random_runtime(shared_data_folder):
+ parser = ann.ITfLiteParser()
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'ssd_mobilenetv1.tflite'))
+ preferred_backends = [ann.BackendId('CpuRef')]
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ graphs_count = parser.GetSubgraphCount()
+
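+    # use the last subgraph; the test model is expected to contain just one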
+ graph_id = graphs_count - 1
+ input_names = parser.GetSubgraphInputTensorNames(graph_id)
+
+ input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
+ input_tensor_id = input_binding_info[0]
+
+ input_tensor_info = input_binding_info[1]
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+
+ input_data = np.random.randint(255, size=input_tensor_info.GetNumElements(), dtype=np.uint8)
+
+ const_tensor_pair = (input_tensor_id, ann.ConstTensor(input_tensor_info, input_data))
+
+ input_tensors = [const_tensor_pair]
+
+ output_tensors = []
+
+ for index, output_name in enumerate(output_names):
+ out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)
+
+ out_tensor_info = out_bind_info[1]
+ out_tensor_id = out_bind_info[0]
+
+ output_tensors.append((out_tensor_id,
+ ann.Tensor(out_tensor_info)))
+
+ yield preferred_backends, network, runtime, input_tensors, output_tensors
+
+
+@pytest.fixture(scope='function')
+def mobilenet_ssd_runtime(shared_data_folder):
+ parser = ann.ITfLiteParser()
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'ssd_mobilenetv1.tflite'))
+ graph_id = 0
+
+ input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, "normalized_input_image_tensor")
+
+ input_tensor_data = np.array(Image.open(os.path.join(shared_data_folder, 'cococat.jpeg')).resize((300, 300)), dtype=np.uint8)
+
+ preferred_backends = [ann.BackendId('CpuRef')]
+
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ print(messages)
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+
+ print(messages)
+
+ input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+ outputs_binding_info = []
+
+ for output_name in output_names:
+ outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name))
+
+ output_tensors = ann.make_output_tensors(outputs_binding_info)
+
+ yield runtime, net_id, input_tensors, output_tensors
+
+
+def test_python_disowns_network(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ runtime.LoadNetwork(opt_network)
+
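+    # LoadNetwork transfers ownership of the optimized network to the runtime,
+    # so the Python proxy should no longer own the underlying object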
+ assert not opt_network.thisown
+
+
+def test_load_network(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+ assert "" == messages
+ assert net_id == 0
+
+
+def test_load_network_properties_provided(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
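+    # assumed ctor signature: INetworkProperties(importEnabled, exportEnabled)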
+ properties = ann.INetworkProperties(True, True)
+ net_id, messages = runtime.LoadNetwork(opt_network, properties)
+ assert "" == messages
+ assert net_id == 0
+
+
+def test_unload_network_fails_for_invalid_net_id(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+
+ ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ with pytest.raises(RuntimeError) as err:
+ runtime.UnloadNetwork(9)
+
+ expected_error_message = "Failed to unload network."
+ assert expected_error_message in str(err.value)
+
+
+def test_enqueue_workload(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+ input_tensors = random_runtime[3]
+ output_tensors = random_runtime[4]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ net_id, _ = runtime.LoadNetwork(opt_network)
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+
+def test_enqueue_workload_fails_with_empty_input_tensors(random_runtime):
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+ input_tensors = []
+ output_tensors = random_runtime[4]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ net_id, _ = runtime.LoadNetwork(opt_network)
+ with pytest.raises(RuntimeError) as err:
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ expected_error_message = "Number of inputs provided does not match network."
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.skipif(platform.processor() != 'x86_64', reason="Only run on x86, this is because these are exact results "
+ "for x86 only. The Juno produces slightly different "
+ "results meaning this test would fail.")
+@pytest.mark.parametrize('count', [5])
+def test_multiple_inference_runs_yield_same_result(count, mobilenet_ssd_runtime):
+ """
+ Test that results remain consistent among multiple runs of the same inference.
+ """
+ runtime = mobilenet_ssd_runtime[0]
+ net_id = mobilenet_ssd_runtime[1]
+ input_tensors = mobilenet_ssd_runtime[2]
+ output_tensors = mobilenet_ssd_runtime[3]
+
+ expected_results = [[0.17047899961471558, 0.22598055005073547, 0.8146906495094299, 0.7677907943725586,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0],
+ [16.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.80078125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [1.0]]
+
+ for _ in range(count):
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
+
+ for i in range(len(expected_results)):
+ assert all(output_vectors[i] == expected_results[i])
+
+
+@pytest.mark.juno
+def test_juno_inference_results(mobilenet_ssd_runtime):
+ """
+ Test inference results are sensible on a Juno.
+ For the Juno we allow +/-3% compared to the results on x86.
+ """
+ runtime = mobilenet_ssd_runtime[0]
+ net_id = mobilenet_ssd_runtime[1]
+ input_tensors = mobilenet_ssd_runtime[2]
+ output_tensors = mobilenet_ssd_runtime[3]
+
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
+
+ expected_outputs = [[pytest.approx(0.17047899961471558, 0.03), pytest.approx(0.22598055005073547, 0.03),
+ pytest.approx(0.8146906495094299, 0.03), pytest.approx(0.7677907943725586, 0.03),
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0],
+ [16.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [0.80078125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [1.0]]
+
+ for i in range(len(expected_outputs)):
+ assert all(output_vectors[i] == expected_outputs[i])
+
+
+def test_enqueue_workload_with_profiler(random_runtime):
+ """
+ Tests ArmNN's profiling extension
+ """
+ preferred_backends = random_runtime[0]
+ network = random_runtime[1]
+ runtime = random_runtime[2]
+ input_tensors = random_runtime[3]
+ output_tensors = random_runtime[4]
+
+ opt_network, _ = ann.Optimize(network, preferred_backends,
+ runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ net_id, _ = runtime.LoadNetwork(opt_network)
+
+ profiler = runtime.GetProfiler(net_id)
+ # By default profiling should be turned off:
+ assert profiler.IsProfilingEnabled() is False
+
+ # Enable profiling:
+ profiler.EnableProfiling(True)
+ assert profiler.IsProfilingEnabled() is True
+
+ # Run the inference:
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ # Get profile output as a string:
+ str_profile = profiler.as_json()
+
+ # Verify that certain markers are present:
+ assert len(str_profile) != 0
+ assert str_profile.find('\"ArmNN\": {') > 0
+
+ # Get events analysis output as a string:
+ str_events_analysis = profiler.event_log()
+
+ assert "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" in str_events_analysis
+
+ assert profiler.thisown == 0
+
+
+def test_check_runtime_swig_ownership(random_runtime):
+    # Check that SWIG has ownership of the runtime: with thisown set, the
+    # underlying object is destroyed automatically once the Python value
+    # is garbage-collected
+ runtime = random_runtime[2]
+ assert runtime.thisown
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
new file mode 100644
index 0000000000..8061f26054
--- /dev/null
+++ b/python/pyarmnn/test/test_setup.py
@@ -0,0 +1,100 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+import sys
+import shutil
+
+import pytest
+
+sys.path.append(os.path.abspath('..'))
+from setup import find_armnn, find_includes, linux_gcc_lib_search, check_armnn_version
+
+
+@pytest.fixture(autouse=True)
+def _setup_armnn(tmpdir):
+ includes = str(os.path.join(tmpdir, 'include'))
+ libs = str(os.path.join(tmpdir, 'lib'))
+ os.environ["TEST_ARMNN_INCLUDE"] = includes
+ os.environ["TEST_ARMNN_LIB"] = libs
+ os.environ["EMPTY_ARMNN_INCLUDE"] = ''
+
+ os.mkdir(includes)
+ os.mkdir(libs)
+
+ with open(os.path.join(libs, "libarmnn.so"), "w"):
+ pass
+
+ with open(os.path.join(libs, "libarmnnSomeThing1.so"), "w"):
+ pass
+ with open(os.path.join(libs, "libarmnnSomeThing1.so.1"), "w"):
+ pass
+ with open(os.path.join(libs, "libarmnnSomeThing1.so.1.2"), "w"):
+ pass
+
+ with open(os.path.join(libs, "libarmnnSomeThing2.so"), "w"):
+ pass
+
+ with open(os.path.join(libs, "libSomeThing3.so"), "w"):
+ pass
+
+ yield
+
+ del os.environ["TEST_ARMNN_INCLUDE"]
+ del os.environ["TEST_ARMNN_LIB"]
+ del os.environ["EMPTY_ARMNN_INCLUDE"]
+ shutil.rmtree(includes)
+ shutil.rmtree(libs)
+
+
+def test_find_armnn(tmpdir):
+ lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so',
+ armnn_libs_env="TEST_ARMNN_LIB",
+ default_lib_search=("/lib",))
+ armnn_includes = find_includes(armnn_include_env="TEST_ARMNN_INCLUDE")
+
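+    # lib names keep a leading ':', presumably so they can be linked by exact name via gcc's -l: syntax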
+ assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names)
+ assert [os.path.join(tmpdir, 'lib')] == lib_paths
+ assert [os.path.join(tmpdir, 'include')] == armnn_includes
+
+
+def test_find_armnn_default_path(tmpdir):
+ lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so',
+ armnn_libs_env="RUBBISH_LIB",
+ default_lib_search=(os.environ["TEST_ARMNN_LIB"],))
+ armnn_includes = find_includes('TEST_ARMNN_INCLUDE')
+ assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names)
+ assert [os.path.join(tmpdir, 'lib')] == lib_paths
+ assert [os.path.join(tmpdir, 'include')] == armnn_includes
+
+
+def test_not_find_armnn(tmpdir):
+ with pytest.raises(RuntimeError) as err:
+ find_armnn(lib_name='libarmnn*.so', armnn_libs_env="RUBBISH_LIB",
+ default_lib_search=("/lib",))
+
+ assert 'ArmNN library libarmnn*.so was not found in (\'/lib\',)' in str(err.value)
+
+
+@pytest.mark.parametrize("env", ["RUBBISH_INCLUDE", "EMPTY_ARMNN_INCLUDE"])
+def test_rubbish_armnn_include(tmpdir, env):
+ includes = find_includes(armnn_include_env=env)
+ assert includes == ['/usr/local/include', '/usr/include']
+
+
+def test_gcc_search_path():
+ assert linux_gcc_lib_search()
+
+
+def test_armnn_version():
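+    # identical versions should validate without raising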
+ check_armnn_version('20190800', '20190800')
+
+
+def test_incorrect_armnn_version():
+ with pytest.raises(AssertionError) as err:
+ check_armnn_version('20190800', '20190500')
+
+ assert 'Expected ArmNN version is 201905 but installed ArmNN version is 201908' in str(err.value)
+
+
+def test_armnn_version_patch_does_not_matter():
+ check_armnn_version('20190800', '20190801')
diff --git a/python/pyarmnn/test/test_supported_backends.py b/python/pyarmnn/test/test_supported_backends.py
new file mode 100644
index 0000000000..443f8bac08
--- /dev/null
+++ b/python/pyarmnn/test/test_supported_backends.py
@@ -0,0 +1,51 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+import platform
+import pytest
+import pyarmnn as ann
+
+
+@pytest.fixture()
+def get_supported_backends_setup(shared_data_folder):
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ get_device_spec = runtime.GetDeviceSpec()
+ supported_backends = get_device_spec.GetSupportedBackends()
+
+ yield supported_backends
+
+
+def test_ownership():
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ device_spec = runtime.GetDeviceSpec()
+
+ assert not device_spec.thisown
+
+
+def test_to_string():
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ device_spec = runtime.GetDeviceSpec()
+ expected_str = "IDeviceSpec {{ supportedBackends: [" \
+ "{}" \
+ "]}}".format(', '.join(map(lambda b: str(b), device_spec.GetSupportedBackends())))
+
+ assert expected_str == str(device_spec)
+
+
+def test_get_supported_backends_cpu_ref(get_supported_backends_setup):
+ assert "CpuRef" in map(lambda b: str(b), get_supported_backends_setup)
+
+
+@pytest.mark.juno
+class TestNonCpuRefBackends:
+
+    @pytest.mark.parametrize("backend", ["CpuAcc", "NpuAcc"])
+ def test_get_supported_backends_cpu_acc(self, get_supported_backends_setup, backend):
+ assert backend in map(lambda b: str(b), get_supported_backends_setup)
+
diff --git a/python/pyarmnn/test/test_tensor.py b/python/pyarmnn/test/test_tensor.py
new file mode 100644
index 0000000000..bd043ed971
--- /dev/null
+++ b/python/pyarmnn/test/test_tensor.py
@@ -0,0 +1,135 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+from copy import copy
+
+import pytest
+import numpy as np
+import pyarmnn as ann
+
+
+def __get_tensor_info(dt):
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt)
+
+ return tensor_info
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, ann.DataType_QuantisedAsymm8])
+def test_create_tensor_with_info(dt):
+ tensor_info = __get_tensor_info(dt)
+ elements = tensor_info.GetNumElements()
+ num_bytes = tensor_info.GetNumBytes()
+ d_type = dt
+
+ tensor = ann.Tensor(tensor_info)
+
+ assert tensor_info != tensor.GetInfo(), "Different objects"
+ assert elements == tensor.GetNumElements()
+ assert num_bytes == tensor.GetNumBytes()
+ assert d_type == tensor.GetDataType()
+
+
+def test_create_tensor_undefined_datatype():
+ tensor_info = ann.TensorInfo()
+ tensor_info.SetDataType(99)
+
+ with pytest.raises(ValueError) as err:
+ ann.Tensor(tensor_info)
+
+ assert 'The data type provided for this Tensor is not supported.' in str(err.value)
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32])
+def test_tensor_memory_output(dt):
+ tensor_info = __get_tensor_info(dt)
+ tensor = ann.Tensor(tensor_info)
+
+    # the memory area is allocated up front but uninitialised until inference
+    # runs, so it holds arbitrary values at this point
+    assert tensor.get_memory_area().tolist()
+ assert 4 == tensor.get_memory_area().itemsize, "it is float32"
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, ann.DataType_QuantisedAsymm8])
+def test_tensor__str__(dt):
+ tensor_info = __get_tensor_info(dt)
+ elements = tensor_info.GetNumElements()
+ num_bytes = tensor_info.GetNumBytes()
+ d_type = dt
+ dimensions = tensor_info.GetNumDimensions()
+
+ tensor = ann.Tensor(tensor_info)
+
+ assert str(tensor) == "Tensor{{DataType: {}, NumBytes: {}, NumDimensions: " \
+ "{}, NumElements: {}}}".format(d_type, num_bytes, dimensions, elements)
+
+
+def test_create_empty_tensor():
+ tensor = ann.Tensor()
+
+ assert 0 == tensor.GetNumElements()
+ assert 0 == tensor.GetNumBytes()
+ assert tensor.get_memory_area() is None
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, ann.DataType_QuantisedAsymm8])
+def test_create_tensor_from_tensor(dt):
+ tensor_info = __get_tensor_info(dt)
+ tensor = ann.Tensor(tensor_info)
+ copied_tensor = ann.Tensor(tensor)
+
+ assert copied_tensor != tensor, "Different objects"
+ assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
+    np.testing.assert_array_equal(copied_tensor.get_memory_area(), tensor.get_memory_area(), err_msg="Same memory area")
+ assert copied_tensor.GetNumElements() == tensor.GetNumElements()
+ assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
+ assert copied_tensor.GetDataType() == tensor.GetDataType()
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, ann.DataType_QuantisedAsymm8])
+def test_copy_tensor(dt):
+ tensor = ann.Tensor(__get_tensor_info(dt))
+ copied_tensor = copy(tensor)
+
+ assert copied_tensor != tensor, "Different objects"
+ assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
+    np.testing.assert_array_equal(copied_tensor.get_memory_area(), tensor.get_memory_area(), err_msg="Same memory area")
+ assert copied_tensor.GetNumElements() == tensor.GetNumElements()
+ assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
+ assert copied_tensor.GetDataType() == tensor.GetDataType()
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16, ann.DataType_QuantisedAsymm8])
+def test_copied_tensor_has_memory_area_access_after_deletion_of_original_tensor(dt):
+
+ tensor = ann.Tensor(__get_tensor_info(dt))
+
+ tensor.get_memory_area()[0] = 100
+
+ initial_mem_copy = np.array(tensor.get_memory_area())
+
+ assert 100 == initial_mem_copy[0]
+
+ copied_tensor = ann.Tensor(tensor)
+
+ del tensor
+ np.testing.assert_array_equal(copied_tensor.get_memory_area(), initial_mem_copy)
+ assert 100 == copied_tensor.get_memory_area()[0]
+
+
+def test_create_tensor_incorrect_args():
+ with pytest.raises(ValueError) as err:
+ ann.Tensor('something', 'something')
+
+ expected_error_message = "Incorrect number of arguments or type of arguments provided to create Tensor."
+ assert expected_error_message in str(err.value)
+
+
+@pytest.mark.parametrize("dt", [ann.DataType_Float16])
+def test_tensor_memory_output_fp16(dt):
+ # Check Tensor with float16
+ tensor_info = __get_tensor_info(dt)
+ tensor = ann.Tensor(tensor_info)
+
+ assert tensor.GetNumElements() == 6
+ assert tensor.GetNumBytes() == 12
+ assert tensor.GetDataType() == ann.DataType_Float16
diff --git a/python/pyarmnn/test/test_tensor_conversion.py b/python/pyarmnn/test/test_tensor_conversion.py
new file mode 100644
index 0000000000..bfff200e49
--- /dev/null
+++ b/python/pyarmnn/test/test_tensor_conversion.py
@@ -0,0 +1,97 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import pyarmnn as ann
+import numpy as np
+
+
+@pytest.fixture(scope="function")
+def get_tensor_info_input(shared_data_folder):
+ """
+ Sample input tensor information.
+ """
+ parser = ann.ITfLiteParser()
+ parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'ssd_mobilenetv1.tflite'))
+ graph_id = 0
+
+ input_binding_info = [parser.GetNetworkInputBindingInfo(graph_id, 'normalized_input_image_tensor')]
+
+ yield input_binding_info
+
+
+@pytest.fixture(scope="function")
+def get_tensor_info_output(shared_data_folder):
+ """
+ Sample output tensor information.
+ """
+ parser = ann.ITfLiteParser()
+ parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'ssd_mobilenetv1.tflite'))
+ graph_id = 0
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+ outputs_binding_info = []
+
+ for output_name in output_names:
+ outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name))
+
+ yield outputs_binding_info
+
+
+def test_make_input_tensors(get_tensor_info_input):
+ input_tensor_info = get_tensor_info_input
+ input_data = []
+
+ for tensor_id, tensor_info in input_tensor_info:
+ input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.uint8))
+
+ input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
+ assert len(input_tensors) == 1
+
+ for tensor, tensor_info in zip(input_tensors, input_tensor_info):
+        # ConstTensor instances come from a factory function, so check the type by name
+ assert type(tensor[1]).__name__ == 'ConstTensor'
+ assert str(tensor[1].GetInfo()) == str(tensor_info[1])
+
+
+def test_make_output_tensors(get_tensor_info_output):
+ output_binding_info = get_tensor_info_output
+
+ output_tensors = ann.make_output_tensors(output_binding_info)
+ assert len(output_tensors) == 4
+
+ for tensor, tensor_info in zip(output_tensors, output_binding_info):
+ assert type(tensor[1]) == ann.Tensor
+ assert str(tensor[1].GetInfo()) == str(tensor_info[1])
+
+
+def test_workload_tensors_to_ndarray(get_tensor_info_output):
+ output_binding_info = get_tensor_info_output
+ output_tensors = ann.make_output_tensors(output_binding_info)
+
+ data = ann.workload_tensors_to_ndarray(output_tensors)
+
+    for i in range(len(output_tensors)):
+ assert len(data[i]) == output_tensors[i][1].GetNumElements()
+
+
+def test_make_input_tensors_fp16(get_tensor_info_input):
+ # Check ConstTensor with float16
+ input_tensor_info = get_tensor_info_input
+ input_data = []
+
+ for tensor_id, tensor_info in input_tensor_info:
+ input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.float16))
+ tensor_info.SetDataType(ann.DataType_Float16) # set datatype to float16
+
+ input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
+ assert len(input_tensors) == 1
+
+ for tensor, tensor_info in zip(input_tensors, input_tensor_info):
+        # ConstTensor instances come from a factory function, so check the type by name
+ assert type(tensor[1]).__name__ == 'ConstTensor'
+ assert str(tensor[1].GetInfo()) == str(tensor_info[1])
+ assert tensor[1].GetDataType() == ann.DataType_Float16
+ assert tensor[1].GetNumElements() == 270000
+    assert tensor[1].GetNumBytes() == 540000  # each element is two bytes
diff --git a/python/pyarmnn/test/test_tensor_info.py b/python/pyarmnn/test/test_tensor_info.py
new file mode 100644
index 0000000000..224f9d4ea9
--- /dev/null
+++ b/python/pyarmnn/test/test_tensor_info.py
@@ -0,0 +1,27 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pyarmnn as ann
+
+
+def test_tensor_info_ctor_shape():
+ tensor_shape = ann.TensorShape((1, 1, 2))
+
+ tensor_info = ann.TensorInfo(tensor_shape, ann.DataType_QuantisedAsymm8, 0.5, 1)
+
+ assert 2 == tensor_info.GetNumElements()
+ assert 3 == tensor_info.GetNumDimensions()
+ assert ann.DataType_QuantisedAsymm8 == tensor_info.GetDataType()
+ assert 0.5 == tensor_info.GetQuantizationScale()
+ assert 1 == tensor_info.GetQuantizationOffset()
+
+ shape = tensor_info.GetShape()
+
+ assert 2 == shape.GetNumElements()
+ assert 3 == shape.GetNumDimensions()
+
+
+def test_tensor_info__str__():
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QuantisedAsymm8, 0.5, 1)
+
+ assert tensor_info.__str__() == "TensorInfo{DataType: 2, IsQuantized: 1, QuantizationScale: 0.500000, " \
+ "QuantizationOffset: 1, NumDimensions: 2, NumElements: 6}"
diff --git a/python/pyarmnn/test/test_tensor_shape.py b/python/pyarmnn/test/test_tensor_shape.py
new file mode 100644
index 0000000000..604e9b1ca4
--- /dev/null
+++ b/python/pyarmnn/test/test_tensor_shape.py
@@ -0,0 +1,75 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pytest
+import pyarmnn as ann
+
+
+def test_tensor_shape_tuple():
+ tensor_shape = ann.TensorShape((1, 2, 3))
+
+ assert 3 == tensor_shape.GetNumDimensions()
+ assert 6 == tensor_shape.GetNumElements()
+
+
+def test_tensor_shape_one():
+ tensor_shape = ann.TensorShape((10,))
+ assert 1 == tensor_shape.GetNumDimensions()
+ assert 10 == tensor_shape.GetNumElements()
+
+
+@pytest.mark.skip("This will segfault before it reaches SWIG wrapper. ???")
+def test_tensor_shape_empty():
+ ann.TensorShape(())
+
+
+def test_tensor_shape_tuple_mess():
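+    # numeric strings and floats are accepted and coerced to integer dimensions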
+ tensor_shape = ann.TensorShape((1, "2", 3.0))
+
+ assert 3 == tensor_shape.GetNumDimensions()
+ assert 6 == tensor_shape.GetNumElements()
+
+
+def test_tensor_shape_list():
+
+ with pytest.raises(TypeError) as err:
+ ann.TensorShape([1, 2, 3])
+
+ assert "Argument is not a tuple" in str(err.value)
+
+
+def test_tensor_shape_tuple_mess_fail():
+
+ with pytest.raises(TypeError) as err:
+ ann.TensorShape((1, "two", 3.0))
+
+ assert "All elements must be numbers" in str(err.value)
+
+
+def test_tensor_shape_varags():
+ with pytest.raises(TypeError) as err:
+ ann.TensorShape(1, 2, 3)
+
+ assert "__init__() takes 2 positional arguments but 4 were given" in str(err.value)
+
+
+def test_tensor_shape__get_item_out_of_bounds():
+ tensor_shape = ann.TensorShape((1, 2, 3))
+ with pytest.raises(ValueError) as err:
+ for i in range(4):
+ tensor_shape[i]
+
+ assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value)
+
+
+def test_tensor_shape__set_item_out_of_bounds():
+ tensor_shape = ann.TensorShape((1, 2, 3))
+ with pytest.raises(ValueError) as err:
+ for i in range(4):
+ tensor_shape[i] = 1
+
+ assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value)
+
+
+def test_tensor_shape___str__():
+ tensor_shape = ann.TensorShape((1, 2, 3))
+
+ assert str(tensor_shape) == "TensorShape{Shape(1, 2, 3), NumDimensions: 3, NumElements: 6}"
diff --git a/python/pyarmnn/test/test_tf_parser.py b/python/pyarmnn/test/test_tf_parser.py
new file mode 100644
index 0000000000..b776603604
--- /dev/null
+++ b/python/pyarmnn/test/test_tf_parser.py
@@ -0,0 +1,133 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import pyarmnn as ann
+import numpy as np
+
+
+@pytest.fixture()
+def parser(shared_data_folder):
+ """
+    Parse and set up the test network (mobilenetv1) used by the tests below.
+ """
+
+ # create tf parser
+ parser = ann.ITfParser()
+
+ # path to model
+ path_to_model = os.path.join(shared_data_folder, 'mobilenet_v1_1.0_224.pb')
+
+ # tensor shape [1, 224, 224, 3]
+ tensorshape = {'input': ann.TensorShape((1, 224, 224, 3))}
+
+ # requested_outputs
+ requested_outputs = ["MobilenetV1/Predictions/Reshape_1"]
+
+ # parse tf binary & create network
+ parser.CreateNetworkFromBinaryFile(path_to_model, tensorshape, requested_outputs)
+
+ yield parser
+
+
+def test_tf_parser_swig_destroy():
+ assert ann.ITfParser.__swig_destroy__, "There is a swig python destructor defined"
+ assert ann.ITfParser.__swig_destroy__.__name__ == "delete_ITfParser"
+
+
+def test_check_tf_parser_swig_ownership(parser):
+    # Check that SWIG has ownership of the parser: with thisown set, the
+    # underlying object is destroyed automatically once the Python value
+    # is garbage-collected
+ assert parser.thisown
+
+
+def test_tf_parser_get_network_input_binding_info(parser):
+ input_binding_info = parser.GetNetworkInputBindingInfo("input")
+
+ tensor = input_binding_info[1]
+ assert tensor.GetDataType() == 1
+ assert tensor.GetNumDimensions() == 4
+ assert tensor.GetNumElements() == 150528
+ assert tensor.GetQuantizationOffset() == 0
+ assert tensor.GetQuantizationScale() == 0
+
+
+def test_tf_parser_get_network_output_binding_info(parser):
+ output_binding_info = parser.GetNetworkOutputBindingInfo("MobilenetV1/Predictions/Reshape_1")
+
+ tensor = output_binding_info[1]
+ assert tensor.GetDataType() == 1
+ assert tensor.GetNumDimensions() == 2
+ assert tensor.GetNumElements() == 1001
+ assert tensor.GetQuantizationOffset() == 0
+ assert tensor.GetQuantizationScale() == 0
+
+
+def test_tf_filenotfound_exception(shared_data_folder):
+ parser = ann.ITfParser()
+
+ # path to model
+ path_to_model = os.path.join(shared_data_folder, 'some_unknown_model.pb')
+
+ # tensor shape [1, 1, 1, 1]
+ tensorshape = {'input': ann.TensorShape((1, 1, 1, 1))}
+
+ # requested_outputs
+ requested_outputs = [""]
+
+ # parse tf binary & create network
+
+ with pytest.raises(RuntimeError) as err:
+ parser.CreateNetworkFromBinaryFile(path_to_model, tensorshape, requested_outputs)
+
+    # Only check for part of the exception message, since it contains an
+    # absolute path that differs between machines.
+ assert 'failed to open' in str(err.value)
+
+
+def test_tf_parser_end_to_end(shared_data_folder):
+    parser = ann.ITfParser()
+
+ tensorshape = {'input': ann.TensorShape((1, 224, 224, 3))}
+ requested_outputs = ["MobilenetV1/Predictions/Reshape_1"]
+
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mobilenet_v1_1.0_224.pb'),
+ tensorshape, requested_outputs)
+
+ input_binding_info = parser.GetNetworkInputBindingInfo("input")
+
+ # load test image data stored in input.npy
+ input_tensor_data = np.load(os.path.join(shared_data_folder, 'tf_parser/input.npy')).astype(np.float32)
+
+ preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+ assert 0 == len(messages)
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+
+ assert "" == messages
+
+ input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
+
+ outputs_binding_info = []
+
+ for output_name in requested_outputs:
+ outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(output_name))
+
+ output_tensors = ann.make_output_tensors(outputs_binding_info)
+
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+ output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
+
+ # load golden output file to compare the output results with
+ golden_output = np.load(os.path.join(shared_data_folder, 'tf_parser/golden_output.npy'))
+
+ # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
+ np.testing.assert_almost_equal(output_vectors, golden_output, decimal=4)
diff --git a/python/pyarmnn/test/test_tflite_parser.py b/python/pyarmnn/test/test_tflite_parser.py
new file mode 100644
index 0000000000..ab492f6e4f
--- /dev/null
+++ b/python/pyarmnn/test/test_tflite_parser.py
@@ -0,0 +1,173 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+
+import pytest
+import pyarmnn as ann
+import numpy as np
+
+
+@pytest.fixture()
+def parser(shared_data_folder):
+ """
+    Parse and set up the test network (ssd_mobilenet_v1) used by the tests below.
+ """
+ parser = ann.ITfLiteParser()
+ parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'ssd_mobilenetv1.tflite'))
+
+ yield parser
+
+
+def test_tflite_parser_swig_destroy():
+    assert ann.ITfLiteParser.__swig_destroy__, "Expected a SWIG Python destructor to be defined"
+ assert ann.ITfLiteParser.__swig_destroy__.__name__ == "delete_ITfLiteParser"
+
+
+def test_check_tflite_parser_swig_ownership(parser):
+    # Check that SWIG has ownership of the parser. When thisown is true, SWIG
+    # owns the underlying C++ object and destroys it automatically when the
+    # Python wrapper is garbage-collected.
+ assert parser.thisown
+
+
+def test_tflite_get_sub_graph_count(parser):
+ graphs_count = parser.GetSubgraphCount()
+ assert graphs_count == 1
+
+
+def test_tflite_get_network_input_binding_info(parser):
+ graphs_count = parser.GetSubgraphCount()
+ graph_id = graphs_count - 1
+
+ input_names = parser.GetSubgraphInputTensorNames(graph_id)
+
+ input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
+
+ tensor = input_binding_info[1]
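+    # DataType value 2 is assumed to correspond to QuantisedAsymm8 in this
+    # release's armnn::DataType enum ordering.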
+ assert tensor.GetDataType() == 2
+ assert tensor.GetNumDimensions() == 4
+ assert tensor.GetNumElements() == 270000
+ assert tensor.GetQuantizationOffset() == 128
+ assert tensor.GetQuantizationScale() == 0.007874015718698502
+
+
+def test_tflite_get_network_output_binding_info(parser):
+ graphs_count = parser.GetSubgraphCount()
+ graph_id = graphs_count - 1
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+
+ output_binding_info1 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[0])
+ output_binding_info2 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[1])
+ output_binding_info3 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[2])
+ output_binding_info4 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[3])
+
+ # Check the tensor info retrieved from GetNetworkOutputBindingInfo
+ tensor1 = output_binding_info1[1]
+ tensor2 = output_binding_info2[1]
+ tensor3 = output_binding_info3[1]
+ tensor4 = output_binding_info4[1]
+
+ assert tensor1.GetDataType() == 1
+ assert tensor1.GetNumDimensions() == 3
+ assert tensor1.GetNumElements() == 40
+ assert tensor1.GetQuantizationOffset() == 0
+ assert tensor1.GetQuantizationScale() == 0.0
+
+ assert tensor2.GetDataType() == 1
+ assert tensor2.GetNumDimensions() == 2
+ assert tensor2.GetNumElements() == 10
+ assert tensor2.GetQuantizationOffset() == 0
+ assert tensor2.GetQuantizationScale() == 0.0
+
+ assert tensor3.GetDataType() == 1
+ assert tensor3.GetNumDimensions() == 2
+ assert tensor3.GetNumElements() == 10
+ assert tensor3.GetQuantizationOffset() == 0
+ assert tensor3.GetQuantizationScale() == 0.0
+
+ assert tensor4.GetDataType() == 1
+ assert tensor4.GetNumDimensions() == 1
+ assert tensor4.GetNumElements() == 1
+ assert tensor4.GetQuantizationOffset() == 0
+ assert tensor4.GetQuantizationScale() == 0.0
+
+
+def test_tflite_get_subgraph_input_tensor_names(parser):
+ graphs_count = parser.GetSubgraphCount()
+ graph_id = graphs_count - 1
+
+ input_names = parser.GetSubgraphInputTensorNames(graph_id)
+
+ assert input_names == ('normalized_input_image_tensor',)
+
+
+def test_tflite_get_subgraph_output_tensor_names(parser):
+ graphs_count = parser.GetSubgraphCount()
+ graph_id = graphs_count - 1
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+
+ assert output_names[0] == 'TFLite_Detection_PostProcess'
+ assert output_names[1] == 'TFLite_Detection_PostProcess:1'
+ assert output_names[2] == 'TFLite_Detection_PostProcess:2'
+ assert output_names[3] == 'TFLite_Detection_PostProcess:3'
+
+
+def test_tflite_filenotfound_exception(shared_data_folder):
+ parser = ann.ITfLiteParser()
+
+ with pytest.raises(RuntimeError) as err:
+ parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'some_unknown_network.tflite'))
+
+    # Only check for part of the exception message, since it contains an
+    # absolute path that differs between machines.
+ assert 'Cannot find the file' in str(err.value)
+
+
+def test_tflite_parser_end_to_end(shared_data_folder):
+ parser = ann.ITfLiteParser()
+
+ network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder,"inception_v3_quant.tflite"))
+
+ graphs_count = parser.GetSubgraphCount()
+ graph_id = graphs_count - 1
+
+ input_names = parser.GetSubgraphInputTensorNames(graph_id)
+ input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
+
+ output_names = parser.GetSubgraphOutputTensorNames(graph_id)
+
+ preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
+
+ options = ann.CreationOptions()
+ runtime = ann.IRuntime(options)
+
+ opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
+ assert 0 == len(messages)
+
+ net_id, messages = runtime.LoadNetwork(opt_network)
+ assert "" == messages
+
+ # Load test image data stored in input.npy
+ input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/inceptionv3_golden_input.npy'))
+ input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
+
+ output_tensors = []
+    for output_name in output_names:
+ out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)
+ out_tensor_info = out_bind_info[1]
+ out_tensor_id = out_bind_info[0]
+ output_tensors.append((out_tensor_id,
+ ann.Tensor(out_tensor_info)))
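+    # Building (binding_id, Tensor) pairs by hand like this should be
+    # equivalent to the ann.make_output_tensors helper used in
+    # test_tf_parser.py above.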
+
+ runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
+
+ output_vectors = []
+    for out_tensor in output_tensors:
+ output_vectors.append(out_tensor[1].get_memory_area())
+
+ # Load golden output file to compare the output results with
+ expected_outputs = np.load(os.path.join(shared_data_folder, 'tflite_parser/inceptionv3_golden_output.npy'))
+
+ # Check that output matches golden output
+ np.testing.assert_allclose(output_vectors, expected_outputs, 0.08)
diff --git a/python/pyarmnn/test/test_types.py b/python/pyarmnn/test/test_types.py
new file mode 100644
index 0000000000..29c0b107bb
--- /dev/null
+++ b/python/pyarmnn/test/test_types.py
@@ -0,0 +1,27 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import pytest
+import pyarmnn as ann
+
+
+def test_activation_function():
+ assert 0 == ann.ActivationFunction_Sigmoid
+ assert 1 == ann.ActivationFunction_TanH
+ assert 2 == ann.ActivationFunction_Linear
+ assert 3 == ann.ActivationFunction_ReLu
+ assert 4 == ann.ActivationFunction_BoundedReLu
+ assert 5 == ann.ActivationFunction_SoftReLu
+ assert 6 == ann.ActivationFunction_LeakyReLu
+ assert 7 == ann.ActivationFunction_Abs
+ assert 8 == ann.ActivationFunction_Sqrt
+ assert 9 == ann.ActivationFunction_Square
+
+
+def test_permutation_vector():
+ pv = ann.PermutationVector((0, 2, 3, 1))
+ assert pv[0] == 0
+ assert pv[2] == 3
+
+ pv2 = ann.PermutationVector((0, 2, 3, 1))
+ assert pv == pv2
+
+ pv4 = ann.PermutationVector((0, 3, 1, 2))
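+    # Assuming ArmNN's convention that p maps source dimension i to
+    # destination dimension p[i], q is the inverse of p when q[p[i]] == i for
+    # every i; here pv4[pv[i]] == i holds for all i in 0..3.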
+ assert pv.IsInverse(pv4)
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
new file mode 100644
index 0000000000..5cb6759673
--- /dev/null
+++ b/python/pyarmnn/test/test_version.py
@@ -0,0 +1,35 @@
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+import os
+import importlib
+
+
+def test_rel_version():
+ import pyarmnn._version as v
+ importlib.reload(v)
+ assert "dev" not in v.__version__
+ del v
+
+
+def test_dev_version():
+ import pyarmnn._version as v
+ os.environ["PYARMNN_DEV_VER"] = "1"
+
+ importlib.reload(v)
+
+ assert "19.11.0.dev1" == v.__version__
+
+ del os.environ["PYARMNN_DEV_VER"]
+ del v
+
+
+def test_arm_version_not_affected():
+ import pyarmnn._version as v
+ os.environ["PYARMNN_DEV_VER"] = "1"
+
+ importlib.reload(v)
+
+ assert "20191100" == v.__arm_ml_version__
+
+ del os.environ["PYARMNN_DEV_VER"]
+ del v
diff --git a/python/pyarmnn/tox.ini b/python/pyarmnn/tox.ini
new file mode 100644
index 0000000000..c182871f12
--- /dev/null
+++ b/python/pyarmnn/tox.ini
@@ -0,0 +1,64 @@
+; Copyright © 2019 Arm Ltd. All rights reserved.
+; SPDX-License-Identifier: MIT
+[tox]
+skip_missing_interpreters = true
+envlist =
+ py35
+ py36
+ py37
+
+[testenv]
+deps = pytest==5.2.0
+ pytest-cov==2.8.1
+ attrs==19.3.0
+ setuptools==41.6.0
+ numpy==1.17.2
+ pillow==6.1.0
+
+recreate = True
+whitelist_externals = /bin/sh
+commands =
+ python -m pytest test/ -v {posargs} --junit-xml=test_report_junit-{envname}.xml --cov=pyarmnn --cov-report xml:coverage-{envname}.xml
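+; Run the tests across all listed interpreters with plain `tox`, or a single
+; environment with e.g. `tox -e py36`.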
+
+
+[testenv:devenv]
+envdir = env
+basepython = python3.6
+usedevelop = True
+deps = {[testenv]deps}
+ tox
+recreate = True
+commands = python -c "import sys; print('Dev environment created: ' + sys.executable)"
+
+[testenv:gen]
+basepython = python3.6
+skip_install = True
+usedevelop = True
+passenv =
+ ARMNN_LIB
+ ARMNN_INCLUDE
+commands =
+ python setup.py clean --all
+ python ./swig_generate.py
+ python setup.py build_ext --inplace
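+; Regenerate the SWIG wrappers and rebuild the extension in place with
+; `tox -e gen`; this assumes ARMNN_LIB and ARMNN_INCLUDE point at a built
+; Arm NN installation.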
+
+[testenv:doc]
+basepython = python3.6
+deps = pdoc3==0.6.3
+passenv =
+ PYARMNN_DEV_VER
+commands =
+ python ./scripts/generate_docs.py --html --output-dir docs pyarmnn --force
+
+[testenv:pylint]
+basepython = python3.6
+deps = pylint==2.3.1
+ numpy==1.17.2
+recreate = False
+skip_install = True
+usedevelop = True
+setenv =
+ PYTHONPATH = src
+commands =
+ sh -c "pylint --rcfile=pylintconfig src --output-format=parseable --reports=no > pylint_results.txt || true"
+