Diffstat (limited to 'python/pyarmnn/src')
-rw-r--r--  python/pyarmnn/src/pyarmnn/_version.py                   | 4 ++--
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i  | 7 +++++--
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index 7c0940e7f3..d1b1ca290c 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: MIT
import os
-version_info = (29, 0, 0)
+version_info = (30, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
@@ -24,7 +24,7 @@ def check_armnn_version(installed_armnn_version: str, expected_armnn_version: st
"""Compares expected Arm NN version and Arm NN version used to build the package.
Args:
- installed_armnn_version (str): Arm NN version used to generate the package (e.g. 29.0.0)
+ installed_armnn_version (str): Arm NN version used to generate the package (e.g. 30.0.0)
expected_armnn_version (str): Expected Arm NN version
Returns:
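
The version bump means a wheel built against Arm NN 30.0.0 will refuse to pair with a mismatching installation. Below is a minimal sketch (not part of the patch) of exercising the check, assuming the module path shown in the diff and that a mismatch surfaces as an AssertionError:

```python
# Hypothetical usage sketch of the version guard after the bump to 30.0.0.
from pyarmnn._version import check_armnn_version

check_armnn_version('30.0.0', '30.0.0')    # matching versions: returns without error

try:
    check_armnn_version('29.0.0', '30.0.0')
except AssertionError as err:              # assumption: mismatch is reported via an assertion
    print(err)
```
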
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index a2f57a3aa9..55b6795c90 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -29,7 +29,7 @@ Contains:
that can not be reduced will be left in Fp32.
m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
that can not be reduced will be left in Fp32.
- m_ImportEnabled (bool): Enable memory import.
+ m_ImportEnabled (bool): Enable memory import of input tensors.
m_shapeInferenceMethod: The ShapeInferenceMethod modifies how the output shapes are treated.
When ValidateOnly is selected, the output shapes are inferred from the input parameters
of the layer and any mismatch is reported.
@@ -38,6 +38,7 @@ Contains:
with tensors whose rank or dimension sizes are not specified explicitly; however, this
information can be calculated from the inputs.
m_ModelOptions: List of backends optimisation options.
+ m_ExportEnabled (bool): Enable memory export of output tensors.
") OptimizerOptions;
@@ -51,7 +52,8 @@ struct OptimizerOptions
bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
bool importEnabled = false,
- std::vector<armnn::BackendOptions> modelOptions = {});
+ std::vector<armnn::BackendOptions> modelOptions = {},
+ bool exportEnabled = false);
bool m_ReduceFp32ToBf16;
bool m_ReduceFp32ToFp16;
@@ -59,6 +61,7 @@ struct OptimizerOptions
ShapeInferenceMethod m_shapeInferenceMethod;
bool m_ImportEnabled;
std::vector<armnn::BackendOptions> m_ModelOptions;
+ bool m_ExportEnabled;
};
%model_options_clear;
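
Taken together, the change adds a symmetrical m_ExportEnabled flag (and exportEnabled constructor argument) alongside the existing m_ImportEnabled. A hedged sketch of how the new option might be driven from Python follows; it assumes the SWIG wrapper exposes the struct members as attributes and that pyarmnn.Optimize accepts an OptimizerOptions instance, which is typical pyarmnn usage rather than something shown in this patch:

```python
# Minimal sketch (not from the patch): toggle zero-copy import/export on
# the generated OptimizerOptions wrapper. Attribute names mirror the C++
# members shown above; exact wrapper behaviour depends on the SWIG build.
import pyarmnn as ann

options = ann.OptimizerOptions()
options.m_ImportEnabled = True    # memory import of input tensors
options.m_ExportEnabled = True    # memory export of output tensors (new field)

# Typical use when optimising a parsed network (runtime/network assumed to exist):
# opt_network, messages = ann.Optimize(network,
#                                      [ann.BackendId('CpuAcc')],
#                                      runtime.GetDeviceSpec(),
#                                      options)
```
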