path: root/python
author:    Cathal Corbett <cathal.corbett@arm.com>    2021-10-22 11:12:07 +0100
committer: David Monahan <david.monahan@arm.com>      2021-11-08 19:05:11 +0000
commit:    5b8093c17044e8eaaaa42d96ba4902dee5791be4
tree:      7f49f91e76f171041fe51c2c078b9271aa220b48 /python
parent:    d69cb904415621b066599dc20164bdb71558dc14
download:  armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532
!armnn-internal-tests:372451
 * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
 * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data() using template<typename MemoryType>.
 * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo.
 * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true.
 * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
 * Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true.
 * Indentation fixes.
 * Fix to armnn_tensor.i to add isConstant parameter to TensorInfo constructor. Added methods IsConstant() and SetConstant().
 * Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set to false when constructing a ConstTensor.
 * Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor is used.
 * Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when the TensorInfo isConstant parameter is false.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
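For PyArmnn users, the practical effect is that a TensorInfo must be flagged constant before it backs a ConstTensor. A minimal sketch of the two ways to do that under this change (pass the flag to the constructor, or set it afterwards); this is an illustration, not code from the patch:

import numpy as np
import pyarmnn as ann

data = np.ones((2, 3), dtype=np.float32)

# Option 1: pass the new isConstant flag (fifth argument) to the TensorInfo constructor.
info_a = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
const_a = ann.ConstTensor(info_a, data)

# Option 2: flip the flag on an existing TensorInfo before constructing the ConstTensor.
info_b = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
info_b.SetConstant()
const_b = ann.ConstTensor(info_b, data)

assert info_a.IsConstant() and info_b.IsConstant()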
Diffstat (limited to 'python')
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py      | 19
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py  |  1
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i  | 24
-rw-r--r--  python/pyarmnn/test/test_const_tensor.py                | 54
-rw-r--r--  python/pyarmnn/test/test_runtime.py                     |  1
-rw-r--r--  python/pyarmnn/test/test_tensor_info.py                 |  4
6 files changed, 79 insertions, 24 deletions
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
index 94995bdd8c..ab4305c18e 100644
--- a/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
+++ b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
@@ -59,24 +59,31 @@ class ConstTensor(AnnConstTensor):
Raises:
TypeError: Unsupported input data type.
- ValueError: Unsupported tensor data type and incorrect input data size.
+ ValueError: Unsupported tensor data type, incorrect input data size and creation of ConstTensor from non-constant TensorInfo.
"""
self.__memory_area = None
# TensorInfo as first argument and numpy array as second
if len(args) > 1 and isinstance(args[0], TensorInfo):
- if isinstance(args[1], np.ndarray):
+ if not isinstance(args[1], np.ndarray):
+ raise TypeError('Data must be provided as a numpy array.')
+ # if TensorInfo IsConstant is false
+ elif not args[0].IsConstant():
+ raise ValueError('TensorInfo when initializing ConstTensor must be set to constant.')
+ else:
self.__create_memory_area(args[0].GetDataType(), args[0].GetNumBytes(), args[0].GetNumElements(),
args[1])
super().__init__(args[0], self.__memory_area.data)
- else:
- raise TypeError('Data must be provided as a numpy array.')
# copy constructor - reference to memory area is passed from copied const
# tensor and armnn's copy constructor is called
elif len(args) > 0 and isinstance(args[0], (ConstTensor, Tensor)):
- self.__memory_area = args[0].get_memory_area()
- super().__init__(args[0])
+ # if TensorInfo IsConstant is false
+ if not args[0].GetInfo().IsConstant():
+ raise ValueError('TensorInfo of Tensor when initializing ConstTensor must be set to constant.')
+ else:
+ self.__memory_area = args[0].get_memory_area()
+ super().__init__(args[0])
# empty tensor
elif len(args) == 0:
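To illustrate the second guarded path above (copy construction from an existing Tensor), a hedged sketch of what now succeeds and what now raises; illustrative only, not part of the patch:

import pyarmnn as ann

info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
tensor = ann.Tensor(info)             # mutable tensor backed by its own memory area
const_copy = ann.ConstTensor(tensor)  # allowed: the underlying TensorInfo is constant

non_const_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)  # isConstant defaults to False
try:
    ann.ConstTensor(ann.Tensor(non_const_info))
except ValueError:
    pass  # "TensorInfo of Tensor when initializing ConstTensor must be set to constant."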
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
index 22b876896d..532db56cc3 100644
--- a/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
+++ b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
@@ -54,6 +54,7 @@ def make_input_tensors(inputs_binding_info: List[Tuple],
for in_bind_info, in_data in zip(inputs_binding_info, input_data):
in_tensor_id = in_bind_info[0]
in_tensor_info = in_bind_info[1]
+ in_tensor_info.SetConstant()
input_tensors.append((in_tensor_id, ConstTensor(in_tensor_info, in_data)))
return input_tensors
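With SetConstant() applied inside make_input_tensors, callers can keep passing binding info straight through without marking it constant themselves. A hedged usage sketch; the binding info is built by hand here purely for illustration (in practice it usually comes from a parser's GetNetworkInputBindingInfo()):

import numpy as np
import pyarmnn as ann

# inputs_binding_info: list of (tensor_id, TensorInfo) tuples.
input_info = ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32)
inputs_binding_info = [(0, input_info)]
input_data = [np.zeros((1, 4), dtype=np.float32)]

# make_input_tensors now marks each TensorInfo constant before wrapping it in a
# ConstTensor, so no explicit SetConstant() call is needed at the call site.
input_tensors = ann.make_input_tensors(inputs_binding_info, input_data)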
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
index 0edf67d618..d8ef37d762 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
@@ -111,7 +111,8 @@ public:
TensorInfo(const TensorInfo& other);
TensorInfo(const TensorShape& shape, DataType dataType,
- float quantizationScale = 0.0f, int32_t quantizationOffset = 0);
+ float quantizationScale = 0.0f, int32_t quantizationOffset = 0,
+ bool isConstant = False);
%feature("docstring",
"
@@ -223,6 +224,26 @@ public:
") IsQuantized;
bool IsQuantized() const;
+ %feature("docstring",
+ "
+ Returns true if the tensor info is constant.
+
+ Returns:
+ bool: True if the tensor info is constant.
+
+ ") IsConstant;
+ bool IsConstant() const;
+
+ %feature("docstring",
+ "
+ Sets the tensor info to be constant.
+
+ Args:
+ IsConstant (bool): Sets tensor info to constant.
+
+ ") SetConstant;
+ void SetConstant(const bool IsConstant = True);
+
%feature("docstring",
@@ -254,6 +275,7 @@ public:
+ ", IsQuantized: " + std::to_string($self->IsQuantized())
+ ", QuantizationScale: " + std::to_string( $self->GetQuantizationScale())
+ ", QuantizationOffset: " + std::to_string($self->GetQuantizationOffset())
+ + ", IsConstant: " + std::to_string($self->IsConstant())
+ ", NumDimensions: " + std::to_string($self->GetNumDimensions())
+ ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
return tmp;
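On the Python side, these SWIG additions surface as an optional fifth constructor argument plus two small methods on TensorInfo. A quick illustrative sketch (not from the patch):

import pyarmnn as ann

info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1)
print(info.IsConstant())   # False: isConstant defaults to False

info.SetConstant()         # equivalent to SetConstant(True)
print(info.IsConstant())   # True

# __str__ now reports the flag as well, e.g. "... IsConstant: 1, NumDimensions: 2, NumElements: 6}"
print(info)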
diff --git a/python/pyarmnn/test/test_const_tensor.py b/python/pyarmnn/test/test_const_tensor.py
index fa6327f19c..2358d65918 100644
--- a/python/pyarmnn/test/test_const_tensor.py
+++ b/python/pyarmnn/test/test_const_tensor.py
@@ -6,8 +6,8 @@ import numpy as np
import pyarmnn as ann
-def _get_tensor_info(dt):
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt)
+def _get_const_tensor_info(dt):
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt, 0.0, 0, True)
return tensor_info
@@ -23,7 +23,7 @@ def _get_tensor_info(dt):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 4)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_many_elements(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
num_bytes = tensor_info.GetNumBytes()
with pytest.raises(ValueError) as err:
@@ -43,7 +43,7 @@ def test_const_tensor_too_many_elements(dt, data):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_little_elements(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
num_bytes = tensor_info.GetNumBytes()
with pytest.raises(ValueError) as err:
@@ -63,7 +63,7 @@ def test_const_tensor_too_little_elements(dt, data):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_multi_dimensional_input(dt, data):
- tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt), data)
+ tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, True), data)
assert data.size == tensor.GetNumElements()
assert data.nbytes == tensor.GetNumBytes()
@@ -72,7 +72,7 @@ def test_const_tensor_multi_dimensional_input(dt, data):
def test_create_const_tensor_from_tensor():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
tensor = ann.Tensor(tensor_info)
copied_tensor = ann.ConstTensor(tensor)
@@ -85,7 +85,7 @@ def test_create_const_tensor_from_tensor():
def test_const_tensor_from_tensor_has_memory_area_access_after_deletion_of_original_tensor():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
tensor = ann.Tensor(tensor_info)
tensor.get_memory_area()[0] = 100
@@ -125,7 +125,7 @@ def test_create_const_tensor_incorrect_args():
(-1, np.random.randint(1, size=(2, 3)).astype(np.float32)),
], ids=['unknown'])
def test_const_tensor_unsupported_datatype(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(ValueError) as err:
ann.ConstTensor(tensor_info, data)
@@ -142,7 +142,7 @@ def test_const_tensor_unsupported_datatype(dt, data):
(ann.DataType_QSymmS8, [[1, 1, 1], [1, 1, 1]])
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8'])
def test_const_tensor_incorrect_input_datatype(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(TypeError) as err:
ann.ConstTensor(tensor_info, data)
@@ -163,7 +163,7 @@ def test_const_tensor_incorrect_input_datatype(dt, data):
class TestNumpyDataTypes:
def test_copy_const_tensor(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
copied_tensor = ann.ConstTensor(tensor)
@@ -175,7 +175,7 @@ class TestNumpyDataTypes:
assert copied_tensor.GetDataType() == tensor.GetDataType()
def test_const_tensor__str__(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
d_type = tensor_info.GetDataType()
num_dimensions = tensor_info.GetNumDimensions()
num_bytes = tensor_info.GetNumBytes()
@@ -186,7 +186,7 @@ class TestNumpyDataTypes:
"{}, NumElements: {}}}".format(d_type, num_bytes, num_dimensions, num_elements)
def test_const_tensor_with_info(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
elements = tensor_info.GetNumElements()
num_bytes = tensor_info.GetNumBytes()
d_type = dt
@@ -199,7 +199,7 @@ class TestNumpyDataTypes:
assert d_type == tensor.GetDataType()
def test_immutable_memory(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
@@ -217,7 +217,7 @@ class TestNumpyDataTypes:
ann.DataType_Signed32: np.int32,
ann.DataType_Float16: np.float16}
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
assert np_data_type_mapping[tensor.GetDataType()] == data.dtype
@@ -242,10 +242,34 @@ def test_numpy_dtype_mismatch_ann_dtype(dt, data):
ann.DataType_Signed32: np.int32,
ann.DataType_Float16: np.float16}
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(TypeError) as err:
ann.ConstTensor(tensor_info, data)
assert str(err.value) == "Expected data to have type {} for type {} but instead got numpy.{}".format(
np_data_type_mapping[dt], dt, data.dtype)
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.float16)),
+ (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
+ (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
+ (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
+ (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 3)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
+class TestConstTensorConstructorErrors:
+
+ def test_tensorinfo_isconstant_not_set(self, dt, data):
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data)
+
+ assert str(err.value) == "TensorInfo when initializing ConstTensor must be set to constant."
+
+ def test_tensor_tensorinfo_isconstant_not_set(self, dt, data):
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(ann.Tensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data))
+
+ assert str(err.value) == "TensorInfo of Tensor when initializing ConstTensor must be set to constant." \ No newline at end of file
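The error strings asserted above can also be handled defensively in application code rather than asserted in tests; a small recovery sketch, purely illustrative:

import numpy as np
import pyarmnn as ann

info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)  # not constant yet
data = np.zeros((2, 3), dtype=np.float32)
try:
    const_tensor = ann.ConstTensor(info, data)
except ValueError:
    # Recover by marking the info constant and retrying.
    info.SetConstant()
    const_tensor = ann.ConstTensor(info, data)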
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index fbdd8044ce..e558e84e28 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -27,6 +27,7 @@ def random_runtime(shared_data_folder):
input_tensor_id = input_binding_info[0]
input_tensor_info = input_binding_info[1]
+ input_tensor_info.SetConstant()
output_names = parser.GetSubgraphOutputTensorNames(graph_id)
diff --git a/python/pyarmnn/test/test_tensor_info.py b/python/pyarmnn/test/test_tensor_info.py
index dc73533869..e54e2a998c 100644
--- a/python/pyarmnn/test/test_tensor_info.py
+++ b/python/pyarmnn/test/test_tensor_info.py
@@ -21,7 +21,7 @@ def test_tensor_info_ctor_shape():
def test_tensor_info__str__():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1, True)
assert tensor_info.__str__() == "TensorInfo{DataType: 2, IsQuantized: 1, QuantizationScale: 0.500000, " \
- "QuantizationOffset: 1, NumDimensions: 2, NumElements: 6}"
+ "QuantizationOffset: 1, IsConstant: 1, NumDimensions: 2, NumElements: 6}"