author     Cathal Corbett <cathal.corbett@arm.com>  2021-10-22 11:12:07 +0100
committer  David Monahan <david.monahan@arm.com>    2021-11-08 19:05:11 +0000
commit     5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch)
tree       7f49f91e76f171041fe51c2c078b9271aa220b48
parent     d69cb904415621b066599dc20164bdb71558dc14 (diff)
download   armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532 !armnn-internal-tests:372451

* Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added new ConstTensor() constructor in Tensor.hpp to accept vector<>.data() using template<typename MemoryType>.
* Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true.
* Indentation fixes.
* Fix to armnn_tensor.i to add the isConstant parameter to the TensorInfo constructor. Added methods IsConstant() and SetConstant().
* Fix to const_tensor.py to throw ValueError when a ConstTensor is constructed from a TensorInfo whose isConstant is false.
* Fixed PyArmnn unit tests to set TensorInfo isConstant to True when a ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when the TensorInfo isConstant parameter is false.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
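In practical terms, the change tightens the ConstTensor contract: a TensorInfo must have its isConstant flag set before it can back a ConstTensor, otherwise construction throws. A minimal C++ sketch of the new behaviour (illustrative only, not part of the commit; it assumes only the public ArmNN headers):

    #include <armnn/Tensor.hpp>
    #include <armnn/Exceptions.hpp>
    #include <vector>

    void ConstTensorContractSketch()
    {
        std::vector<float> weightsData = {1.0f, 2.0f, 3.0f, 4.0f};

        // Option 1: pass isConstant=true through the extended TensorInfo constructor.
        armnn::TensorInfo constInfo(armnn::TensorShape({2, 2}), armnn::DataType::Float32,
                                    0.0f /*scale*/, 0 /*offset*/, true /*isConstant*/);
        armnn::ConstTensor weights(constInfo, weightsData); // OK

        // Option 2: flip the flag explicitly before construction.
        armnn::TensorInfo info(armnn::TensorShape({2, 2}), armnn::DataType::Float32);
        info.SetConstant(true);
        armnn::ConstTensor alsoOk(info, weightsData); // OK

        // A non-constant TensorInfo now throws InvalidArgumentException.
        armnn::TensorInfo nonConstInfo(armnn::TensorShape({2, 2}), armnn::DataType::Float32);
        try
        {
            armnn::ConstTensor rejected(nonConstInfo, weightsData);
        }
        catch (const armnn::InvalidArgumentException&)
        {
            // Expected after this change.
        }
    }

The PyArmnn side mirrors this contract, raising ValueError rather than InvalidArgumentException when the wrapped TensorInfo is not constant.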
-rw-r--r--  delegate/src/DelegateUtils.hpp  12
-rw-r--r--  delegate/src/armnn_delegate.cpp  4
-rw-r--r--  include/armnn/Tensor.hpp  38
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py  19
-rw-r--r--  python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py  1
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i  24
-rw-r--r--  python/pyarmnn/test/test_const_tensor.py  54
-rw-r--r--  python/pyarmnn/test/test_runtime.py  1
-rw-r--r--  python/pyarmnn/test/test_tensor_info.py  4
-rw-r--r--  samples/AsyncExecutionSample.cpp  9
-rw-r--r--  samples/CustomMemoryAllocatorSample.cpp  7
-rw-r--r--  samples/DynamicSample.cpp  6
-rw-r--r--  samples/SimpleSample.cpp  11
-rw-r--r--  src/armnn/Runtime.cpp  4
-rw-r--r--  src/armnn/Runtime.hpp  4
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp  8
-rw-r--r--  src/armnn/optimizations/FuseBatchNorm.hpp  2
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp  922
-rw-r--r--  src/armnn/test/CreateWorkload.hpp  4
-rw-r--r--  src/armnn/test/DebugCallbackTest.cpp  4
-rw-r--r--  src/armnn/test/FlowControl.cpp  2
-rw-r--r--  src/armnn/test/GraphTests.cpp  2
-rw-r--r--  src/armnn/test/NetworkTests.cpp  4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp  22
-rw-r--r--  src/armnn/test/RuntimeTests.cpp  10
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp  16
-rw-r--r--  src/armnn/test/TensorTest.cpp  42
-rw-r--r--  src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp  24
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp  8
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp  27
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp  21
-rw-r--r--  src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp  4
-rw-r--r--  src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp  1
-rw-r--r--  src/armnnSerializer/test/ActivationSerializationTests.cpp  4
-rw-r--r--  src/armnnSerializer/test/LstmSerializationTests.cpp  118
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp  50
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp  1
-rw-r--r--  src/armnnUtils/ParserPrototxtFixture.hpp  1
-rw-r--r--  src/armnnUtils/TensorIOUtils.hpp  5
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp  2
-rw-r--r--  src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp  1
-rw-r--r--  src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp  6
-rw-r--r--  src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp  1
-rw-r--r--  src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp  1
-rw-r--r--  src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp  3
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp  6
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp  23
-rw-r--r--  src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp  14
-rw-r--r--  src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp  10
-rw-r--r--  src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp  2
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp  12
-rw-r--r--  src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp  14
-rw-r--r--  src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp  19
-rw-r--r--  src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp  10
-rw-r--r--  src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp  8
-rw-r--r--  src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp  6
-rw-r--r--  src/backends/cl/test/ClContextSerializerTests.cpp  4
-rw-r--r--  src/backends/cl/test/ClCustomAllocatorTests.cpp  6
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp  5
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp  4
-rw-r--r--  src/backends/cl/test/Fp16SupportTest.cpp  6
-rw-r--r--  src/backends/neon/test/NeonFallbackTests.cpp  109
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp  8
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp  20
-rw-r--r--  src/profiling/test/FileOnlyProfilingDecoratorTests.cpp  8
-rw-r--r--  src/profiling/test/ProfilingTestUtils.cpp  8
85 files changed, 1190 insertions, 676 deletions
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 940d269c5b..e0de809ab3 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -420,6 +420,7 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
safeShape.data(),
dimensionsSpecificity);
ret = armnn::TensorInfo(tensorShape, type);
+ ret.SetConstant(true);
}
else
{
@@ -442,7 +443,16 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
tensorDims.data(),
dimensionsSpecificity);
- ret = armnn::TensorInfo(tensorShape, type);
+
+ if(tflite::IsConstantTensor(&tfLiteTensor))
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ ret.SetConstant(true);
+ }
+ else
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
}
auto quantizationInfo = tfLiteTensor.quantization;
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 4c1bc57fc2..0069b4fe0e 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -422,7 +422,9 @@ TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfL
if (tensor->allocation_type != kTfLiteMmapRo)
{
const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
- const armnn::ConstTensor inputTensor(inputBinding.second, tensor->data.data);
+ armnn::TensorInfo inputTensorInfo = inputBinding.second;
+ inputTensorInfo.SetConstant(true);
+ const armnn::ConstTensor inputTensor(inputTensorInfo, tensor->data.data);
inputTensors.emplace_back(inputIdx, inputTensor);
++inputIndex;
diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp
index 6f6abe187b..0095a03e52 100644
--- a/include/armnn/Tensor.hpp
+++ b/include/armnn/Tensor.hpp
@@ -334,27 +334,59 @@ public:
this->GetInfo().SetConstant();
}
- /// Can be implicitly constructed from non-const Tensor.
+ /// ConstTensor implicitly constructed from non-const Tensor.
+ ///
+ /// @param other - reference to a constant Tensor.
+ ///
+ /// @throws InvalidArgumentException when Tensor parameter TensorInfo is non-constant.
ConstTensor(const Tensor& other) : BaseTensor<const void*>(other.GetInfo(), other.GetMemoryArea())
{
- this->GetInfo().SetConstant();
+ if (!this->GetInfo().IsConstant())
+ {
+ throw InvalidArgumentException("Invalid attempt to construct ConstTensor "
+ "from Tensor due to non-constant TensorInfo");
+ }
}
/// Constructor from a backing container.
+ ///
/// @param container - An stl-like container type which implements data() and size() methods.
/// Presence of data() and size() is a strong indicator of the continuous memory layout of the container,
/// which is a requirement for Tensor data. Tensor instances do not claim ownership of referenced memory regions,
/// that is, no attempt will be made by ArmNN to free these memory regions automatically.
+ ///
+ /// @throws InvalidArgumentException when isConstant parameter of input TensorInfo is false.
template < template<typename, typename...> class ContainerType, typename T, typename...ContainerArgs >
ConstTensor(const TensorInfo& info, const ContainerType<T, ContainerArgs...>& container)
: BaseTensor<const void*>(info, container.data())
{
- this->GetInfo().SetConstant();
+ if (!this->GetInfo().IsConstant())
+ {
+ throw InvalidArgumentException("Invalid attempt to construct ConstTensor from non-constant TensorInfo.");
+ }
if (container.size() * sizeof(T) != info.GetNumBytes())
{
throw InvalidArgumentException("Container size is not correct");
}
}
+
+ /// ConstTensor constructed from TensorInfo and MemoryType template (a raw memory pointer).
+ ///
+ /// @param info - reference to a constant TensorInfo.
+ /// @param memoryArea - Region of CPU-addressable memory where tensor data will be stored. Must be valid while
+ /// workloads are on the fly. Tensor instances do not claim ownership of referenced memory regions, that is,
+ /// no attempt will be made by ArmNN to free these memory regions automatically.
+ ///
+ /// @throws InvalidArgumentException when TensorInfo isConstant parameter is false.
+ template<typename MemoryType>
+ ConstTensor(const TensorInfo& info, MemoryType memoryArea)
+ : BaseTensor<const void*>(info, memoryArea)
+ {
+ if (!this->GetInfo().IsConstant())
+ {
+ throw InvalidArgumentException("Invalid attempt to construct ConstTensor from non-constant TensorInfo.");
+ }
+ }
};
using InputTensors = std::vector<std::pair<LayerBindingId, class ConstTensor>>;
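For callers holding raw pointers rather than containers, the new template<typename MemoryType> constructor above covers that path. A hedged sketch of its use against a loaded network (runtime, networkId and the single input binding at index 0 are assumptions, mirroring the updated samples later in this diff):

    #include <armnn/ArmNN.hpp>
    #include <vector>

    // Builds an input ConstTensor via the new raw-pointer constructor.
    armnn::ConstTensor MakeInputTensor(armnn::IRuntime& runtime,
                                       armnn::NetworkId networkId,
                                       const std::vector<float>& data)
    {
        armnn::TensorInfo info = runtime.GetInputTensorInfo(networkId, 0);
        info.SetConstant(true); // required, or the ConstTensor constructor throws

        // data.data() is a const float*, matched by template<typename MemoryType>.
        return armnn::ConstTensor(info, data.data());
    }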
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
index 94995bdd8c..ab4305c18e 100644
--- a/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
+++ b/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
@@ -59,24 +59,31 @@ class ConstTensor(AnnConstTensor):
Raises:
TypeError: Unsupported input data type.
- ValueError: Unsupported tensor data type and incorrect input data size.
+ ValueError: Unsupported tensor data type, incorrect input data size and creation of ConstTensor from non-constant TensorInfo.
"""
self.__memory_area = None
# TensorInfo as first argument and numpy array as second
if len(args) > 1 and isinstance(args[0], TensorInfo):
- if isinstance(args[1], np.ndarray):
+ if not isinstance(args[1], np.ndarray):
+ raise TypeError('Data must be provided as a numpy array.')
+ # if TensorInfo IsConstant is false
+ elif not args[0].IsConstant():
+ raise ValueError('TensorInfo when initializing ConstTensor must be set to constant.')
+ else:
self.__create_memory_area(args[0].GetDataType(), args[0].GetNumBytes(), args[0].GetNumElements(),
args[1])
super().__init__(args[0], self.__memory_area.data)
- else:
- raise TypeError('Data must be provided as a numpy array.')
# copy constructor - reference to memory area is passed from copied const
# tensor and armnn's copy constructor is called
elif len(args) > 0 and isinstance(args[0], (ConstTensor, Tensor)):
- self.__memory_area = args[0].get_memory_area()
- super().__init__(args[0])
+ # if TensorInfo IsConstant is false
+ if not args[0].GetInfo().IsConstant():
+ raise ValueError('TensorInfo of Tensor when initializing ConstTensor must be set to constant.')
+ else:
+ self.__memory_area = args[0].get_memory_area()
+ super().__init__(args[0])
# empty tensor
elif len(args) == 0:
diff --git a/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
index 22b876896d..532db56cc3 100644
--- a/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
+++ b/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
@@ -54,6 +54,7 @@ def make_input_tensors(inputs_binding_info: List[Tuple],
for in_bind_info, in_data in zip(inputs_binding_info, input_data):
in_tensor_id = in_bind_info[0]
in_tensor_info = in_bind_info[1]
+ in_tensor_info.SetConstant()
input_tensors.append((in_tensor_id, ConstTensor(in_tensor_info, in_data)))
return input_tensors
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
index 0edf67d618..d8ef37d762 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
@@ -111,7 +111,8 @@ public:
TensorInfo(const TensorInfo& other);
TensorInfo(const TensorShape& shape, DataType dataType,
- float quantizationScale = 0.0f, int32_t quantizationOffset = 0);
+ float quantizationScale = 0.0f, int32_t quantizationOffset = 0,
+ bool isConstant = False);
%feature("docstring",
"
@@ -223,6 +224,26 @@ public:
") IsQuantized;
bool IsQuantized() const;
+ %feature("docstring",
+ "
+ Returns true if the tensor info is constant.
+
+ Returns:
+ bool: True if the tensor info is constant.
+
+ ") IsConstant;
+ bool IsConstant() const;
+
+ %feature("docstring",
+ "
+ Sets the tensor info to be constant.
+
+ Args:
+ IsConstant (bool): Sets tensor info to constant.
+
+ ") SetConstant;
+ void SetConstant(const bool IsConstant = True);
+
%feature("docstring",
@@ -254,6 +275,7 @@ public:
+ ", IsQuantized: " + std::to_string($self->IsQuantized())
+ ", QuantizationScale: " + std::to_string( $self->GetQuantizationScale())
+ ", QuantizationOffset: " + std::to_string($self->GetQuantizationOffset())
+ + ", IsConstant: " + std::to_string($self->IsConstant())
+ ", NumDimensions: " + std::to_string($self->GetNumDimensions())
+ ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
return tmp;
diff --git a/python/pyarmnn/test/test_const_tensor.py b/python/pyarmnn/test/test_const_tensor.py
index fa6327f19c..2358d65918 100644
--- a/python/pyarmnn/test/test_const_tensor.py
+++ b/python/pyarmnn/test/test_const_tensor.py
@@ -6,8 +6,8 @@ import numpy as np
import pyarmnn as ann
-def _get_tensor_info(dt):
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt)
+def _get_const_tensor_info(dt):
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt, 0.0, 0, True)
return tensor_info
@@ -23,7 +23,7 @@ def _get_tensor_info(dt):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 4)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_many_elements(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
num_bytes = tensor_info.GetNumBytes()
with pytest.raises(ValueError) as err:
@@ -43,7 +43,7 @@ def test_const_tensor_too_many_elements(dt, data):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_little_elements(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
num_bytes = tensor_info.GetNumBytes()
with pytest.raises(ValueError) as err:
@@ -63,7 +63,7 @@ def test_const_tensor_too_little_elements(dt, data):
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int16))
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_multi_dimensional_input(dt, data):
- tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt), data)
+ tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, True), data)
assert data.size == tensor.GetNumElements()
assert data.nbytes == tensor.GetNumBytes()
@@ -72,7 +72,7 @@ def test_const_tensor_multi_dimensional_input(dt, data):
def test_create_const_tensor_from_tensor():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
tensor = ann.Tensor(tensor_info)
copied_tensor = ann.ConstTensor(tensor)
@@ -85,7 +85,7 @@ def test_create_const_tensor_from_tensor():
def test_const_tensor_from_tensor_has_memory_area_access_after_deletion_of_original_tensor():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
tensor = ann.Tensor(tensor_info)
tensor.get_memory_area()[0] = 100
@@ -125,7 +125,7 @@ def test_create_const_tensor_incorrect_args():
(-1, np.random.randint(1, size=(2, 3)).astype(np.float32)),
], ids=['unknown'])
def test_const_tensor_unsupported_datatype(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(ValueError) as err:
ann.ConstTensor(tensor_info, data)
@@ -142,7 +142,7 @@ def test_const_tensor_unsupported_datatype(dt, data):
(ann.DataType_QSymmS8, [[1, 1, 1], [1, 1, 1]])
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8'])
def test_const_tensor_incorrect_input_datatype(dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(TypeError) as err:
ann.ConstTensor(tensor_info, data)
@@ -163,7 +163,7 @@ def test_const_tensor_incorrect_input_datatype(dt, data):
class TestNumpyDataTypes:
def test_copy_const_tensor(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
copied_tensor = ann.ConstTensor(tensor)
@@ -175,7 +175,7 @@ class TestNumpyDataTypes:
assert copied_tensor.GetDataType() == tensor.GetDataType()
def test_const_tensor__str__(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
d_type = tensor_info.GetDataType()
num_dimensions = tensor_info.GetNumDimensions()
num_bytes = tensor_info.GetNumBytes()
@@ -186,7 +186,7 @@ class TestNumpyDataTypes:
"{}, NumElements: {}}}".format(d_type, num_bytes, num_dimensions, num_elements)
def test_const_tensor_with_info(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
elements = tensor_info.GetNumElements()
num_bytes = tensor_info.GetNumBytes()
d_type = dt
@@ -199,7 +199,7 @@ class TestNumpyDataTypes:
assert d_type == tensor.GetDataType()
def test_immutable_memory(self, dt, data):
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
@@ -217,7 +217,7 @@ class TestNumpyDataTypes:
ann.DataType_Signed32: np.int32,
ann.DataType_Float16: np.float16}
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
tensor = ann.ConstTensor(tensor_info, data)
assert np_data_type_mapping[tensor.GetDataType()] == data.dtype
@@ -242,10 +242,34 @@ def test_numpy_dtype_mismatch_ann_dtype(dt, data):
ann.DataType_Signed32: np.int32,
ann.DataType_Float16: np.float16}
- tensor_info = _get_tensor_info(dt)
+ tensor_info = _get_const_tensor_info(dt)
with pytest.raises(TypeError) as err:
ann.ConstTensor(tensor_info, data)
assert str(err.value) == "Expected data to have type {} for type {} but instead got numpy.{}".format(
np_data_type_mapping[dt], dt, data.dtype)
+
+@pytest.mark.parametrize("dt, data",
+ [
+ (ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
+ (ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.float16)),
+ (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
+ (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
+ (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
+ (ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
+ (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 3)).astype(np.int16))
+ ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
+class TestConstTensorConstructorErrors:
+
+ def test_tensorinfo_isconstant_not_set(self, dt, data):
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data)
+
+ assert str(err.value) == "TensorInfo when initializing ConstTensor must be set to constant."
+
+ def test_tensor_tensorinfo_isconstant_not_set(self, dt, data):
+ with pytest.raises(ValueError) as err:
+ ann.ConstTensor(ann.Tensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data))
+
+ assert str(err.value) == "TensorInfo of Tensor when initializing ConstTensor must be set to constant."
\ No newline at end of file
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index fbdd8044ce..e558e84e28 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -27,6 +27,7 @@ def random_runtime(shared_data_folder):
input_tensor_id = input_binding_info[0]
input_tensor_info = input_binding_info[1]
+ input_tensor_info.SetConstant()
output_names = parser.GetSubgraphOutputTensorNames(graph_id)
diff --git a/python/pyarmnn/test/test_tensor_info.py b/python/pyarmnn/test/test_tensor_info.py
index dc73533869..e54e2a998c 100644
--- a/python/pyarmnn/test/test_tensor_info.py
+++ b/python/pyarmnn/test/test_tensor_info.py
@@ -21,7 +21,7 @@ def test_tensor_info_ctor_shape():
def test_tensor_info__str__():
- tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1)
+ tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1, True)
assert tensor_info.__str__() == "TensorInfo{DataType: 2, IsQuantized: 1, QuantizationScale: 0.500000, " \
- "QuantizationOffset: 1, NumDimensions: 2, NumElements: 6}"
+ "QuantizationOffset: 1, IsConstant: 1, NumDimensions: 2, NumElements: 6}"
diff --git a/samples/AsyncExecutionSample.cpp b/samples/AsyncExecutionSample.cpp
index 6d2fe243dd..a789aade01 100644
--- a/samples/AsyncExecutionSample.cpp
+++ b/samples/AsyncExecutionSample.cpp
@@ -49,7 +49,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant();
ConstTensor weights(weightsInfo, weightsData);
@@ -104,11 +104,12 @@ int main()
std::vector<std::vector<float>> outputData;
outputData.resize(2, std::vector<float>(1));
-
+ inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
std::vector<InputTensors> inputTensors
{
- {{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData[0].data())}},
- {{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData[1].data())}}
+ {{0, armnn::ConstTensor(inputTensorInfo, inputData[0].data())}},
+ {{0, armnn::ConstTensor(inputTensorInfo, inputData[1].data())}}
};
std::vector<OutputTensors> outputTensors
{
diff --git a/samples/CustomMemoryAllocatorSample.cpp b/samples/CustomMemoryAllocatorSample.cpp
index 171d8e2b5d..a1b05d4be0 100644
--- a/samples/CustomMemoryAllocatorSample.cpp
+++ b/samples/CustomMemoryAllocatorSample.cpp
@@ -78,7 +78,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
armnn::FullyConnectedDescriptor fullyConnectedDesc;
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant(true);
armnn::ConstTensor weights(weightsInfo, weightsData);
ARMNN_NO_DEPRECATE_WARN_BEGIN
@@ -152,10 +152,11 @@ int main()
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
-
+ inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, 0), alignedInputPtr)},
+ {0, armnn::ConstTensor(inputTensorInfo, alignedInputPtr)},
};
armnn::OutputTensors outputTensors
{
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
index ffcc9de083..8a6ff92706 100644
--- a/samples/DynamicSample.cpp
+++ b/samples/DynamicSample.cpp
@@ -62,10 +62,12 @@ int main()
};
std::vector<float> outputData(2);
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
- {1,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
};
OutputTensors outputTensors
{
diff --git a/samples/SimpleSample.cpp b/samples/SimpleSample.cpp
index 3f94b53ca1..01f078bd56 100644
--- a/samples/SimpleSample.cpp
+++ b/samples/SimpleSample.cpp
@@ -28,7 +28,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant();
ConstTensor weights(weightsInfo, weightsData);
@@ -75,11 +75,12 @@ int main()
std::vector<float> inputData{number};
std::vector<float> outputData(1);
-
- InputTensors inputTensors{{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0),
- inputData.data())}};
+ inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, armnn::ConstTensor(inputTensorInfo,
+ inputData.data())}};
OutputTensors outputTensors{{0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0),
- outputData.data())}};
+ outputData.data())}};
// Execute network
run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 9e212306c4..76f39e0feb 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -67,12 +67,12 @@ Status IRuntime::LoadNetwork(NetworkId& networkIdOut,
return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
}
-TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
+armnn::TensorInfo IRuntime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
}
-TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
+armnn::TensorInfo IRuntime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
{
return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
}
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index 05de372eee..4052bb6d3a 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -52,8 +52,8 @@ public:
std::string& errorMessage,
const INetworkProperties& networkProperties);
- TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
- TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
+ armnn::TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
+ armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors);
std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors);
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index 66b3d2685a..65318af285 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -35,7 +35,7 @@ struct BFloat16ToFloat32
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float32);
+ TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -56,7 +56,7 @@ struct Float16ToFloat32
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float32);
+ TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -77,7 +77,7 @@ struct Float32ToBFloat16
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::BFloat16);
+ TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
@@ -98,7 +98,7 @@ struct Float32ToFloat16
info.GetNumElements(),
newValues.data());
- TensorInfo newInfo(info.GetShape(), DataType::Float16);
+ TensorInfo newInfo(info.GetShape(), DataType::Float16, 0.0f, 0, true);
ConstTensor newInput(newInfo, newValues);
handle.reset(new ScopedTensorHandle(newInput));
}
diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp
index fe8238bf14..66f722a8ef 100644
--- a/src/armnn/optimizations/FuseBatchNorm.hpp
+++ b/src/armnn/optimizations/FuseBatchNorm.hpp
@@ -146,7 +146,7 @@ public:
sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];
}
}
- ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType), fusedBiasVector);
+ ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType, 0.0f, 0, true), fusedBiasVector);
// Insert the new convolution layer that has batch norm parameters fused into
const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") + base.GetName();
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index e21e777409..cbc97b3c0e 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -122,7 +122,7 @@ TEST_CASE("CheckConvolution2dLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
@@ -146,7 +146,7 @@ TEST_CASE("CheckNamedConvolution2dLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
@@ -170,11 +170,11 @@ TEST_CASE("CheckConvolution2dLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
Optional<ConstTensor> optionalBiases(biases);
TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
@@ -200,11 +200,11 @@ TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
Optional<ConstTensor> optionalBiases(biases);
TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
@@ -228,7 +228,7 @@ TEST_CASE("CheckDepthwiseConvolution2dLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
@@ -252,7 +252,7 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
@@ -279,11 +279,11 @@ TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
Optional<ConstTensor> optionalBiases(biases);
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
@@ -309,11 +309,11 @@ TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
Optional<ConstTensor> optionalBiases(biases);
TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
@@ -333,7 +333,7 @@ TEST_CASE("CheckFullyConnectedLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConstantLayerVisitor weightsVisitor(weights);
TestFullyConnectedLayerVistor visitor(descriptor);
@@ -358,7 +358,7 @@ TEST_CASE("CheckNamedFullyConnectedLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConstantLayerVisitor weightsVisitor(weights);
TestFullyConnectedLayerVistor visitor(descriptor, layerName);
@@ -382,11 +382,11 @@ TEST_CASE("CheckFullyConnectedLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
TestConstantLayerVisitor weightsVisitor(weights);
TestConstantLayerVisitor biasesVisitor(biases);
@@ -415,11 +415,11 @@ TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
- ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
+ ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
TestConstantLayerVisitor weightsVisitor(weights);
TestConstantLayerVisitor biasesVisitor(biases);
@@ -446,19 +446,19 @@ TEST_CASE("CheckBatchNormalizationLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
@@ -477,19 +477,19 @@ TEST_CASE("CheckNamedBatchNormalizationLayer")
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
- ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
+ ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);
std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
- ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
+ ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);
std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
- ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
+ ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);
TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
@@ -504,7 +504,7 @@ TEST_CASE("CheckConstLayer")
{
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConstantLayerVisitor visitor(input);
@@ -519,7 +519,7 @@ TEST_CASE("CheckNamedConstLayer")
const char* layerName = "ConstantLayer";
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
- ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
+ ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
TestConstantLayerVisitor visitor(input, layerName);
@@ -540,47 +540,56 @@ TEST_CASE("CheckLstmLayerBasic")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -613,47 +622,56 @@ TEST_CASE("CheckNamedLstmLayerBasic")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -685,64 +703,76 @@ TEST_CASE("CheckLstmLayerCifgDisabled")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputGateBiasData);
- LstmInputParams params;
+ LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
params.m_InputToCellWeights = &inputToCellWeights;
params.m_InputToOutputWeights = &inputToOutputWeights;
@@ -777,62 +807,74 @@ TEST_CASE("CheckNamedLstmLayerCifgDisabled")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -870,59 +912,70 @@ TEST_CASE("CheckLstmLayerPeephole")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToOutputWeightsData);
- LstmInputParams params;
+ LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
params.m_InputToCellWeights = &inputToCellWeights;
params.m_InputToOutputWeights = &inputToOutputWeights;
@@ -956,77 +1009,92 @@ TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToInputWeightsData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToOutputWeightsData);
std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputGateBiasData);
LstmInputParams params;
// Basic params
@@ -1071,57 +1139,68 @@ TEST_CASE("CheckNamedLstmLayerPeephole")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToForgetWeightsData);
std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellToOutputWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1158,59 +1237,70 @@ TEST_CASE("CheckLstmLayerProjection")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ projectionWeightsData);
- LstmInputParams params;
+ LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
params.m_InputToCellWeights = &inputToCellWeights;
params.m_InputToOutputWeights = &inputToOutputWeights;
@@ -1245,57 +1335,68 @@ TEST_CASE("CheckNamedLstmLayerProjection")
std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ cellBiasData);
std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ outputGateBiasData);
std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
ConstTensor projectionBias(
- TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ projectionBiasData);
std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
ConstTensor projectionWeights(
- TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
+ projectionWeightsData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1330,47 +1431,56 @@ TEST_CASE("CheckQLstmLayerBasic")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1403,47 +1513,56 @@ TEST_CASE("CheckNamedQLstmLayerBasic")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
LstmInputParams params;
params.m_InputToForgetWeights = &inputToForgetWeights;
@@ -1475,63 +1594,75 @@ TEST_CASE("CheckQLstmLayerCifgDisabled")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
// CIFG disabled params
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ inputGateBiasData);
LstmInputParams params;
@@ -1571,79 +1702,94 @@ TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
// CIFG disabled params
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ inputGateBiasData);
// Peephole enabled, CIFG disabled params
std::vector<int16_t> cellToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToInputWeights(
- TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::QSymmS16), cellToInputWeightsData);
+ TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellToInputWeightsData);
std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16), cellToForgetWeightsData);
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellToForgetWeightsData);
std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16), cellToOutputWeightsData);
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellToOutputWeightsData);
LstmInputParams params;
@@ -1688,58 +1834,69 @@ TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
// Peephole enabled and CIFG enabled params
std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToForgetWeights(
- TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16), cellToForgetWeightsData);
+ TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellToForgetWeightsData);
std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellToOutputWeights(
- TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16), cellToOutputWeightsData);
+ TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellToOutputWeightsData);
LstmInputParams params;
@@ -1778,58 +1935,69 @@ TEST_CASE("CheckQLstmLayerProjectionEnabled")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
// Projection enabled params
std::vector<uint8_t> projectionWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
- ConstTensor projectionWeights(TensorInfo(
- 4, projectionWeightsDimensions.data(), DataType::QSymmS8), projectionWeightsData);
+ ConstTensor projectionWeights(
+ TensorInfo(4, projectionWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ projectionWeightsData);
std::vector<int32_t> projectionBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
- ConstTensor projectionBias(TensorInfo(
- 4, projectionBiasDimensions.data(), DataType::Signed32), projectionBiasData);
+ ConstTensor projectionBias(
+ TensorInfo(4, projectionBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ projectionBiasData);
LstmInputParams params;
@@ -1868,84 +2036,100 @@ TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
// CIFG disabled params
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ inputGateBiasData);
// Layer Norm enabled, CIFG disabled params
std::vector<int16_t> inputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputLayerNormWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputLayerNormWeights(
- TensorInfo(4, inputLayerNormWeightsDimensions.data(), DataType::QSymmS16), inputLayerNormWeightsData);
+ TensorInfo(4, inputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ inputLayerNormWeightsData);
std::vector<int16_t> forgetLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetLayerNormWeightsDimensions = {1, 1, 3, 3};
ConstTensor forgetLayerNormWeights(
- TensorInfo(4, forgetLayerNormWeightsDimensions.data(), DataType::QSymmS16), forgetLayerNormWeightsData);
+ TensorInfo(4, forgetLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ forgetLayerNormWeightsData);
std::vector<int16_t> cellLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellLayerNormWeightsDimensions = {1, 1, 3, 3};
ConstTensor cellLayerNormWeights(
- TensorInfo(4, cellLayerNormWeightsDimensions.data(), DataType::QSymmS16), cellLayerNormWeightsData);
+ TensorInfo(4, cellLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ cellLayerNormWeightsData);
std::vector<int16_t> outputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputLayerNormWeightsDimensions = {1, 1, 3, 3};
ConstTensor outputLayerNormWeights(
- TensorInfo(4, outputLayerNormWeightsDimensions.data(), DataType::QSymmS16), outputLayerNormWeightsData);
+ TensorInfo(4, outputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
+ outputLayerNormWeightsData);
LstmInputParams params;
@@ -1985,64 +2169,76 @@ TEST_CASE("CheckQuantizedLstmLayer")
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ inputGateBiasData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
QuantizedLstmInputParams params;
@@ -2075,64 +2271,76 @@ TEST_CASE("CheckNamedQuantizedLstmLayer")
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
+ ConstTensor recurrentToInputWeights(
+ TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
+ ConstTensor recurrentToForgetWeights(
+ TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
+ ConstTensor recurrentToCellWeights(
+ TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
- ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
+ ConstTensor recurrentToOutputWeights(
+ TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
+ recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
ConstTensor inputGateBias(
- TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
+ TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ inputGateBiasData);
std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor forgetGateBias(TensorInfo(
- 4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
+ ConstTensor forgetGateBias(
+ TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ forgetGateBiasData);
std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
- ConstTensor cellBias(TensorInfo(
- 4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
+ ConstTensor cellBias(
+ TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ cellBiasData);
std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
- ConstTensor outputGateBias(TensorInfo(
- 4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
+ ConstTensor outputGateBias(
+ TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
+ outputGateBiasData);
QuantizedLstmInputParams params;
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 759ada97cd..ea8a436177 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -2052,7 +2052,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
// ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
// ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
// this test is using NHWC, so the weights shape is OHWI
- TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0);
+ TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true);
unsigned int weightsLength = weightsTensorInfo.GetNumElements();
using WeightType = armnn::ResolveType<dataType>;
@@ -2079,7 +2079,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
armnn::DataType::Signed32 : armnn::DataType::Float32;
- TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0);
+ TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true);
unsigned int biasLength = biasTensorInfo.GetNumElements();
using BiasType = armnn::ResolveType<biasDataType>;
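
The hunk above shows the pattern this commit applies across every test file: a TensorInfo that backs a ConstTensor now passes true as a trailing isConstant argument, after the quantization scale and offset. A minimal sketch of the idiom, assuming the six-argument TensorInfo overload used throughout these hunks (the 0.9f/0 quantization parameters and the OHWI shape come from the test above; the helper name is illustrative):

    #include <armnn/Tensor.hpp>
    #include <cstdint>
    #include <vector>

    armnn::ConstTensor MakeConstWeights()
    {
        // Weights shape is OHWI (out channels, height, width, in channels) for NHWC.
        static unsigned int dims[] = { 16, 1, 1, 16 };
        // The trailing 'true' is the new isConstant flag; 0.9f/0 are the
        // quantization scale and offset that previously ended the argument list.
        armnn::TensorInfo info(4, dims, armnn::DataType::QAsymmU8, 0.9f, 0, true);
        // 'static' keeps the backing storage alive: ConstTensor only references it.
        static std::vector<uint8_t> data(info.GetNumElements(), 1);
        return armnn::ConstTensor(info, data);
    }

Without the flag, the same construction now throws InvalidArgumentException, which is what the one-line changes in this diff are avoiding.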
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index 48e2c15a79..69b9736f92 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -71,9 +71,11 @@ TEST_CASE("RuntimeRegisterDebugCallback")
std::vector<float> inputData({-2, -1, 0, 1, 2});
std::vector<float> outputData(5);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
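
Where a test feeds inputs through IRuntime, as above, the fix takes a different shape: the runtime's TensorInfo is copied, the copy is marked constant, and only then is it wrapped in a ConstTensor. A hypothetical helper (not part of Arm NN) capturing that three-step idiom:

    #include <armnn/ArmNN.hpp>

    armnn::ConstTensor MakeConstInput(armnn::IRuntime* runtime,
                                      armnn::NetworkId netId,
                                      armnn::LayerBindingId bindingId,
                                      const void* data)
    {
        // Copy rather than mutate the runtime's own info: only this input
        // wrapper needs to promise that the backing memory is immutable.
        armnn::TensorInfo info = runtime->GetInputTensorInfo(netId, bindingId);
        info.SetConstant(true);
        return armnn::ConstTensor(info, data);
    }

RuntimeTests, FoldPadTests, FuseActivationTests, FuseBatchNormTests and the serializer tests further down all repeat this sequence inline.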
diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index cb56873663..cdd86c06e4 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -27,7 +27,7 @@ TEST_CASE("ErrorOnLoadNetwork")
INetworkPtr net(INetwork::Create());
std::vector<uint8_t> falseData = {0};
- ConstTensor falseTensor(armnn::TensorInfo({1}, armnn::DataType::Boolean), falseData);
+ ConstTensor falseTensor(armnn::TensorInfo({1}, armnn::DataType::Boolean, 0.0f, 0, true), falseData);
IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const");
constLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean));
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index b697f6dbe6..f3753398b4 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -601,7 +601,7 @@ TEST_CASE("CheckGraphConstTensorSharing")
armnn::ConstantLayer* const constantLayer = graph1.AddLayer<armnn::ConstantLayer>("ConstantLayer");
float weight = 1.0f;
- armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);
+ armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32, 0.0f, 0, true}, &weight);
     constantLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
// point sharedWeightPtr to graph1's const tensor
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 25dab596fd..c1927e3601 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -74,7 +74,7 @@ TEST_CASE("NetworkModification")
unsigned int dims[] = { 10,1,1,1 };
std::vector<float> convWeightsData(10);
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), convWeightsData);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData);
armnn::Convolution2dDescriptor convDesc2d;
armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d,
@@ -123,7 +123,7 @@ TEST_CASE("NetworkModification")
armnn::BatchNormalizationDescriptor batchNormDesc;
- armnn::TensorInfo tensorInfo({ 1 }, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> data(tensorInfo.GetNumBytes() / sizeof(float));
armnn::ConstTensor invalidTensor(tensorInfo, data);
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 3cea1b540e..750e6967ad 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -405,7 +405,9 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
std::vector<float> weightsVector(90);
- armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
+ armnn::ConstTensor weights(
+ armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
+ weightsVector);
Convolution2dDescriptor desc;
desc.m_BiasEnabled = false;
@@ -455,7 +457,9 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
std::vector<float> weightsVector(18);
- armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
+ armnn::ConstTensor weights(
+ armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32, 0.0f, 0, true),
+ weightsVector);
DepthwiseConvolution2dDescriptor desc;
desc.m_BiasEnabled = false;
@@ -653,7 +657,7 @@ TEST_CASE("DetectionPostProcessValidateTensorShapes")
armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
std::vector<uint8_t> anchorsVector(40);
- armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
+ armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8, 0.0f, 0, true), anchorsVector);
armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
@@ -833,16 +837,16 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
TensorInfo outputInfo(4, outputDimensionSizes, DataType::Float32);
std::vector<float> weightsVector = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
- ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32), weightsVector);
+ ConstTensor weights(TensorInfo(4, weightsDimensionSizes, DataType::Float32, 0.0f, 0, true), weightsVector);
std::vector<float> betaVector = { 0.1f };
std::vector<float> gammaVector = { 0.5f };
std::vector<float> meanVector = { 0 };
std::vector<float> varianceVector = { 1 };
- ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32), betaVector);
- ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32), gammaVector);
- ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32), varianceVector);
+ ConstTensor beta(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), betaVector);
+ ConstTensor gamma(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), gammaVector);
+ ConstTensor mean(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), varianceVector);
// Define the network
Graph graph;
@@ -863,7 +867,7 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
if (convolution2dDescriptor.m_BiasEnabled)
{
std::vector<float> biasVector = { 11 };
- ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32), biasVector);
+ ConstTensor bias(TensorInfo(1, outputChannelSize, DataType::Float32, 0.0f, 0, true), biasVector);
conv->m_Bias = std::make_unique<ScopedTensorHandle>(bias);
}
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 397a545878..f055f2368b 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -97,8 +97,8 @@ TEST_CASE("RuntimePreImportInputs")
std::vector<int> inputData2(4, 20);
std::vector<int> output(4);
- ConstTensor inputTensor1({{4}, armnn::DataType::Signed32}, inputData1.data());
- ConstTensor inputTensor2({{4}, armnn::DataType::Signed32}, inputData2.data());
+ ConstTensor inputTensor1({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData1.data());
+ ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
@@ -177,7 +177,7 @@ TEST_CASE("RuntimePreImportOutputs")
armnn::NetworkId networkId = 1;
armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
- TensorInfo tensorInfo{{4}, armnn::DataType::Float32};
+ TensorInfo tensorInfo{{4}, armnn::DataType::Float32, 0.0f, 0, true};
auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
@@ -902,9 +902,11 @@ TEST_CASE("ProfilingEnableCpuRef")
std::vector<float> inputData(16);
std::vector<float> outputData(16);
+ TensorInfo inputTensorInfo = runtime.GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index d3c928fec1..f808a0e349 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -233,14 +233,14 @@ TEST_CASE("ConcatTest")
CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
}
-TEST_CASE("ConstantTesst")
+TEST_CASE("ConstantTest")
{
Graph graph;
TensorShape outputShape{ 1, 1, 3, 3 };
auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
const float Datum = 0.0f;
- ConstTensor output0({outputShape, DataType::Float32}, &Datum);
+ ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -294,7 +294,7 @@ TEST_CASE("Convolution2dTest")
"conv2d");
const float Datum = 0.0f;
- ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+ ConstTensor weights({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
@@ -339,7 +339,7 @@ TEST_CASE("DepthwiseConvolutionTest")
"depthwiseconv2d");
const float Datum = 0.0f;
- ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+ ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
@@ -371,7 +371,7 @@ TEST_CASE("DetectionPostProcessTest")
descriptor.m_ScaleW = 5.0;
const float Datum = 0.0f;
- ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32}, &Datum);
+ ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
Graph graph;
@@ -460,7 +460,7 @@ TEST_CASE("LstmTest")
auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
float Datum = 0.0f;
- ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+ ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -548,7 +548,7 @@ TEST_CASE("QLstmTest")
auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
float Datum = 0.0f;
- ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+ ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -576,7 +576,7 @@ TEST_CASE("QuantizedLstmTest")
     auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, "quantizedlstm");
float Datum = 0.0f;
- ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum);
+ ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index 1ecad503d4..8d8751f614 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -145,18 +145,56 @@ TEST_CASE("TensorVsConstTensor")
const int immutableDatum = 3;
armnn::Tensor uninitializedTensor;
+ uninitializedTensor.GetInfo().SetConstant(true);
armnn::ConstTensor uninitializedTensor2;
uninitializedTensor2 = uninitializedTensor;
- armnn::Tensor t(TensorInfo(), &mutableDatum);
- armnn::ConstTensor ct(TensorInfo(), &immutableDatum);
+ armnn::TensorInfo emptyTensorInfo;
+ emptyTensorInfo.SetConstant(true);
+ armnn::Tensor t(emptyTensorInfo, &mutableDatum);
+ armnn::ConstTensor ct(emptyTensorInfo, &immutableDatum);
// Checks that both Tensor and ConstTensor can be passed as a ConstTensor.
CheckTensor(t);
CheckTensor(ct);
}
+TEST_CASE("ConstTensor_EmptyConstructorTensorInfoSet")
+{
+ armnn::ConstTensor t;
+ CHECK(t.GetInfo().IsConstant() == true);
+}
+
+TEST_CASE("ConstTensor_TensorInfoNotConstantError")
+{
+ armnn::TensorInfo tensorInfo ({ 1 }, armnn::DataType::Float32);
+ std::vector<float> tensorData = { 1.0f };
+ try
+ {
+ armnn::ConstTensor ct(tensorInfo, tensorData);
+ FAIL("InvalidArgumentException should have been thrown");
+ }
+ catch(const InvalidArgumentException& exc)
+ {
+ CHECK(strcmp(exc.what(), "Invalid attempt to construct ConstTensor from non-constant TensorInfo.") == 0);
+ }
+}
+
+TEST_CASE("PassTensorToConstTensor_TensorInfoNotConstantError")
+{
+ try
+ {
+ armnn::ConstTensor t = ConstTensor(Tensor());
+ FAIL("InvalidArgumentException should have been thrown");
+ }
+ catch(const InvalidArgumentException& exc)
+ {
+ CHECK(strcmp(exc.what(), "Invalid attempt to construct ConstTensor from "
+ "Tensor due to non-constant TensorInfo") == 0);
+ }
+}
+
TEST_CASE("ModifyTensorInfo")
{
TensorInfo info;
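
The two negative tests added above pin down the exact exception text, so the check the ConstTensor constructors now perform can be read back from them. A sketch of that guard, inferred from the expected messages rather than copied from include/armnn/Tensor.hpp:

    // Inferred shape of the new validation; the real constructor may differ in detail.
    ConstTensor(const TensorInfo& info, const void* memoryArea)
        : BaseTensor<const void*>(info, memoryArea)
    {
        if (!info.IsConstant())
        {
            throw armnn::InvalidArgumentException(
                "Invalid attempt to construct ConstTensor from non-constant TensorInfo.");
        }
    }

The empty-constructor test above implies the default ConstTensor takes the opposite route and flags its own TensorInfo as constant instead of throwing.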
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 36a4507fc3..7573005518 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -290,7 +290,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::QAsymmU8);
- const TensorInfo info1({ 5 }, DataType::QAsymmU8);
+ const TensorInfo info1({ 5 }, DataType::QAsymmU8, 0.0f, 0, true);
const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::QAsymmU8);
auto input = graph.AddLayer<InputLayer>(0, "input");
@@ -346,7 +346,7 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
// What we'll do is have two sequential add layers both using the same const tensor.
Graph graph;
const TensorInfo inputInfo({ 1, 512 }, DataType::Float32);
- const TensorInfo constantTermInfo({ 1 }, DataType::Float32);
+ const TensorInfo constantTermInfo({ 1 }, DataType::Float32, 0.0f, 0, true);
const TensorInfo outputInfo({ 1, 512 }, DataType::Float32);
auto input = graph.AddLayer<InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index b78a1bf207..7b326fa8bc 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -32,7 +32,7 @@ TEST_CASE("ConvertConstantsFloatToBFloatTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
@@ -88,7 +88,7 @@ TEST_CASE("ConvertConstantsBFloatToFloatTest")
std::vector<uint16_t> bfWeights(8);
armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
bfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16), bfWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index e6cca4f7bf..f74ab0f308 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -25,7 +25,7 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
// Create const tensor from fp32 data
unsigned int dims[] = { 4, 1, 1, 1 };
std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 2ec1279f33..c4551525c1 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -25,7 +25,7 @@ TEST_CASE("ConvertConstantsHalfToFloatTest")
std::vector<uint16_t> halfWeights(4);
armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
halfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16), halfWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 11f09e80e0..a598983706 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -45,7 +45,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);
+ ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
@@ -122,7 +122,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
depthwiseConvolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);
+ ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
"depthwiseConv2d");
@@ -526,7 +526,9 @@ TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimiza
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity());
@@ -614,10 +616,10 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsData);
std::vector<float> biasVector = {5, 6, 7, 8};
- TensorInfo biasInfo({4}, DataType::Float32);
+ TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -644,7 +646,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(100, -std::numeric_limits<float>::infinity());
@@ -732,10 +736,10 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsData);
std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8};
- TensorInfo biasInfo({12}, DataType::Float32);
+ TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -762,7 +766,9 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(300, -std::numeric_limits<float>::infinity());
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index 384b14c0cf..63cd170f02 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -59,12 +59,12 @@ TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create const bias fp32 data
unsigned int biasDims[] {4};
std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+ armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
// A network with Convolution2d layer
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
@@ -129,12 +129,12 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create const bias fp32 data
unsigned int biasDims[] {4};
std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+ armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
// A network with FullyConnected layer
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2352a3c498..54a9d9a189 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -66,7 +66,7 @@ struct Convolution2dTest
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
ConstTensor weights(weightsInfo, weightsVector);
Optional<ConstTensor> optionalBias;
@@ -115,7 +115,7 @@ public:
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
ConstTensor weights(weightsInfo, weightsVector);
Optional<ConstTensor> optionalBias;
@@ -212,10 +212,10 @@ public:
std::vector<T> varianceVector = GetVector<T>(GetOutputShape()[3], 1.0f, 0.1f);
const unsigned int outputChannelSize[] = { GetOutputShape()[3] };
- ConstTensor beta(TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
- ConstTensor gamma(TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
- ConstTensor mean(TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+ ConstTensor beta(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), betaVector);
+ ConstTensor gamma(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), gammaVector);
+ ConstTensor mean(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), varianceVector);
return network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, name);
}
@@ -491,8 +491,11 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
std::vector<T> outputDataFused(LayerTest::outputSize);
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+
InputTensors inputTensorsFused{
- {0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
@@ -545,8 +548,11 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
std::vector<T> outputDataNotFused(LayerTest::outputSize);
std::vector<T> outputData2NotFused(LayerTest::outputSize);
+ TensorInfo inputTensorInfoNotFused = runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0);
+ inputTensorInfoNotFused.SetConstant(true);
+
InputTensors inputTensorsNotFused{
- {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+ {0, ConstTensor(inputTensorInfoNotFused, inputDataNotFused.data())}};
OutputTensors outputTensorsNotFused{
{0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
@@ -591,8 +597,11 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
std::vector<T> outputDataFused(LayerTest::outputSize);
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+
InputTensors inputTensorsFused{
- {0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
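
One detail worth noting in the hunks above: only the input side changes. OutputTensors wrap the mutable Tensor type, so the TensorInfo returned by GetOutputTensorInfo is used as-is, with no SetConstant(true). A small sketch of that asymmetry (function and parameter names are illustrative):

    #include <armnn/ArmNN.hpp>
    #include <vector>

    armnn::OutputTensors MakeOutputs(armnn::IRuntime* runtime,
                                     armnn::NetworkId netId,
                                     std::vector<float>& storage)
    {
        // No SetConstant(true) here: the workload writes into a Tensor,
        // so its TensorInfo legitimately stays non-constant.
        armnn::TensorInfo info = runtime->GetOutputTensorInfo(netId, 0);
        storage.resize(info.GetNumElements());
        return armnn::OutputTensors{ { 0, armnn::Tensor(info, storage.data()) } };
    }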
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 20d2940b81..0e969c1a5c 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -107,11 +107,11 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector(begin(weightsIntVector), end(weightsIntVector));
- TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType);
+ TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsVector);
std::vector<T> biasVector = GetVector<T>(outputDimensionSizes[3], 3.3f, 0.1f);
- TensorInfo biasInfo(1, outputChannelSize, ArmnnType);
+ TensorInfo biasInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -120,10 +120,10 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
std::vector<T> meanVector = GetVector<T>(outputDimensionSizes[3], 0.1f, 0.1f);
std::vector<T> varianceVector = GetVector<T>(outputDimensionSizes[3], 1.0f, 0.1f);
- ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
- ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
- ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+ ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), betaVector);
+ ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), gammaVector);
+ ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), varianceVector);
// Create a network
INetworkPtr network = INetwork::Create();
@@ -215,8 +215,10 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataFused.resize(108);
}
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensorsFused {
- {0, ConstTensor(run->GetInputTensorInfo (networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
@@ -259,8 +261,11 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataNotFused.resize(108);
outputData2NotFused.resize(108);
}
+
+ TensorInfo inputTensorInfo2 = runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0);
+ inputTensorInfo2.SetConstant(true);
InputTensors inputTensorsNotFused{
- {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+ {0, ConstTensor(inputTensorInfo2, inputDataNotFused.data())}};
OutputTensors outputTensorsNotFused{
{0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index e91e16f132..f862315220 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -232,7 +232,7 @@ TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
-1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
// clang-format on
};
- ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
+ ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32, 0.0f, 0, true), inputData);
InputTensors inputs = { { 0, input } };
std::vector<float> outputData(4 * 6);
Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
@@ -279,7 +279,7 @@ TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
-1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
// clang-format on
};
- ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
+ ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32, 0.0f, 0, true), inputData);
InputTensors inputs = { { 0, input } };
std::vector<float> outputData(4 * 6);
Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index 0be8857224..692f371356 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -94,9 +94,11 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
// Create input and output tensors
std::vector<float> outputData(expectedOutput.size());
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index ac8d4b3ba6..f4600596c8 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -215,6 +215,7 @@ void ParserFlatbuffersSerializeFixture::RunTest(
{
armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
m_Parser->GetNetworkInputBindingInfo(layersId, it.first));
+ bindingInfo.second.SetConstant(true);
armnn::VerifyTensorInfoDataType(bindingInfo.second, ArmnnInputType);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
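
The deserializer fixture gets the same treatment through its binding info, where BindingPointInfo pairs a LayerBindingId with the bound TensorInfo. A standalone sketch of that path (the binding id, shape and data here are placeholders; the fixture obtains the real binding via GetNetworkInputBindingInfo):

    #include <armnn/ArmNN.hpp>
    #include <vector>

    armnn::InputTensors MakeParserInputs(const std::vector<float>& data)
    {
        // BindingPointInfo is std::pair<LayerBindingId, TensorInfo>.
        armnn::BindingPointInfo bindingInfo{ 0, armnn::TensorInfo({ 1, 4 }, armnn::DataType::Float32) };
        // Without this, constructing the ConstTensor below throws.
        bindingInfo.second.SetConstant(true);
        return armnn::InputTensors{ { bindingInfo.first,
                                      armnn::ConstTensor(bindingInfo.second, data.data()) } };
    }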
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index fb99e0bc3e..341752dd67 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -84,9 +84,11 @@ TEST_CASE("ActivationSerialization")
run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
std::vector<float> inputData {0.0f, -5.3f, 42.0f, -42.0f};
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
std::vector<float> expectedOutputData {0.0f, 0.0f, 42.0f, 0.0f};
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index bdc37877f7..3178bc990e 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -190,7 +190,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -200,7 +200,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -210,7 +210,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
@@ -304,7 +304,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -317,7 +317,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -330,7 +330,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -352,11 +352,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -451,7 +451,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -464,7 +464,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -477,7 +477,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -499,11 +499,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -1236,7 +1236,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
const uint32_t numUnits = 20u;
const uint32_t outputSize = 16u;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -1249,7 +1249,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -1262,7 +1262,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -1284,11 +1284,11 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.0f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -1454,7 +1454,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
armnn::TensorShape inputToForgetWeightsShape = {4, 2};
@@ -1462,7 +1462,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
armnn::TensorShape inputToCellWeightsShape = {4, 2};
@@ -1470,7 +1470,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
armnn::TensorShape inputToOutputWeightsShape = {4, 2};
@@ -1478,7 +1478,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
// The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
@@ -1487,7 +1487,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
@@ -1495,7 +1495,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
@@ -1503,7 +1503,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
@@ -1511,7 +1511,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
// The shape of bias data is {outputSize} = {4}
@@ -1520,7 +1520,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
armnn::TensorShape forgetGateBiasShape = {4};
@@ -1528,7 +1528,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
armnn::TensorShape cellBiasShape = {4};
@@ -1536,7 +1536,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo cellBiasInfo(cellBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
armnn::TensorShape outputGateBiasShape = {4};
@@ -1544,7 +1544,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
armnn::QuantizedLstmInputParams params;
@@ -1655,14 +1655,14 @@ TEST_CASE("SerializeDeserializeQLstmBasic")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
- armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
+ armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset, true);
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -1816,22 +1816,22 @@ TEST_CASE("SerializeDeserializeQLstmCifgLayerNorm")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2003,32 +2003,32 @@ TEST_CASE("SerializeDeserializeQLstmAdvanced")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::TensorInfo peepholeWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2213,7 +2213,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -2223,7 +2223,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -2233,7 +2233,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
@@ -2318,7 +2318,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -2331,7 +2331,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -2344,7 +2344,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -2366,11 +2366,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -2456,7 +2456,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -2469,7 +2469,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -2482,7 +2482,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -2504,11 +2504,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -2611,7 +2611,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -2621,7 +2621,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -2631,7 +2631,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
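The serializer tests below also cover the per-axis quantization overload of the TensorInfo constructor; isConstant is again the final argument, following the scale vector and the quantization dimension. A condensed sketch mirroring SerializeConvolution2dWithPerAxisParams:

    const std::vector<float> quantScales{ 0.75f, 0.65f, 0.85f };
    constexpr unsigned int quantDimension = 0;
    const armnn::TensorInfo kernelInfo({ 3, 1, 1, 2 }, armnn::DataType::QSymmS8,
                                       quantScales, quantDimension, true);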
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index e32b90837d..f4e25998d9 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -98,10 +98,10 @@ TEST_CASE("SerializeBatchNormalization")
const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32);
+ const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
armnn::BatchNormalizationDescriptor descriptor;
descriptor.m_Eps = 0.0010000000475f;
@@ -307,7 +307,7 @@ TEST_CASE("SerializeConstant")
};
const std::string layerName("constant");
- const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);
@@ -339,8 +339,8 @@ TEST_CASE("SerializeConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -395,10 +395,10 @@ TEST_CASE("SerializeConvolution2dWithPerAxisParams")
const std::vector<float> quantScales{ 0.75f, 0.65f, 0.85f };
constexpr unsigned int quantDimension = 0;
- const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension);
+ const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension, true);
const std::vector<float> biasQuantScales{ 0.25f, 0.50f, 0.75f };
- const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension);
+ const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension, true);
std::vector<int8_t> kernelData = GenerateRandomData<int8_t>(kernelInfo.GetNumElements());
armnn::ConstTensor weights(kernelInfo, kernelData);
@@ -445,8 +445,8 @@ TEST_CASE("SerializeConvolution3d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -530,8 +530,8 @@ TEST_CASE("SerializeDepthwiseConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -585,11 +585,11 @@ TEST_CASE("SerializeDepthwiseConvolution2dWithPerAxisParams")
const std::vector<float> quantScales{ 0.75f, 0.80f, 0.90f, 0.95f };
const unsigned int quantDimension = 0;
- TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension);
+ TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension, true);
const std::vector<float> biasQuantScales{ 0.25f, 0.35f, 0.45f, 0.55f };
constexpr unsigned int biasQuantDimension = 0;
- TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension);
+ TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension, true);
std::vector<int8_t> kernelData = GenerateRandomData<int8_t>(kernelInfo.GetNumElements());
armnn::ConstTensor weights(kernelInfo, kernelData);
@@ -685,7 +685,7 @@ TEST_CASE("SerializeDeserializeDetectionPostProcess")
descriptor.m_ScaleH = 5.0;
descriptor.m_ScaleW = 5.0;
- const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+ const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32, 0.0f, 0, true);
const std::vector<float> anchorsData({
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
@@ -913,8 +913,8 @@ TEST_CASE("SerializeFullyConnected")
const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -1003,8 +1003,8 @@ TEST_CASE("SerializeFullyConnectedWeightsAndBiasesAsConstantLayers")
const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
@@ -1077,7 +1077,7 @@ TEST_CASE("SerializeGather")
const std::string layerName("gather");
armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
- const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
+ const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32, 0.0f, 0, true);
GatherDescriptor descriptor;
descriptor.m_Axis = 1;
@@ -2447,7 +2447,7 @@ TEST_CASE("SerializeSwitch")
};
const std::string layerName("switch");
- const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);
@@ -2509,8 +2509,8 @@ TEST_CASE("SerializeTransposeConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 9, 9, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -2594,7 +2594,7 @@ TEST_CASE("SerializeDeserializeNonLinearNetwork")
};
const std::string layerName("constant");
- const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index a237d2fc14..871f647bb2 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -303,6 +303,7 @@ void ParserFlatbuffersFixture::FillInputTensors(
for (auto&& it : inputData)
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
+ bindingInfo.second.SetConstant(true);
armnn::VerifyTensorInfoDataType(bindingInfo.second, dataType);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 3c659d3fd6..76e65dfd8c 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -208,6 +208,7 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
for (auto&& it : inputData)
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
+ bindingInfo.second.SetConstant(true);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
if (bindingInfo.second.GetNumElements() != it.second.size())
{
diff --git a/src/armnnUtils/TensorIOUtils.hpp b/src/armnnUtils/TensorIOUtils.hpp
index b06bb7132b..55dd3428b8 100644
--- a/src/armnnUtils/TensorIOUtils.hpp
+++ b/src/armnnUtils/TensorIOUtils.hpp
@@ -41,8 +41,9 @@ inline armnn::InputTensors MakeInputTensors(const std::vector<armnn::BindingPoin
inputBinding.second.GetNumElements(),
value.size()));
}
-
- armnn::ConstTensor inputTensor(inputBinding.second, value.data());
+ armnn::TensorInfo inputTensorInfo = inputBinding.second;
+ inputTensorInfo.SetConstant(true);
+ armnn::ConstTensor inputTensor(inputTensorInfo, value.data());
inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
},
inputData);
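MakeInputTensors receives its bindings by const reference, so the TensorInfo is copied before SetConstant(true) rather than mutated through the binding. The same copy-then-set idiom applies anywhere the source info must remain untouched; a hypothetical helper capturing it:

    // Hypothetical convenience, not part of the patch: copy, flag, return.
    armnn::TensorInfo MakeConstantInfo(const armnn::BindingPointInfo& binding)
    {
        armnn::TensorInfo info = binding.second; // the caller's info is unchanged
        info.SetConstant(true);
        return info;
    }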
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index fe681936f1..fcdad3e21b 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -33,7 +33,7 @@ armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
{
::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
}
-
+ tensorInfo.SetConstant(true);
return ConstTensor(tensorInfo, permuteBuffer);
}
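PermuteTensor, by contrast, already operates on a local tensorInfo holding the permuted shape, so it can be flagged constant directly and no defensive copy is needed before wrapping the permute buffer:

    // Tail of PermuteTensor, condensed: tensorInfo is a local copy.
    tensorInfo.SetConstant(true);
    return ConstTensor(tensorInfo, permuteBuffer);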
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
index 0b1bf772ce..f7d4596450 100644
--- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -127,7 +127,7 @@ void EluEndToEndTest(const std::vector<BackendId>& backends)
float qScale = 1.0f;
int32_t qOffset = 0;
- armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
@@ -156,7 +156,7 @@ void HardSwishEndToEndTest(const std::vector<BackendId>& backends)
float qScale = 1.0f;
int32_t qOffset = 0;
- armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0);
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
index 2ffe06f218..041f9f8f17 100644
--- a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
@@ -47,7 +47,7 @@ void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape,
const float qScale = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2 : 0;
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
// quantize data
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index 254b3c20a0..859694ceb2 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -30,7 +30,7 @@ INetworkPtr CreateBatchToSpaceNdNetwork(const armnn::TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
BatchToSpaceNdDescriptor batchToSpaceNdDesc(blockShape, crops);
diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
index 9ec764402e..7d46be7bcb 100644
--- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
@@ -37,6 +37,7 @@ void ChannelShuffleEndToEnd(const std::vector<BackendId>& backends)
inputInfo.SetQuantizationScale(1.0f);
inputInfo.SetQuantizationOffset(0);
+ inputInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index 40e3fd62ee..e274163c6f 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -35,7 +35,7 @@ INetworkPtr CreateComparisonNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
- TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, comparisonLayer, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index 5b2f33fc1a..62f0e4cd36 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -39,7 +39,7 @@ INetworkPtr CreateConcatNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
- TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, concat, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
index 33bf9a180b..b1f685b4cd 100644
--- a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -56,7 +56,7 @@ void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index ea997290e5..0a4c29b56d 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -120,7 +120,7 @@ std::unique_ptr<Workload> CreateWorkload(TensorInfo info, ITensorHandle* inputTe
TEST_CASE("TestAsyncExecute")
{
- TensorInfo info({5}, DataType::Signed32);
+ TensorInfo info({5}, DataType::Signed32, 0.0f, 0, true);
int inVals[5]{2, 2, 2, 2, 2};
int outVals[5]{1, 1, 1, 1, 1};
@@ -157,7 +157,7 @@ TEST_CASE("TestAsyncExecute")
TEST_CASE("TestDefaultAsyncExecute")
{
- TensorInfo info({5}, DataType::Signed32);
+ TensorInfo info({5}, DataType::Signed32, 0.0f, 0, true);
std::vector<int> inVals{2, 2, 2, 2, 2};
std::vector<int> outVals{1, 1, 1, 1, 1};
@@ -193,7 +193,7 @@ TEST_CASE("TestDefaultAsyncExeuteWithThreads")
{
// Use a large vector so the threads have a chance to interact
unsigned int vecSize = 1000;
- TensorInfo info({vecSize}, DataType::Signed32);
+ TensorInfo info({vecSize}, DataType::Signed32, 0.0f, 0, true);
std::vector<int> inVals1(vecSize, 2);
std::vector<int> outVals1(vecSize, 1);
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
index c6176aef5b..b64e618075 100644
--- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
@@ -44,6 +44,7 @@ void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends,
using namespace armnn;
TensorInfo inputInfo(nhwcInputShape, ArmnnType);
+ inputInfo.SetConstant(true);
TensorInfo outputInfo(nhwcOutputShape, ArmnnType);
constexpr float qScale = 0.25f;
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index a5e2faccc9..fff4c4fab9 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -43,6 +43,7 @@ void DequantizeEndToEndLayerTestImpl(const std::vector<BackendId>& backends,
inputInfo.SetQuantizationScale(scale);
inputInfo.SetQuantizationOffset(offset);
+ inputInfo.SetConstant(true);
// Builds up the structure of the network
armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index a566964ba2..c4488865a1 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -82,10 +82,13 @@ void DetectionPostProcessEndToEnd(const std::vector<BackendId>& backends, bool u
boxEncodingsInfo.SetQuantizationScale(boxScale);
boxEncodingsInfo.SetQuantizationOffset(boxOffset);
+ boxEncodingsInfo.SetConstant(true);
scoresInfo.SetQuantizationScale(scoreScale);
scoresInfo.SetQuantizationOffset(scoreOffset);
+ scoresInfo.SetConstant(true);
anchorsInfo.SetQuantizationScale(anchorScale);
anchorsInfo.SetQuantizationOffset(anchorOffset);
+ anchorsInfo.SetConstant(true);
// Builds up the structure of the network
armnn::INetworkPtr net = CreateDetectionPostProcessNetwork<T>(boxEncodingsInfo, scoresInfo,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 046ee3a488..0d9d3dd31b 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1594,10 +1594,12 @@ void SampleDynamicBackendEndToEndTestImpl()
std::vector<float> expectedOutputData{ 15.0f, 11.0f };
std::vector<float> outputData(2);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())},
- {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
};
OutputTensors outputTensors
{
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index f958613d02..635dc96720 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -33,7 +33,7 @@ INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape,
ElementwiseUnaryDescriptor descriptor(operation);
IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
- TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 2d268f8ea1..269a46077e 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -79,7 +79,8 @@ bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
{
- const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+ TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+ commonTensorInfo.SetConstant(true);
return ConstantUsageTest(backends,
commonTensorInfo,
@@ -98,6 +99,7 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
commonTensorInfo.SetQuantizationScale(scale);
commonTensorInfo.SetQuantizationOffset(offset);
+ commonTensorInfo.SetConstant(true);
return ConstantUsageTest(backends,
commonTensorInfo,
@@ -198,7 +200,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -263,7 +265,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -334,7 +336,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -418,7 +420,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
@@ -449,6 +451,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -507,7 +510,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
@@ -536,6 +539,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -594,7 +598,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
@@ -624,6 +628,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -685,7 +690,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true));
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
@@ -794,7 +799,7 @@ inline void StridedSliceInvalidSliceEndToEndTest(std::vector<BackendId> backends
input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0));
stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true));
stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
// Attempt to optimize the network and check that the correct exception is thrown
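In the import/export end-to-end tests above, the input layer's output slot is given a TensorInfo that is constant from the start, so that the info the runtime later returns for that binding already carries the flag when the ConstTensor is created. A sketch, assuming input is the network's input layer:

    // Constant at the slot: a ConstTensor built later from the runtime's
    // GetInputTensorInfo(netId, 0) then passes the new constructor check.
    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 1, 1, 4 }, armnn::DataType::Float32, 0.0f, 0, true));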
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 2a4ccb6898..27e5aa0229 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -52,7 +52,7 @@ void FillEndToEnd(const std::vector<armnn::BackendId>& backends)
};
std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData);
- TensorInfo inputInfo ({ 4 }, DataType::Signed32);
+ TensorInfo inputInfo ({ 4 }, DataType::Signed32, 0.0f, 0, true);
TensorInfo outputInfo({ 1, 1, 5, 3 }, ArmnnType);
armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor);
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index f9bdfde622..878b6afeee 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -166,6 +166,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, ArmnnType);
inputTensorInfo.SetQuantizationScale(0.1f);
inputTensorInfo.SetQuantizationOffset(63);
+ inputTensorInfo.SetConstant(true);
armnn::TensorInfo outputTensorInfo({ 1, 2 }, ArmnnType);
outputTensorInfo.SetQuantizationScale(5.f);
@@ -174,6 +175,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
armnn::TensorInfo weightsTensorInfo({ 2, 6 }, ArmnnType);
weightsTensorInfo.SetQuantizationScale(0.2f);
weightsTensorInfo.SetQuantizationOffset(93);
+ weightsTensorInfo.SetConstant(true);
FullyConnectedDescriptor descriptor;
descriptor.m_ConstantWeights = false;
@@ -236,10 +238,10 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
unsigned int biasShape[] = { outputChannels };
- armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
- armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
- armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+ armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> input =
{
@@ -352,10 +354,10 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
unsigned int biasShape[] = { outputChannels };
- armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
- armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
- armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+ armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weights =
{
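Where an info is configured incrementally, the patch sets the flag with SetConstant(true) after the quantization parameters rather than through the constructor, as in the FullyConnectedWithDynamicWeightsEndToEnd hunks above. A sketch of that style (data type, scale and offset illustrative):

    // Build the info first, then flag it before any ConstTensor wraps the buffer.
    armnn::TensorInfo weightsTensorInfo({ 2, 6 }, armnn::DataType::QAsymmU8);
    weightsTensorInfo.SetQuantizationScale(0.2f);
    weightsTensorInfo.SetQuantizationOffset(93);
    weightsTensorInfo.SetConstant(true);
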
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 431ef31437..4c67ec2c8e 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -42,6 +42,8 @@ void GatherEndToEnd(const std::vector<BackendId>& backends)
paramsInfo.SetQuantizationScale(1.0f);
paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
@@ -78,6 +80,8 @@ void GatherMultiDimEndToEnd(const std::vector<BackendId>& backends)
paramsInfo.SetQuantizationScale(1.0f);
paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index d758137b3b..e715e6b187 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -36,7 +36,7 @@ armnn::INetworkPtr CreateInstanceNormalizationNetwork(const armnn::TensorShape&
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
InstanceNormalizationDescriptor instanceNormalizationDesc;
instanceNormalizationDesc.m_Gamma = gamma;
@@ -104,7 +104,7 @@ void InstanceNormalizationNhwcEndToEndTest1(const std::vector<armnn::BackendId>&
const float gamma = 1.0f;
TensorShape inputShape{2, 2, 2, 2};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -174,7 +174,7 @@ void InstanceNormalizationNchwEndToEndTest1(const std::vector<armnn::BackendId>&
const float gamma = 1.0f;
TensorShape inputShape{2, 2, 2, 2};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -248,7 +248,7 @@ void InstanceNormalizationNhwcEndToEndTest2(const std::vector<armnn::BackendId>&
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
@@ -319,7 +319,7 @@ void InstanceNormalizationNchwEndToEndTest2(const std::vector<armnn::BackendId>&
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 94855aa7b2..226e2b3364 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -177,9 +177,11 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
};
std::vector<uint8_t> outputData(5);
+ TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())}
};
armnn::OutputTensors outputTensors
{
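This hunk introduces the pattern most of the remaining files repeat: the info returned by GetInputTensorInfo() does not carry the constant flag, so the test takes a by-value copy, marks it constant, and builds the ConstTensor from the copy. A condensed sketch, with runtime, netId and inputData as in the test above:

    armnn::TensorInfo inputInfo = runtime->GetInputTensorInfo(netId, 0); // by-value copy
    inputInfo.SetConstant(true);
    armnn::InputTensors inputTensors
    {
        { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
    };
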
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index 1f7f57806e..181ecd912f 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -27,7 +27,7 @@ armnn::INetworkPtr CreateLogSoftmaxNetwork(const armnn::TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
LogSoftmaxDescriptor logSoftmaxDesc;
logSoftmaxDesc.m_Beta = beta;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index b0ee9bee32..6eecaabf55 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -370,8 +370,8 @@ TEST_CASE("OptimizeNetworkCopy")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 2, 2, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -443,10 +443,12 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<float> inputData = GenerateRandomData<float>(runtime->GetInputTensorInfo(optNetId, 0).GetNumElements());
std::vector<float> outputData(runtime->GetOutputTensorInfo(optNetId, 0).GetNumElements());
+ armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(optNetId, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
{
- 0 ,armnn::ConstTensor(runtime->GetInputTensorInfo(optNetId, 0), inputData.data())
+ 0, armnn::ConstTensor(inputTensorInfo, inputData.data())
}
};
armnn::OutputTensors outputTensors
@@ -464,10 +466,12 @@ TEST_CASE("OptimizeNetworkCopy")
armnn::NetworkId netId = networkIds[i];
std::vector<float> copyOutputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements());
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors copyInputTensors
{
{
- 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())
+ 0, armnn::ConstTensor(inputTensorInfo2, inputData.data())
}
};
armnn::OutputTensors copyOutputTensors
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index e11553dd38..c31d084b0e 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -51,8 +51,10 @@ void PreluEndToEnd(const std::vector<BackendId>& backends,
inputInfo.SetQuantizationOffset(qOffset);
inputInfo.SetQuantizationScale(qScale);
+ inputInfo.SetConstant(true);
alphaInfo.SetQuantizationOffset(qOffset);
alphaInfo.SetQuantizationScale(qScale);
+ alphaInfo.SetConstant(true);
outputInfo.SetQuantizationOffset(qOffset);
outputInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index 281bed18e7..e2147fc59b 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -80,22 +80,22 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
const armnn::TensorInfo biasInfo({outputSize},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
const armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
// Mandatory params
const std::vector<int8_t> inputToForgetWeightsVector =
@@ -179,17 +179,17 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputInfo({numBatches , inputSize},
armnn::DataType::QAsymmS8,
inputScale,
- inputOffset);
+ inputOffset, true);
const armnn::TensorInfo cellStateInfo({numBatches , numUnits},
armnn::DataType::QSymmS16,
cellStateScale,
- cellStateOffset);
+ cellStateOffset, true);
const armnn::TensorInfo outputStateInfo({numBatches , outputSize},
armnn::DataType::QAsymmS8,
outputScale,
- outputOffset);
+ outputOffset, true);
// Input tensor data
const std::vector<int8_t> inputVector = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
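The QLstm hunks show the same five-argument constructor split across lines: true simply follows the offset as the final argument. A sketch with illustrative quantization values:

    const armnn::TensorInfo weightsInfo({ 4, 2 },
                                        armnn::DataType::QSymmS8,
                                        0.5f,  // weightsScale (illustrative)
                                        0,     // weightsOffset (illustrative)
                                        true); // isConstant
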
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index a2fadc7b92..f178951873 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -46,14 +46,14 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
- armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
+ armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);
armnn::QuantizedLstmInputParams data;
@@ -210,9 +210,16 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
inputTensors.reserve(3);
// input
- inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
- inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())});
- inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), outputStateInVector.data())});
+ TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
+ inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())});
+ inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())});
+ inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())});
OutputTensors outputTensors;
outputTensors.reserve(2);
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index 461b3b9be8..5229c47331 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -46,7 +46,7 @@ void RankEndToEnd(const std::vector<armnn::BackendId>& backends)
std::vector<int32_t> expectedOutputData{ 4 };
- TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType);
+ TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType, 0.0f, 0, true);
TensorShape outputShape (Dimensionality::Scalar);
TensorInfo outputInfo(outputShape, DataType::Signed32);
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index aa7af11feb..a56db44161 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -57,7 +57,7 @@ void ResizeEndToEnd(const std::vector<armnn::BackendId>& backends,
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
std::vector<float> inputData =
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index 4e5baade27..e3b016ee94 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -34,7 +34,7 @@ armnn::INetworkPtr CreateSpaceToDepthNetwork(const armnn::TensorShape& inputShap
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout);
if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0
@@ -102,7 +102,7 @@ void SpaceToDepthNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultB
const unsigned int blockSize = 2;
TensorShape inputShape{1, 2, 2, 1};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{1, 1, 1, 4};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -133,7 +133,7 @@ void SpaceToDepthNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultB
const unsigned int blockSize = 2;
TensorShape inputShape{1, 2, 2, 1};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{1, 1, 1, 4};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -167,7 +167,7 @@ void SpaceToDepthNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultB
TensorShape outputShape{1, 1, 1, 8};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
@@ -197,7 +197,7 @@ void SpaceToDepthNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultB
TensorShape inputShape{1, 2, 2, 2};
TensorShape outputShape{1, 1, 1, 8};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 64e24e54aa..3a2af6850c 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -31,7 +31,7 @@ INetworkPtr CreateSplitterNetwork(const TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 764983f3b9..8ef5ecc203 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -62,8 +62,10 @@ void AsyncThreadedEndToEndTestImpl(INetworkPtr network,
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData[i])
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
outputTensors.reserve(expectedOutputData.size());
@@ -146,8 +148,10 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData)
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
OutputTensors outputTensors;
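When the inputs are assembled in a loop, the copy-and-flag step moves into the loop body so each binding id gets its own constant-marked info. A sketch of the loop shape used in both async helpers above (inputTensorData maps binding ids to data vectors; names follow the tests):

    armnn::InputTensors inputTensors;
    inputTensors.reserve(inputTensorData.size());
    for (auto&& it : inputTensorData)
    {
        // Copy the runtime's info for this binding and mark it constant.
        armnn::TensorInfo info = runtime->GetInputTensorInfo(networkId, it.first);
        info.SetConstant(true);
        inputTensors.push_back({ it.first, armnn::ConstTensor(info, it.second.data()) });
    }
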
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 133829c43b..8f10869088 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -68,10 +68,10 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
- TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset);
- TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0);
+ TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
+ TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);
std::vector<float> inputData =
{
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 495aa69bff..862ed2ecab 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -44,9 +44,11 @@ void RunInference(armnn::NetworkId& netId, armnn::IRuntimePtr& runtime, std::vec
1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
};
+ armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
armnn::OutputTensors outputTensors
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 60145139ff..c09d0b2bc2 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -67,7 +67,7 @@ armnn::INetworkPtr CreateTestNetwork(armnn::TensorInfo& inputTensorInfo)
armnn::FullyConnectedDescriptor fullyConnectedDesc;
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant(true);
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -145,9 +145,11 @@ TEST_CASE("ClCustomAllocatorTest")
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
+ armnn::TensorInfo inputTensorInfo2 = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), alignedInputPtr)},
+ {0, armnn::ConstTensor(inputTensorInfo2, alignedInputPtr)},
};
armnn::OutputTensors outputTensors
{
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 7721206d3d..7cd05d193b 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -35,6 +35,7 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32);
+ info.SetConstant(true);
input0->GetOutputSlot(0).SetTensorInfo(info);
input1->GetOutputSlot(0).SetTensorInfo(info);
@@ -181,6 +182,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+ info.SetConstant(true);
input0->GetOutputSlot(0).SetTensorInfo(info);
input1->GetOutputSlot(0).SetTensorInfo(info);
@@ -311,6 +313,7 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32);
+ info.SetConstant(true);
TensorInfo poolingInfo = TensorInfo({ 1, 2, 2, 1 }, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
@@ -468,6 +471,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+ info.SetConstant(true);
TensorInfo poolingInfo = TensorInfo({ 1, 2, 1, 1 }, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
@@ -536,6 +540,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
std::vector<float> expectedOutput{ 11.0f, -1.0f };
+
InputTensors inputTensors
{
{ 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 6b1d3521d5..0403d5379e 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -171,9 +171,11 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputPtr)},
+ {0,armnn::ConstTensor(inputTensorInfo, alignedInputPtr)},
};
OutputTensors outputTensors
{
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index 1974d4d856..b30a447f9f 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -88,10 +88,12 @@ TEST_CASE("Fp16AdditionTest")
100.0_h, 200.0_h, 300.0_h, 400.0_h
};
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
- {1,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ {0,ConstTensor(inputTensorInfo, input1Data.data())},
+ {1,ConstTensor(inputTensorInfo, input2Data.data())}
};
std::vector<Half> outputData(input1Data.size());
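The Fp16 hunk reuses one constant-marked copy for both bindings, which only works because the original code already described both inputs with the binding-0 info (they share shape and type). A sketch of that shortcut:

    armnn::TensorInfo info = runtime->GetInputTensorInfo(netId, 0);
    info.SetConstant(true);
    armnn::InputTensors inputTensors
    {
        { 0, armnn::ConstTensor(info, input1Data.data()) }, // both bindings share one info
        { 1, armnn::ConstTensor(info, input2Data.data()) }
    };
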
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index e7a56a4848..ae6cfae3fa 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -107,11 +107,18 @@ TEST_CASE("FallbackImportToCpuAcc")
11.0f, 9.0f, 7.0f, 5.0f, 3.0f, 1.0f, -1.0f, -3.0f, -5.0f, -7.0f, -9.0f, -11.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, inputData2.data()) }
};
OutputTensors outputTensors
{
@@ -238,10 +245,15 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
6.0f, 12.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) }
};
OutputTensors outputTensors
{
@@ -374,11 +386,18 @@ TEST_CASE("FallbackImportFromCpuAcc")
13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, inputData2.data()) }
};
OutputTensors outputTensors
{
@@ -505,10 +524,15 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
5.0f, 15.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) }
};
OutputTensors outputTensors
{
@@ -639,11 +663,18 @@ TEST_CASE("FallbackDisableImportFromCpuAcc")
13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, inputData2.data()) }
};
OutputTensors outputTensors
{
@@ -784,11 +815,18 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
std::copy(inputData2.begin(), inputData2.end(), inputPtr);
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), alignedInputPtr) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, alignedInputPtr) }
};
OutputTensors outputTensors
{
@@ -910,11 +948,18 @@ TEST_CASE("NeonImportDisabledFallbackToCl")
11.0f, 9.0f, 7.0f, 5.0f, 3.0f, 1.0f, -1.0f, -3.0f, -5.0f, -7.0f, -9.0f, -11.0f
};
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, inputData2.data()) }
};
OutputTensors outputTensors
{
@@ -1061,11 +1106,18 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
std::copy(inputData2.begin(), inputData2.end(), inputPtr);
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), alignedInputPtr) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, alignedInputPtr) }
};
OutputTensors outputTensors
{
@@ -1200,11 +1252,18 @@ TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
std::vector<float> expectedOutput{ 11.0f, -1.0f };
+ armnn::TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ armnn::TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
InputTensors inputTensors
{
- { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
- { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
- { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+ { 0, armnn::ConstTensor(inputTensorInfo0, inputData0.data()) },
+ { 1, armnn::ConstTensor(inputTensorInfo1, inputData1.data()) },
+ { 2, armnn::ConstTensor(inputTensorInfo2, inputData2.data()) }
};
OutputTensors outputTensors
{
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index eabf3c8e9a..2e6854a331 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -419,8 +419,10 @@ TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData)
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
OutputTensors outputTensors;
outputTensors.reserve(expectedOutputData.size());
@@ -594,8 +596,10 @@ TEST_CASE("SplitteronXorYPaddingRequiredTest")
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData)
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
OutputTensors outputTensors;
outputTensors.reserve(expectedOutputData.size());
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index dc4dcecd81..4444f5c361 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -133,9 +133,11 @@ TEST_CASE("Unsigned8")
};
std::vector<uint8_t> outputData(5);
+ TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())}
};
armnn::OutputTensors outputTensors
{
@@ -199,10 +201,12 @@ TEST_CASE("TrivialAdd")
};
std::vector<float> outputData(12);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
- {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input1Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input2Data.data())}
};
OutputTensors outputTensors
{
@@ -293,9 +297,11 @@ TEST_CASE("MultipleOutputs")
std::vector<float> output2Data(inputData.size());
std::vector<float> output3Data(inputData.size());
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
@@ -357,10 +363,12 @@ TEST_CASE("TrivialMin")
};
std::vector<float> outputData(4);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
- {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input1Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input2Data.data())}
};
OutputTensors outputTensors
{
diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
index 0d6f8422fc..a96de66ecd 100644
--- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
+++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
@@ -90,9 +90,11 @@ TEST_CASE("TestFileOnlyProfiling")
outputData[i] = 3.0;
}
+ TensorInfo inputTensorInfo = runtime.GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
@@ -213,9 +215,11 @@ TEST_CASE("DumpOutgoingValidFileEndToEnd")
outputData[i] = 3.0;
}
+ TensorInfo inputTensorInfo = runtime.GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{
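Why every one of these hunks is needed: with this change a ConstTensor built from an info that was never marked constant is rejected, the new constructors throwing armnn::InvalidArgumentException. An illustrative stand-alone check, not taken from the patch:

    #include <armnn/Tensor.hpp>
    #include <armnn/Exceptions.hpp>
    #include <vector>

    std::vector<float> data(4, 0.0f);
    armnn::TensorInfo info({ 4 }, armnn::DataType::Float32); // isConstant defaults to false
    try
    {
        armnn::ConstTensor tensor(info, data.data()); // throws under this patch
    }
    catch (const armnn::InvalidArgumentException&)
    {
        // expected: the info must have SetConstant(true) first
    }
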
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index 79b8ea41da..e0d3dd717c 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -381,8 +381,8 @@ void VerifyPostOptimisationStructureTestImpl(armnn::BackendId backendId)
// Convolution details
TensorInfo inputInfo({ 1, 2, 5, 1 }, DataType::Float32);
- TensorInfo weightInfo({ 3, 2, 3, 1 }, DataType::Float32);
- TensorInfo biasInfo({ 3 }, DataType::Float32);
+ TensorInfo weightInfo({ 3, 2, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+ TensorInfo biasInfo({ 3 }, DataType::Float32, 0.0f, 0, true);
TensorInfo outputInfo({ 1, 3, 7, 1 }, DataType::Float32);
std::vector<float> weightsData{
1.0f, 0.0f, 0.0f,
@@ -742,9 +742,11 @@ void VerifyPostOptimisationStructureTestImpl(armnn::BackendId backendId)
std::vector<float> inputData(inputInfo.GetNumElements());
std::vector<float> outputData(outputInfo.GetNumElements());
+ TensorInfo inputTensorInfo = runtime.GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{