about summary refs log tree commit diff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r-- src/backends/backendsCommon/WorkloadUtils.cpp | 2
-rw-r--r-- src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp | 4
-rw-r--r-- src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp | 1
-rw-r--r-- src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp | 6
-rw-r--r-- src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp | 1
-rw-r--r-- src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp | 1
-rw-r--r-- src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp | 3
-rw-r--r-- src/backends/backendsCommon/test/DynamicBackendTests.hpp | 6
-rw-r--r-- src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/EndToEndTestImpl.hpp | 23
-rw-r--r-- src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp | 14
-rw-r--r-- src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp | 4
-rw-r--r-- src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp | 10
-rw-r--r-- src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp | 4
-rw-r--r-- src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp | 2
-rw-r--r-- src/backends/backendsCommon/test/OptimizedNetworkTests.cpp | 12
-rw-r--r-- src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp | 14
-rw-r--r-- src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp | 19
-rw-r--r-- src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp | 10
-rw-r--r-- src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp | 2
-rw-r--r-- src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp | 8
-rw-r--r-- src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp | 6
31 files changed, 105 insertions, 67 deletions
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index fe681936f1..fcdad3e21b 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -33,7 +33,7 @@ armnn::ConstTensor PermuteTensor(const ConstTensorHandle* tensor,
{
::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
}
-
+ tensorInfo.SetConstant(true);
return ConstTensor(tensorInfo, permuteBuffer);
}
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
index 0b1bf772ce..f7d4596450 100644
--- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -127,7 +127,7 @@ void EluEndToEndTest(const std::vector<BackendId>& backends)
float qScale = 1.0f;
int32_t qOffset = 0;
- armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
@@ -156,7 +156,7 @@ void HardSwishEndToEndTest(const std::vector<BackendId>& backends)
float qScale = 1.0f;
int32_t qOffset = 0;
- armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0);
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
index 2ffe06f218..041f9f8f17 100644
--- a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
@@ -47,7 +47,7 @@ void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape,
const float qScale = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2 : 0;
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset, true);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
// quantize data
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index 254b3c20a0..859694ceb2 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -30,7 +30,7 @@ INetworkPtr CreateBatchToSpaceNdNetwork(const armnn::TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
BatchToSpaceNdDescriptor batchToSpaceNdDesc(blockShape, crops);
diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
index 9ec764402e..7d46be7bcb 100644
--- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
@@ -37,6 +37,7 @@ void ChannelShuffleEndToEnd(const std::vector<BackendId>& backends)
inputInfo.SetQuantizationScale(1.0f);
inputInfo.SetQuantizationOffset(0);
+ inputInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index 40e3fd62ee..e274163c6f 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -35,7 +35,7 @@ INetworkPtr CreateComparisonNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
- TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, comparisonLayer, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index 5b2f33fc1a..62f0e4cd36 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -39,7 +39,7 @@ INetworkPtr CreateConcatNetwork(const std::vector<TensorShape>& inputShapes,
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
- TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
Connect(input, concat, inputTensorInfo, 0, i);
}
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
index 33bf9a180b..b1f685b4cd 100644
--- a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -56,7 +56,7 @@ void Convolution3dEndToEnd(const std::vector<armnn::BackendId>& backends,
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index ea997290e5..0a4c29b56d 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -120,7 +120,7 @@ std::unique_ptr<Workload> CreateWorkload(TensorInfo info, ITensorHandle* inputTe
TEST_CASE("TestAsyncExecute")
{
- TensorInfo info({5}, DataType::Signed32);
+ TensorInfo info({5}, DataType::Signed32, 0.0, 0, true);
int inVals[5]{2, 2, 2, 2, 2};
int outVals[5]{1, 1, 1, 1, 1};
@@ -157,7 +157,7 @@ TEST_CASE("TestAsyncExecute")
TEST_CASE("TestDefaultAsyncExecute")
{
- TensorInfo info({5}, DataType::Signed32);
+ TensorInfo info({5}, DataType::Signed32, 0.0f, 0, true);
std::vector<int> inVals{2, 2, 2, 2, 2};
std::vector<int> outVals{1, 1, 1, 1, 1};
@@ -193,7 +193,7 @@ TEST_CASE("TestDefaultAsyncExeuteWithThreads")
{
// Use a large vector so the threads have a chance to interact
unsigned int vecSize = 1000;
- TensorInfo info({vecSize}, DataType::Signed32);
+ TensorInfo info({vecSize}, DataType::Signed32, 0.0f, 0, true);
std::vector<int> inVals1(vecSize, 2);
std::vector<int> outVals1(vecSize, 1);
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
index c6176aef5b..b64e618075 100644
--- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
@@ -44,6 +44,7 @@ void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends,
using namespace armnn;
TensorInfo inputInfo(nhwcInputShape, ArmnnType);
+ inputInfo.SetConstant(true);
TensorInfo outputInfo(nhwcOutputShape, ArmnnType);
constexpr float qScale = 0.25f;
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index a5e2faccc9..fff4c4fab9 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -43,6 +43,7 @@ void DequantizeEndToEndLayerTestImpl(const std::vector<BackendId>& backends,
inputInfo.SetQuantizationScale(scale);
inputInfo.SetQuantizationOffset(offset);
+ inputInfo.SetConstant(true);
// Builds up the structure of the network
armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index a566964ba2..c4488865a1 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -82,10 +82,13 @@ void DetectionPostProcessEndToEnd(const std::vector<BackendId>& backends, bool u
boxEncodingsInfo.SetQuantizationScale(boxScale);
boxEncodingsInfo.SetQuantizationOffset(boxOffset);
+ boxEncodingsInfo.SetConstant(true);
scoresInfo.SetQuantizationScale(scoreScale);
scoresInfo.SetQuantizationOffset(scoreOffset);
+ scoresInfo.SetConstant(true);
anchorsInfo.SetQuantizationScale(anchorScale);
anchorsInfo.SetQuantizationOffset(anchorOffset);
+ anchorsInfo.SetConstant(true);
// Builds up the structure of the network
armnn::INetworkPtr net = CreateDetectionPostProcessNetwork<T>(boxEncodingsInfo, scoresInfo,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 046ee3a488..0d9d3dd31b 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1594,10 +1594,12 @@ void SampleDynamicBackendEndToEndTestImpl()
std::vector<float> expectedOutputData{ 15.0f, 11.0f };
std::vector<float> outputData(2);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())},
- {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
};
OutputTensors outputTensors
{
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index f958613d02..635dc96720 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -33,7 +33,7 @@ INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape,
ElementwiseUnaryDescriptor descriptor(operation);
IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
- TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset, true);
IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 2d268f8ea1..269a46077e 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -79,7 +79,8 @@ bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
{
- const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+ TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+ commonTensorInfo.SetConstant(true);
return ConstantUsageTest(backends,
commonTensorInfo,
@@ -98,6 +99,7 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
commonTensorInfo.SetQuantizationScale(scale);
commonTensorInfo.SetQuantizationOffset(offset);
+ commonTensorInfo.SetConstant(true);
return ConstantUsageTest(backends,
commonTensorInfo,
@@ -198,7 +200,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -263,7 +265,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -334,7 +336,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
@@ -418,7 +420,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
@@ -449,6 +451,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -507,7 +510,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
@@ -536,6 +539,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -594,7 +598,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
@@ -624,6 +628,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
};
INFO("Create Network");
+
InputTensors inputTensors
{
{0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -685,7 +690,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true));
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
@@ -794,7 +799,7 @@ inline void StridedSliceInvalidSliceEndToEndTest(std::vector<BackendId> backends
input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0));
stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true));
stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
// Attempt to optimize the network and check that the correct exception is thrown
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 2a4ccb6898..27e5aa0229 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -52,7 +52,7 @@ void FillEndToEnd(const std::vector<armnn::BackendId>& backends)
};
std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData);
- TensorInfo inputInfo ({ 4 }, DataType::Signed32);
+ TensorInfo inputInfo ({ 4 }, DataType::Signed32, 0.0f, 0, true);
TensorInfo outputInfo({ 1, 1, 5, 3 }, ArmnnType);
armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor);
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index f9bdfde622..878b6afeee 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -166,6 +166,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, ArmnnType);
inputTensorInfo.SetQuantizationScale(0.1f);
inputTensorInfo.SetQuantizationOffset(63);
+ inputTensorInfo.SetConstant(true);
armnn::TensorInfo outputTensorInfo({ 1, 2 }, ArmnnType);
outputTensorInfo.SetQuantizationScale(5.f);
@@ -174,6 +175,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
armnn::TensorInfo weightsTensorInfo({ 2, 6 }, ArmnnType);
weightsTensorInfo.SetQuantizationScale(0.2f);
weightsTensorInfo.SetQuantizationOffset(93);
+ weightsTensorInfo.SetConstant(true);
FullyConnectedDescriptor descriptor;
descriptor.m_ConstantWeights = false;
@@ -236,10 +238,10 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
unsigned int biasShape[] = { outputChannels };
- armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
- armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
- armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+ armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> input =
{
@@ -352,10 +354,10 @@ void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
unsigned int biasShape[] = { outputChannels };
- armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
- armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
- armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+ armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weights =
{
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 431ef31437..4c67ec2c8e 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -42,6 +42,8 @@ void GatherEndToEnd(const std::vector<BackendId>& backends)
paramsInfo.SetQuantizationScale(1.0f);
paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
@@ -78,6 +80,8 @@ void GatherMultiDimEndToEnd(const std::vector<BackendId>& backends)
paramsInfo.SetQuantizationScale(1.0f);
paramsInfo.SetQuantizationOffset(0);
+ paramsInfo.SetConstant(true);
+ indicesInfo.SetConstant(true);
outputInfo.SetQuantizationScale(1.0f);
outputInfo.SetQuantizationOffset(0);
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index d758137b3b..e715e6b187 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -36,7 +36,7 @@ armnn::INetworkPtr CreateInstanceNormalizationNetwork(const armnn::TensorShape&
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
InstanceNormalizationDescriptor instanceNormalizationDesc;
instanceNormalizationDesc.m_Gamma = gamma;
@@ -104,7 +104,7 @@ void InstanceNormalizationNhwcEndToEndTest1(const std::vector<armnn::BackendId>&
const float gamma = 1.0f;
TensorShape inputShape{2, 2, 2, 2};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -174,7 +174,7 @@ void InstanceNormalizationNchwEndToEndTest1(const std::vector<armnn::BackendId>&
const float gamma = 1.0f;
TensorShape inputShape{2, 2, 2, 2};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -248,7 +248,7 @@ void InstanceNormalizationNhwcEndToEndTest2(const std::vector<armnn::BackendId>&
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
@@ -319,7 +319,7 @@ void InstanceNormalizationNchwEndToEndTest2(const std::vector<armnn::BackendId>&
TensorShape outputShape{2, 2, 2, 2};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 94855aa7b2..226e2b3364 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -177,9 +177,11 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
};
std::vector<uint8_t> outputData(5);
+ TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())}
};
armnn::OutputTensors outputTensors
{
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index 1f7f57806e..181ecd912f 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -27,7 +27,7 @@ armnn::INetworkPtr CreateLogSoftmaxNetwork(const armnn::TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
LogSoftmaxDescriptor logSoftmaxDesc;
logSoftmaxDesc.m_Beta = beta;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index b0ee9bee32..6eecaabf55 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -370,8 +370,8 @@ TEST_CASE("OptimizeNetworkCopy")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 2, 2, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -443,10 +443,12 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<float> inputData = GenerateRandomData<float>(runtime->GetInputTensorInfo(optNetId, 0).GetNumElements());
std::vector<float> outputData(runtime->GetOutputTensorInfo(optNetId, 0).GetNumElements());
+ armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(optNetId, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
{
- 0 ,armnn::ConstTensor(runtime->GetInputTensorInfo(optNetId, 0), inputData.data())
+ 0, armnn::ConstTensor(inputTensorInfo, inputData.data())
}
};
armnn::OutputTensors outputTensors
@@ -464,10 +466,12 @@ TEST_CASE("OptimizeNetworkCopy")
armnn::NetworkId netId = networkIds[i];
std::vector<float> copyOutputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements());
+ armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors copyInputTensors
{
{
- 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())
+ 0, armnn::ConstTensor(inputTensorInfo2, inputData.data())
}
};
armnn::OutputTensors copyOutputTensors
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index e11553dd38..c31d084b0e 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -51,8 +51,10 @@ void PreluEndToEnd(const std::vector<BackendId>& backends,
inputInfo.SetQuantizationOffset(qOffset);
inputInfo.SetQuantizationScale(qScale);
+ inputInfo.SetConstant(true);
alphaInfo.SetQuantizationOffset(qOffset);
alphaInfo.SetQuantizationScale(qScale);
+ alphaInfo.SetConstant(true);
outputInfo.SetQuantizationOffset(qOffset);
outputInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index 281bed18e7..e2147fc59b 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -80,22 +80,22 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
const armnn::TensorInfo biasInfo({outputSize},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
const armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
// Mandatory params
const std::vector<int8_t> inputToForgetWeightsVector =
@@ -179,17 +179,17 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputInfo({numBatches , inputSize},
armnn::DataType::QAsymmS8,
inputScale,
- inputOffset);
+ inputOffset, true);
const armnn::TensorInfo cellStateInfo({numBatches , numUnits},
armnn::DataType::QSymmS16,
cellStateScale,
- cellStateOffset);
+ cellStateOffset, true);
const armnn::TensorInfo outputStateInfo({numBatches , outputSize},
armnn::DataType::QAsymmS8,
outputScale,
- outputOffset);
+ outputOffset, true);
// Input tensor data
const std::vector<int8_t> inputVector = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index a2fadc7b92..f178951873 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -46,14 +46,14 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
- armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
+ armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);
armnn::QuantizedLstmInputParams data;
@@ -210,9 +210,16 @@ void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
inputTensors.reserve(3);
// input
- inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
- inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())});
- inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), outputStateInVector.data())});
+ TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+ TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+ TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+ inputTensorInfo0.SetConstant(true);
+ inputTensorInfo1.SetConstant(true);
+ inputTensorInfo2.SetConstant(true);
+
+ inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())});
+ inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())});
+ inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())});
OutputTensors outputTensors;
outputTensors.reserve(2);
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index 461b3b9be8..5229c47331 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -46,7 +46,7 @@ void RankEndToEnd(const std::vector<armnn::BackendId>& backends)
std::vector<int32_t> expectedOutputData{ 4 };
- TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType);
+ TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType, 0.0f, 0, true);
TensorShape outputShape (Dimensionality::Scalar);
TensorInfo outputInfo(outputShape, DataType::Signed32);
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index aa7af11feb..a56db44161 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -57,7 +57,7 @@ void ResizeEndToEnd(const std::vector<armnn::BackendId>& backends,
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
std::vector<float> inputData =
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index 4e5baade27..e3b016ee94 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -34,7 +34,7 @@ armnn::INetworkPtr CreateSpaceToDepthNetwork(const armnn::TensorShape& inputShap
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout);
if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0
@@ -102,7 +102,7 @@ void SpaceToDepthNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultB
const unsigned int blockSize = 2;
TensorShape inputShape{1, 2, 2, 1};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{1, 1, 1, 4};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -133,7 +133,7 @@ void SpaceToDepthNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultB
const unsigned int blockSize = 2;
TensorShape inputShape{1, 2, 2, 1};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorShape outputShape{1, 1, 1, 4};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -167,7 +167,7 @@ void SpaceToDepthNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultB
TensorShape outputShape{1, 1, 1, 8};
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
std::vector<float> inputData = std::vector<float>(
{
@@ -197,7 +197,7 @@ void SpaceToDepthNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultB
TensorShape inputShape{1, 2, 2, 2};
TensorShape outputShape{1, 1, 1, 8};
- TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
TensorInfo outputTensorInfo(outputShape, DataType::Float32);
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 64e24e54aa..3a2af6850c 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -31,7 +31,7 @@ INetworkPtr CreateSplitterNetwork(const TensorShape& inputShape,
// Builds up the structure of the network.
INetworkPtr net(INetwork::Create());
- TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+ TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 764983f3b9..8ef5ecc203 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -62,8 +62,10 @@ void AsyncThreadedEndToEndTestImpl(INetworkPtr network,
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData[i])
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
outputTensors.reserve(expectedOutputData.size());
@@ -146,8 +148,10 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
inputTensors.reserve(inputTensorData.size());
for (auto&& it : inputTensorData)
{
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+ inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
OutputTensors outputTensors;
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 133829c43b..8f10869088 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -68,10 +68,10 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
- TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+ TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
- TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset);
- TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0);
+ TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
+ TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);
std::vector<float> inputData =
{