From 5b8093c17044e8eaaaa42d96ba4902dee5791be4 Mon Sep 17 00:00:00 2001 From: Cathal Corbett Date: Fri, 22 Oct 2021 11:12:07 +0100 Subject: IVGCVSW-6420: Constant flag in tensor info is not set correctly !android-nn-driver:6532 !armnn-internal-tests:372451 * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data() using template. * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo. * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true. * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true. * Indentation fixes. * Fix to arm_tensor.i to add isConstant parameter to TensorInfo constructor. Added methods IsConstant() and SetConstant(). * Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set to false when constructing a ConstTensor. * Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor is used. * Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when TensorInfo isConstant parameter is false. 
Signed-off-by: Cathal Corbett Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148 --- .../test/ActivationEndToEndTestImpl.hpp | 4 ++-- .../test/ArgMinMaxEndToEndTestImpl.hpp | 2 +- .../test/BatchToSpaceNdEndToEndTestImpl.hpp | 2 +- .../test/ChannelShuffleEndToEndTestImpl.hpp | 1 + .../test/ComparisonEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/ConcatEndToEndTestImpl.hpp | 2 +- .../test/Convolution3dEndToEndTestImpl.hpp | 2 +- .../test/DefaultAsyncExecuteTest.cpp | 6 +++--- .../test/DepthToSpaceEndToEndTestImpl.hpp | 1 + .../test/DequantizeEndToEndTestImpl.hpp | 1 + .../test/DetectionPostProcessEndToEndTestImpl.hpp | 3 +++ .../backendsCommon/test/DynamicBackendTests.hpp | 6 ++++-- .../test/ElementwiseUnaryEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/EndToEndTestImpl.hpp | 23 +++++++++++++--------- .../backendsCommon/test/FillEndToEndTestImpl.hpp | 2 +- .../test/FullyConnectedEndToEndTestImpl.hpp | 14 +++++++------ .../backendsCommon/test/GatherEndToEndTestImpl.hpp | 4 ++++ .../test/InstanceNormalizationEndToEndTestImpl.cpp | 10 +++++----- .../backendsCommon/test/JsonPrinterTestImpl.cpp | 4 +++- .../test/LogSoftmaxEndToEndTestImpl.cpp | 2 +- .../backendsCommon/test/OptimizedNetworkTests.cpp | 12 +++++++---- .../backendsCommon/test/PreluEndToEndTestImpl.hpp | 2 ++ .../backendsCommon/test/QLstmEndToEndTestImpl.cpp | 14 ++++++------- .../test/QuantizedLstmEndToEndTestImpl.cpp | 19 ++++++++++++------ .../backendsCommon/test/RankEndToEndTestImpl.hpp | 2 +- .../backendsCommon/test/ResizeEndToEndTestImpl.hpp | 2 +- .../test/SpaceToDepthEndToEndTestImpl.cpp | 10 +++++----- .../test/SplitterEndToEndTestImpl.hpp | 2 +- .../test/StridedSliceAsyncEndToEndTest.hpp | 8 ++++++-- .../TransposeConvolution2dEndToEndTestImpl.hpp | 6 +++--- 30 files changed, 104 insertions(+), 66 deletions(-) (limited to 'src/backends/backendsCommon/test') diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp 
b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp index 0b1bf772ce..f7d4596450 100644 --- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp @@ -127,7 +127,7 @@ void EluEndToEndTest(const std::vector& backends) float qScale = 1.0f; int32_t qOffset = 0; - armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset); + armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true); armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset); armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0); @@ -156,7 +156,7 @@ void HardSwishEndToEndTest(const std::vector& backends) float qScale = 1.0f; int32_t qOffset = 0; - armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset); + armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true); armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset); armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0); diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp index 2ffe06f218..041f9f8f17 100644 --- a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp @@ -47,7 +47,7 @@ void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape, const float qScale = armnn::IsQuantizedType() ? 2.0f : 1.0f; const int32_t qOffset = armnn::IsQuantizedType() ? 
2 : 0; - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset, true); armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32); // quantize data diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp index 254b3c20a0..859694ceb2 100644 --- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp @@ -30,7 +30,7 @@ INetworkPtr CreateBatchToSpaceNdNetwork(const armnn::TensorShape& inputShape, // Builds up the structure of the network. INetworkPtr net(INetwork::Create()); - TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); BatchToSpaceNdDescriptor batchToSpaceNdDesc(blockShape, crops); diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp index 9ec764402e..7d46be7bcb 100644 --- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp @@ -37,6 +37,7 @@ void ChannelShuffleEndToEnd(const std::vector& backends) inputInfo.SetQuantizationScale(1.0f); inputInfo.SetQuantizationOffset(0); + inputInfo.SetConstant(true); outputInfo.SetQuantizationScale(1.0f); outputInfo.SetQuantizationOffset(0); diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp index 40e3fd62ee..e274163c6f 100644 --- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp @@ -35,7 +35,7 @@ INetworkPtr 
CreateComparisonNetwork(const std::vector& inputShapes, for (unsigned int i = 0; i < inputShapes.size(); ++i) { - TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset); + TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset, true); IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast(i)); Connect(input, comparisonLayer, inputTensorInfo, 0, i); } diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp index 5b2f33fc1a..62f0e4cd36 100644 --- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp @@ -39,7 +39,7 @@ INetworkPtr CreateConcatNetwork(const std::vector& inputShapes, for (unsigned int i = 0; i < inputShapes.size(); ++i) { - TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset, true); IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast(i)); Connect(input, concat, inputTensorInfo, 0, i); } diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp index 33bf9a180b..b1f685b4cd 100644 --- a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp @@ -56,7 +56,7 @@ void Convolution3dEndToEnd(const std::vector& backends, const float qScale = IsQuantizedType() ? 0.25f : 1.0f; const int32_t qOffset = IsQuantizedType() ? 
50 : 0; - TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset); + TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset, true); TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset); TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true); TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true); diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp index ea997290e5..0a4c29b56d 100644 --- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp +++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp @@ -120,7 +120,7 @@ std::unique_ptr CreateWorkload(TensorInfo info, ITensorHandle* inputTe TEST_CASE("TestAsyncExecute") { - TensorInfo info({5}, DataType::Signed32); + TensorInfo info({5}, DataType::Signed32, 0.0, 0, true); int inVals[5]{2, 2, 2, 2, 2}; int outVals[5]{1, 1, 1, 1, 1}; @@ -157,7 +157,7 @@ TEST_CASE("TestAsyncExecute") TEST_CASE("TestDefaultAsyncExecute") { - TensorInfo info({5}, DataType::Signed32); + TensorInfo info({5}, DataType::Signed32, 0.0f, 0, true); std::vector inVals{2, 2, 2, 2, 2}; std::vector outVals{1, 1, 1, 1, 1}; @@ -193,7 +193,7 @@ TEST_CASE("TestDefaultAsyncExeuteWithThreads") { // Use a large vector so the threads have a chance to interact unsigned int vecSize = 1000; - TensorInfo info({vecSize}, DataType::Signed32); + TensorInfo info({vecSize}, DataType::Signed32, 0.0f, 0, true); std::vector inVals1(vecSize, 2); std::vector outVals1(vecSize, 1); diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp index c6176aef5b..b64e618075 100644 --- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp @@ -44,6 +44,7 @@ void DepthToSpaceEndToEndImpl(const std::vector& backends, using namespace armnn; 
TensorInfo inputInfo(nhwcInputShape, ArmnnType); + inputInfo.SetConstant(true); TensorInfo outputInfo(nhwcOutputShape, ArmnnType); constexpr float qScale = 0.25f; diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp index a5e2faccc9..fff4c4fab9 100644 --- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp @@ -43,6 +43,7 @@ void DequantizeEndToEndLayerTestImpl(const std::vector& backends, inputInfo.SetQuantizationScale(scale); inputInfo.SetQuantizationOffset(offset); + inputInfo.SetConstant(true); // Builds up the structure of the network armnn::INetworkPtr net = CreateDequantizeNetwork(inputInfo, outputInfo); diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp index a566964ba2..c4488865a1 100644 --- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp @@ -82,10 +82,13 @@ void DetectionPostProcessEndToEnd(const std::vector& backends, bool u boxEncodingsInfo.SetQuantizationScale(boxScale); boxEncodingsInfo.SetQuantizationOffset(boxOffset); + boxEncodingsInfo.SetConstant(true); scoresInfo.SetQuantizationScale(scoreScale); scoresInfo.SetQuantizationOffset(scoreOffset); + scoresInfo.SetConstant(true); anchorsInfo.SetQuantizationScale(anchorScale); anchorsInfo.SetQuantizationOffset(anchorOffset); + anchorsInfo.SetConstant(true); // Builds up the structure of the network armnn::INetworkPtr net = CreateDetectionPostProcessNetwork(boxEncodingsInfo, scoresInfo, diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp index 046ee3a488..0d9d3dd31b 100644 --- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp +++ 
b/src/backends/backendsCommon/test/DynamicBackendTests.hpp @@ -1594,10 +1594,12 @@ void SampleDynamicBackendEndToEndTestImpl() std::vector expectedOutputData{ 15.0f, 11.0f }; std::vector outputData(2); + TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo.SetConstant(true); InputTensors inputTensors { - {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())}, - {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())} + {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())}, + {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())} }; OutputTensors outputTensors { diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp index f958613d02..635dc96720 100644 --- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -33,7 +33,7 @@ INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape, ElementwiseUnaryDescriptor descriptor(operation); IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary"); - TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset, true); IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast(0)); Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0); diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 2d268f8ea1..269a46077e 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -79,7 +79,8 @@ bool ConstantUsageTest(const std::vector& computeDevice, inline bool ConstantUsageFloat32Test(const std::vector& backends) { - const TensorInfo commonTensorInfo({ 
2, 3 }, DataType::Float32); + TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32); + commonTensorInfo.SetConstant(true); return ConstantUsageTest(backends, commonTensorInfo, @@ -98,6 +99,7 @@ inline bool ConstantUsageUint8Test(const std::vector& backends) commonTensorInfo.SetQuantizationScale(scale); commonTensorInfo.SetQuantizationOffset(offset); + commonTensorInfo.SetConstant(true); return ConstantUsageTest(backends, commonTensorInfo, @@ -198,7 +200,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -263,7 +265,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -334,7 +336,7 @@ inline void ImportAlignedPointerTest(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -418,7 +420,7 @@ inline void 
ImportOnlyWorkload(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // optimize the network @@ -449,6 +451,7 @@ inline void ImportOnlyWorkload(std::vector backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -507,7 +510,7 @@ inline void ExportOnlyWorkload(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // optimize the network @@ -536,6 +539,7 @@ inline void ExportOnlyWorkload(std::vector backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -594,7 +598,7 @@ inline void ImportAndExportWorkload(std::vector backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); @@ -624,6 +628,7 @@ inline void 
ImportAndExportWorkload(std::vector backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -685,7 +690,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vectorGetOutputSlot(0).Connect(output0->GetInputSlot(0)); activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true)); activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32)); // Optimize the network @@ -794,7 +799,7 @@ inline void StridedSliceInvalidSliceEndToEndTest(std::vector backends input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0)); stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true)); stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32)); // Attempt to optimize the network and check that the correct exception is thrown diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp index 2a4ccb6898..27e5aa0229 100644 --- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp @@ -52,7 +52,7 @@ void FillEndToEnd(const std::vector& backends) }; std::vector expectedOutputData = armnnUtils::QuantizedVector(floatExpectedOutputData); - TensorInfo inputInfo ({ 4 }, DataType::Signed32); + TensorInfo inputInfo ({ 4 }, DataType::Signed32, 0.0f, 0, true); TensorInfo outputInfo({ 1, 1, 5, 3 }, ArmnnType); armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor); diff --git 
a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp index f9bdfde622..878b6afeee 100644 --- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp @@ -166,6 +166,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector input = { @@ -352,10 +354,10 @@ void FullyConnectedErrorChecking(const std::vector& backends, unsigned int biasShape[] = { outputChannels }; - armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true); armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32); - armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32); - armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32); + armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true); + armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true); std::vector weights = { diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp index 431ef31437..4c67ec2c8e 100644 --- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp @@ -42,6 +42,8 @@ void GatherEndToEnd(const std::vector& backends) paramsInfo.SetQuantizationScale(1.0f); paramsInfo.SetQuantizationOffset(0); + paramsInfo.SetConstant(true); + indicesInfo.SetConstant(true); outputInfo.SetQuantizationScale(1.0f); outputInfo.SetQuantizationOffset(0); @@ -78,6 +80,8 @@ void GatherMultiDimEndToEnd(const std::vector& backends) paramsInfo.SetQuantizationScale(1.0f); 
paramsInfo.SetQuantizationOffset(0); + paramsInfo.SetConstant(true); + indicesInfo.SetConstant(true); outputInfo.SetQuantizationScale(1.0f); outputInfo.SetQuantizationOffset(0); diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp index d758137b3b..e715e6b187 100644 --- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp @@ -36,7 +36,7 @@ armnn::INetworkPtr CreateInstanceNormalizationNetwork(const armnn::TensorShape& // Builds up the structure of the network. INetworkPtr net(INetwork::Create()); - TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); InstanceNormalizationDescriptor instanceNormalizationDesc; instanceNormalizationDesc.m_Gamma = gamma; @@ -104,7 +104,7 @@ void InstanceNormalizationNhwcEndToEndTest1(const std::vector& const float gamma = 1.0f; TensorShape inputShape{2, 2, 2, 2}; - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); TensorShape outputShape{2, 2, 2, 2}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); @@ -174,7 +174,7 @@ void InstanceNormalizationNchwEndToEndTest1(const std::vector& const float gamma = 1.0f; TensorShape inputShape{2, 2, 2, 2}; - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); TensorShape outputShape{2, 2, 2, 2}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); @@ -248,7 +248,7 @@ void InstanceNormalizationNhwcEndToEndTest2(const std::vector& TensorShape outputShape{2, 2, 2, 2}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo 
inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); std::vector inputData = std::vector( { @@ -319,7 +319,7 @@ void InstanceNormalizationNchwEndToEndTest2(const std::vector& TensorShape outputShape{2, 2, 2, 2}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); std::vector inputData = std::vector( { diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp index 94855aa7b2..226e2b3364 100644 --- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp +++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp @@ -177,9 +177,11 @@ std::string GetSoftmaxProfilerJson(const std::vector& backends }; std::vector outputData(5); + TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo2.SetConstant(true); armnn::InputTensors inputTensors { - {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())} + {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())} }; armnn::OutputTensors outputTensors { diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp index 1f7f57806e..181ecd912f 100644 --- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp @@ -27,7 +27,7 @@ armnn::INetworkPtr CreateLogSoftmaxNetwork(const armnn::TensorShape& inputShape, // Builds up the structure of the network. 
INetworkPtr net(INetwork::Create()); - TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); LogSoftmaxDescriptor logSoftmaxDesc; logSoftmaxDesc.m_Beta = beta; diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp index b0ee9bee32..6eecaabf55 100644 --- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp +++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp @@ -370,8 +370,8 @@ TEST_CASE("OptimizeNetworkCopy") const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 2, 2, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -443,10 +443,12 @@ TEST_CASE("OptimizeNetworkCopy") std::vector inputData = GenerateRandomData(runtime->GetInputTensorInfo(optNetId, 0).GetNumElements()); std::vector outputData(runtime->GetOutputTensorInfo(optNetId, 0).GetNumElements()); + armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(optNetId, 0); + inputTensorInfo.SetConstant(true); armnn::InputTensors inputTensors { { - 0 ,armnn::ConstTensor(runtime->GetInputTensorInfo(optNetId, 0), inputData.data()) + 0, armnn::ConstTensor(inputTensorInfo, inputData.data()) } }; armnn::OutputTensors outputTensors @@ -464,10 +466,12 @@ TEST_CASE("OptimizeNetworkCopy") armnn::NetworkId netId = networkIds[i]; std::vector copyOutputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements()); + 
armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo2.SetConstant(true); armnn::InputTensors copyInputTensors { { - 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data()) + 0, armnn::ConstTensor(inputTensorInfo2, inputData.data()) } }; armnn::OutputTensors copyOutputTensors diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp index e11553dd38..c31d084b0e 100644 --- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp @@ -51,8 +51,10 @@ void PreluEndToEnd(const std::vector& backends, inputInfo.SetQuantizationOffset(qOffset); inputInfo.SetQuantizationScale(qScale); + inputInfo.SetConstant(true); alphaInfo.SetQuantizationOffset(qOffset); alphaInfo.SetQuantizationScale(qScale); + alphaInfo.SetConstant(true); outputInfo.SetQuantizationOffset(qOffset); outputInfo.SetQuantizationScale(qScale); diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp index 281bed18e7..e2147fc59b 100644 --- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp @@ -80,22 +80,22 @@ void QLstmEndToEnd(const std::vector& backends) const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); const armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); const armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, - layerNormOffset); + layerNormOffset, true); // Mandatory params const std::vector 
inputToForgetWeightsVector = @@ -179,17 +179,17 @@ void QLstmEndToEnd(const std::vector& backends) const armnn::TensorInfo inputInfo({numBatches , inputSize}, armnn::DataType::QAsymmS8, inputScale, - inputOffset); + inputOffset, true); const armnn::TensorInfo cellStateInfo({numBatches , numUnits}, armnn::DataType::QSymmS16, cellStateScale, - cellStateOffset); + cellStateOffset, true); const armnn::TensorInfo outputStateInfo({numBatches , outputSize}, armnn::DataType::QAsymmS8, outputScale, - outputOffset); + outputOffset, true); // Input tensor data const std::vector inputVector = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}; diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp index a2fadc7b92..f178951873 100644 --- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp @@ -46,14 +46,14 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape, armnn::TensorInfo inputWeightsInfo({outputSize, inputSize}, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize}, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); - armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset); + armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true); armnn::QuantizedLstmInputParams data; @@ -210,9 +210,16 @@ void QuantizedLstmEndToEnd(const std::vector& backends) inputTensors.reserve(3); // input - inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())}); - inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())}); - inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), 
outputStateInVector.data())}); + TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0); + TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1); + TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2); + inputTensorInfo0.SetConstant(true); + inputTensorInfo1.SetConstant(true); + inputTensorInfo2.SetConstant(true); + + inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())}); + inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())}); + inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())}); OutputTensors outputTensors; outputTensors.reserve(2); diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp index 461b3b9be8..5229c47331 100644 --- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp @@ -46,7 +46,7 @@ void RankEndToEnd(const std::vector& backends) std::vector expectedOutputData{ 4 }; - TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType); + TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType, 0.0f, 0, true); TensorShape outputShape (Dimensionality::Scalar); TensorInfo outputInfo(outputShape, DataType::Signed32); diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp index aa7af11feb..a56db44161 100644 --- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp @@ -57,7 +57,7 @@ void ResizeEndToEnd(const std::vector& backends, const float qScale = IsQuantizedType() ? 0.25f : 1.0f; const int32_t qOffset = IsQuantizedType() ? 
50 : 0; - TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset); + TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true); TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset); std::vector inputData = diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp index 4e5baade27..e3b016ee94 100644 --- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp +++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp @@ -34,7 +34,7 @@ armnn::INetworkPtr CreateSpaceToDepthNetwork(const armnn::TensorShape& inputShap // Builds up the structure of the network. INetworkPtr net(INetwork::Create()); - TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout); if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0 @@ -102,7 +102,7 @@ void SpaceToDepthNhwcEndToEndTest1(const std::vector& defaultB const unsigned int blockSize = 2; TensorShape inputShape{1, 2, 2, 1}; - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); TensorShape outputShape{1, 1, 1, 4}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); @@ -133,7 +133,7 @@ void SpaceToDepthNchwEndToEndTest1(const std::vector& defaultB const unsigned int blockSize = 2; TensorShape inputShape{1, 2, 2, 1}; - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); TensorShape outputShape{1, 1, 1, 4}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); @@ -167,7 +167,7 @@ void SpaceToDepthNhwcEndToEndTest2(const std::vector& defaultB TensorShape outputShape{1, 1, 1, 8}; TensorInfo outputTensorInfo(outputShape, DataType::Float32); - TensorInfo 
inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); std::vector inputData = std::vector( { @@ -197,7 +197,7 @@ void SpaceToDepthNchwEndToEndTest2(const std::vector& defaultB TensorShape inputShape{1, 2, 2, 2}; TensorShape outputShape{1, 1, 1, 8}; - TensorInfo inputTensorInfo(inputShape, DataType::Float32); + TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true); TensorInfo outputTensorInfo(outputShape, DataType::Float32); diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp index 64e24e54aa..3a2af6850c 100644 --- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp @@ -31,7 +31,7 @@ INetworkPtr CreateSplitterNetwork(const TensorShape& inputShape, // Builds up the structure of the network. INetworkPtr net(INetwork::Create()); - TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset); + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); std::vector splitterDimSizes(inputShape.GetNumDimensions()); diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp index 764983f3b9..8ef5ecc203 100644 --- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp +++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp @@ -62,8 +62,10 @@ void AsyncThreadedEndToEndTestImpl(INetworkPtr network, inputTensors.reserve(inputTensorData.size()); for (auto&& it : inputTensorData[i]) { + TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first); + inputTensorInfo.SetConstant(true); inputTensors.push_back({it.first, - ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())}); + ConstTensor(inputTensorInfo, it.second.data())}); } 
outputTensors.reserve(expectedOutputData.size()); @@ -146,8 +148,10 @@ void AsyncEndToEndTestImpl(INetworkPtr network, inputTensors.reserve(inputTensorData.size()); for (auto&& it : inputTensorData) { + TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first); + inputTensorInfo.SetConstant(true); inputTensors.push_back({it.first, - ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())}); + ConstTensor(inputTensorInfo, it.second.data())}); } OutputTensors outputTensors; diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp index 133829c43b..8f10869088 100644 --- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp @@ -68,10 +68,10 @@ void TransposeConvolution2dEndToEnd(const std::vector& backend const float qScale = IsQuantizedType() ? 0.25f : 1.0f; const int32_t qOffset = IsQuantizedType() ? 50 : 0; - TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset); + TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true); TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset); - TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset); - TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0); + TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true); + TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true); std::vector inputData = { -- cgit v1.2.1