diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2021-10-22 11:12:07 +0100 |
---|---|---|
committer | David Monahan <david.monahan@arm.com> | 2021-11-08 19:05:11 +0000 |
commit | 5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch) | |
tree | 7f49f91e76f171041fe51c2c078b9271aa220b48 /src/backends/backendsCommon/test/EndToEndTestImpl.hpp | |
parent | d69cb904415621b066599dc20164bdb71558dc14 (diff) | |
download | armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz |
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532
!armnn-internal-tests:372451
* Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to
throw InvalidArgumentException when TensorInfo isConstant parameter
is false.
* Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
using template<typename MemoryType>.
* Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and
called submethods to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure ConstTensor constructors
throw InvalidArgumentException when TensorInfo isConstant parameter is
false.
* Added unit test to ensure an empty ConstTensor constructor will set
TensorInfo isConstant to true.
* Indentation fixes.
* Fix to arm_tensor.i to add isConstant parameter to TensorInfo
constructor. Added methods IsConstant() and SetConstant().
* Fix to const_tensor.py to throw ValueError when TensorInfo
isConstant is set to false when constructing a ConstTensor.
* Fixed PyArmnn unit tests to set TensorInfo isConstant to
True when ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure ConstTensor
constructors throw ValueError when TensorInfo isConstant parameter
is false.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
Diffstat (limited to 'src/backends/backendsCommon/test/EndToEndTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/EndToEndTestImpl.hpp | 23 |
1 files changed, 14 insertions, 9 deletions
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index 2d268f8ea1..269a46077e 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -79,7 +79,8 @@ bool ConstantUsageTest(const std::vector<BackendId>& computeDevice, inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends) { - const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32); + TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32); + commonTensorInfo.SetConstant(true); return ConstantUsageTest(backends, commonTensorInfo, @@ -98,6 +99,7 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends) commonTensorInfo.SetQuantizationScale(scale); commonTensorInfo.SetQuantizationOffset(offset); + commonTensorInfo.SetConstant(true); return ConstantUsageTest(backends, commonTensorInfo, @@ -198,7 +200,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -263,7 +265,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -334,7 +336,7 @@ 
inline void ImportAlignedPointerTest(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // Optimize the network @@ -418,7 +420,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // optimize the network @@ -449,6 +451,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -507,7 +510,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); // optimize the network @@ -536,6 +539,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -594,7 +598,7 @@ inline void 
ImportAndExportWorkload(std::vector<BackendId> backends) input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true)); pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32)); IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); @@ -624,6 +628,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends) }; INFO("Create Network"); + InputTensors inputTensors { {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}, @@ -685,7 +690,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend activation->GetOutputSlot(0).Connect(output0->GetInputSlot(0)); activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true)); activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32)); // Optimize the network @@ -794,7 +799,7 @@ inline void StridedSliceInvalidSliceEndToEndTest(std::vector<BackendId> backends input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0)); stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0)); - input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32)); + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true)); stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32)); // Attempt to optimize the network and check that the correct exception is thrown |