diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2021-10-22 11:12:07 +0100 |
---|---|---|
committer | David Monahan <david.monahan@arm.com> | 2021-11-08 19:05:11 +0000 |
commit | 5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch) | |
tree | 7f49f91e76f171041fe51c2c078b9271aa220b48 /src/backends/cl/test | |
parent | d69cb904415621b066599dc20164bdb71558dc14 (diff) | |
download | armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz |
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532
!armnn-internal-tests:372451
* Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to
throw InvalidArgumentException when TensorInfo isConstant parameter
is false.
* Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
using template<typename MemoryType>.
* Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and
called submethods to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure ConstTensor constructors
throw InvalidArgumentException when TensorInfo isConstant parameter is
false.
* Added unit test to ensure an empty ConstTensor constructor will set
TensorInfo isConstant to true.
* Indentation fixes.
* Fix to arm_tensor.i to add isConstant parameter to TensorInfo
constructor. Added methods IsConstant() and SetConstant().
* Fix to const_tensor.py to throw ValueError when TensorInfo
isConstant is set to false when constructing a ConstTensor.
* Fixed PyArmnn unit tests to set TensorInfo isConstant to
True when ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure ConstTensor
constructors throw ValueError when TensorInfo isConstant parameter
is false.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
Diffstat (limited to 'src/backends/cl/test')
-rw-r--r-- | src/backends/cl/test/ClContextSerializerTests.cpp | 4 | ||||
-rw-r--r-- | src/backends/cl/test/ClCustomAllocatorTests.cpp | 6 | ||||
-rw-r--r-- | src/backends/cl/test/ClFallbackTests.cpp | 5 | ||||
-rw-r--r-- | src/backends/cl/test/ClImportTensorHandleTests.cpp | 4 | ||||
-rw-r--r-- | src/backends/cl/test/Fp16SupportTest.cpp | 6 |
5 files changed, 19 insertions, 6 deletions
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp index 495aa69bff..862ed2ecab 100644 --- a/src/backends/cl/test/ClContextSerializerTests.cpp +++ b/src/backends/cl/test/ClContextSerializerTests.cpp @@ -44,9 +44,11 @@ void RunInference(armnn::NetworkId& netId, armnn::IRuntimePtr& runtime, std::vec 1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax. }; + armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo.SetConstant(true); armnn::InputTensors inputTensors { - {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())} + {0, armnn::ConstTensor(inputTensorInfo, inputData.data())} }; armnn::OutputTensors outputTensors diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp index 60145139ff..c09d0b2bc2 100644 --- a/src/backends/cl/test/ClCustomAllocatorTests.cpp +++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp @@ -67,7 +67,7 @@ armnn::INetworkPtr CreateTestNetwork(armnn::TensorInfo& inputTensorInfo) armnn::FullyConnectedDescriptor fullyConnectedDesc; float weightsData[] = {1.0f}; // Identity - TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32); + TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true); weightsInfo.SetConstant(true); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -145,9 +145,11 @@ TEST_CASE("ClCustomAllocatorTest") auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr); std::fill_n(outputPtr, numElements, -10.0f); + armnn::TensorInfo inputTensorInfo2 = run->GetInputTensorInfo(networkIdentifier, 0); + inputTensorInfo2.SetConstant(true); armnn::InputTensors inputTensors { - {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), alignedInputPtr)}, + {0, armnn::ConstTensor(inputTensorInfo2, alignedInputPtr)}, }; armnn::OutputTensors outputTensors { diff 
--git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp index 7721206d3d..7cd05d193b 100644 --- a/src/backends/cl/test/ClFallbackTests.cpp +++ b/src/backends/cl/test/ClFallbackTests.cpp @@ -35,6 +35,7 @@ TEST_CASE("ClImportEnabledFallbackToNeon") sub->GetOutputSlot(0).Connect(output->GetInputSlot(0)); TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32); + info.SetConstant(true); input0->GetOutputSlot(0).SetTensorInfo(info); input1->GetOutputSlot(0).SetTensorInfo(info); @@ -181,6 +182,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon") sub->GetOutputSlot(0).Connect(output->GetInputSlot(0)); TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32); + info.SetConstant(true); input0->GetOutputSlot(0).SetTensorInfo(info); input1->GetOutputSlot(0).SetTensorInfo(info); @@ -311,6 +313,7 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon") pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32); + info.SetConstant(true); TensorInfo poolingInfo = TensorInfo({ 1, 2, 2, 1 }, DataType::Float32); input0->GetOutputSlot(0).SetTensorInfo(info); @@ -468,6 +471,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon") pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32); + info.SetConstant(true); TensorInfo poolingInfo = TensorInfo({ 1, 2, 1, 1 }, DataType::Float32); input0->GetOutputSlot(0).SetTensorInfo(info); @@ -536,6 +540,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon") std::vector<float> expectedOutput{ 11.0f, -1.0f }; + InputTensors inputTensors { { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) }, diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp index 6b1d3521d5..0403d5379e 100644 --- a/src/backends/cl/test/ClImportTensorHandleTests.cpp +++ 
b/src/backends/cl/test/ClImportTensorHandleTests.cpp @@ -171,9 +171,11 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd") auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr); std::fill_n(outputPtr, numElements, -10.0f); + TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo.SetConstant(true); InputTensors inputTensors { - {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputPtr)}, + {0,armnn::ConstTensor(inputTensorInfo, alignedInputPtr)}, }; OutputTensors outputTensors { diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp index 1974d4d856..b30a447f9f 100644 --- a/src/backends/cl/test/Fp16SupportTest.cpp +++ b/src/backends/cl/test/Fp16SupportTest.cpp @@ -88,10 +88,12 @@ TEST_CASE("Fp16AdditionTest") 100.0_h, 200.0_h, 300.0_h, 400.0_h }; + TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0); + inputTensorInfo.SetConstant(true); InputTensors inputTensors { - {0,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())}, - {1,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())} + {0,ConstTensor(inputTensorInfo, input1Data.data())}, + {1,ConstTensor(inputTensorInfo, input2Data.data())} }; std::vector<Half> outputData(input1Data.size()); |