diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2021-10-22 11:12:07 +0100 |
---|---|---|
committer | David Monahan <david.monahan@arm.com> | 2021-11-08 19:05:11 +0000 |
commit | 5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch) | |
tree | 7f49f91e76f171041fe51c2c078b9271aa220b48 /src/armnn/test/optimizations/FoldPadTests.cpp | |
parent | d69cb904415621b066599dc20164bdb71558dc14 (diff) | |
download | armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz |
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532
!armnn-internal-tests:372451
* Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to
throw InvalidArgumentException when TensorInfo isConstant parameter
is false.
* Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
using template<typename MemoryType>.
* Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and
called submethods to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure ConstTensor constructors
throw InvalidArgumentException when TensorInfo isConstant parameter is
false.
* Added unit test to ensure an empty ConstTensor constructor will set
TensorInfo isConstant to true.
* Indentation fixes.
* Fix to arm_tensor.i to add isConstant parameter to TensorInfo
constructor. Added methods IsConstant() and SetConstant().
* Fix to const_tensor.py to throw ValueError when TensorInfo
isConstant is set to false when constructing a ConstTensor.
* Fixed PyArmnn unit tests to set TensorInfo isConstant to
True when ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure ConstTensor
constructors throw ValueError when TensorInfo isConstant parameter
is false.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
Diffstat (limited to 'src/armnn/test/optimizations/FoldPadTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 24 |
1 files changed, 15 insertions, 9 deletions
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 11f09e80e0..a598983706 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -45,7 +45,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer") convolution2dDescriptor.m_DataLayout = DataLayout::NHWC; std::vector<float> weightsVector(18); - ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector); + ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector); Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d"); conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights); @@ -122,7 +122,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer") depthwiseConvolution2dDescriptor.m_DataLayout = DataLayout::NHWC; std::vector<float> weightsVector(18); - ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector); + ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector); auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor, "depthwiseConv2d"); @@ -526,7 +526,9 @@ TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimiza NetworkId networkIdentifier; CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success); - InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}}; + TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0); + inputTensorInfo.SetConstant(true); + InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}}; // Set the initial values of the data to different values to the golden data just in case the inference fails. 
std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity()); @@ -614,10 +616,10 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42}; - TensorInfo weightsInfo(4, weightsShape, DataType::Float32); + TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true); ConstTensor weights(weightsInfo, weightsData); std::vector<float> biasVector = {5, 6, 7, 8}; - TensorInfo biasInfo({4}, DataType::Float32); + TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); @@ -644,7 +646,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio NetworkId networkIdentifier; CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success); - InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}}; + TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0); + inputTensorInfo.SetConstant(true); + InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}}; // Set the initial values of the data to different values to the golden data just in case the inference fails. 
std::vector<float> optimizedData(100, -std::numeric_limits<float>::infinity()); @@ -732,10 +736,10 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42}; - TensorInfo weightsInfo(4, weightsShape, DataType::Float32); + TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true); ConstTensor weights(weightsInfo, weightsData); std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8}; - TensorInfo biasInfo({12}, DataType::Float32); + TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); @@ -762,7 +766,9 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp NetworkId networkIdentifier; CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success); - InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}}; + TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0); + inputTensorInfo.SetConstant(true); + InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}}; // Set the initial values of the data to different values to the golden data just in case the inference fails. std::vector<float> optimizedData(300, -std::numeric_limits<float>::infinity()); |