From 5b8093c17044e8eaaaa42d96ba4902dee5791be4 Mon Sep 17 00:00:00 2001 From: Cathal Corbett Date: Fri, 22 Oct 2021 11:12:07 +0100 Subject: IVGCVSW-6420: Constant flag in tensor info is not set correctly !android-nn-driver:6532 !armnn-internal-tests:372451 * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data() using template. * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo. * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true. * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true. * Indentation fixes. * Fix to arm_tensor.i to add isConstant parameter to TensorInfo constructor. Added methods IsConstant() and SetConstant(). * Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set to false when constructing a ConstTensor. * Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor is used. * Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when TensorInfo isConstant parameter is false. 
Signed-off-by: Cathal Corbett Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148 --- src/armnn/optimizations/ConvertConstants.hpp | 8 ++++---- src/armnn/optimizations/FuseBatchNorm.hpp | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'src/armnn/optimizations') diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp index 66b3d2685a..65318af285 100644 --- a/src/armnn/optimizations/ConvertConstants.hpp +++ b/src/armnn/optimizations/ConvertConstants.hpp @@ -35,7 +35,7 @@ struct BFloat16ToFloat32 info.GetNumElements(), newValues.data()); - TensorInfo newInfo(info.GetShape(), DataType::Float32); + TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true); ConstTensor newInput(newInfo, newValues); handle.reset(new ScopedTensorHandle(newInput)); } @@ -56,7 +56,7 @@ struct Float16ToFloat32 info.GetNumElements(), newValues.data()); - TensorInfo newInfo(info.GetShape(), DataType::Float32); + TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true); ConstTensor newInput(newInfo, newValues); handle.reset(new ScopedTensorHandle(newInput)); } @@ -77,7 +77,7 @@ struct Float32ToBFloat16 info.GetNumElements(), newValues.data()); - TensorInfo newInfo(info.GetShape(), DataType::BFloat16); + TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true); ConstTensor newInput(newInfo, newValues); handle.reset(new ScopedTensorHandle(newInput)); } @@ -98,7 +98,7 @@ struct Float32ToFloat16 info.GetNumElements(), newValues.data()); - TensorInfo newInfo(info.GetShape(), DataType::Float16); + TensorInfo newInfo(info.GetShape(), DataType::Float16, 0.0f, 0, true); ConstTensor newInput(newInfo, newValues); handle.reset(new ScopedTensorHandle(newInput)); } diff --git a/src/armnn/optimizations/FuseBatchNorm.hpp b/src/armnn/optimizations/FuseBatchNorm.hpp index fe8238bf14..66f722a8ef 100644 --- a/src/armnn/optimizations/FuseBatchNorm.hpp +++ b/src/armnn/optimizations/FuseBatchNorm.hpp @@ -146,7 
+146,7 @@ public: sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut]; } } - ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType), fusedBiasVector); + ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType, 0.0f, 0, true), fusedBiasVector); // Insert the new convolution layer that has batch norm parameters fused into const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") + base.GetName(); -- cgit v1.2.1