author     Cathal Corbett <cathal.corbett@arm.com>    2021-10-22 11:12:07 +0100
committer  David Monahan <david.monahan@arm.com>      2021-11-08 19:05:11 +0000
commit     5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch)
tree       7f49f91e76f171041fe51c2c078b9271aa220b48  /src/armnn/test/optimizations/FuseBatchNormTests.cpp
parent     d69cb904415621b066599dc20164bdb71558dc14 (diff)
download   armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532 !armnn-internal-tests:372451

 * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw
   InvalidArgumentException when the TensorInfo isConstant parameter is false.
 * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
   using template<typename MemoryType>.
 * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called
   submethods to return TensorInfo& rather than TensorInfo.
 * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor
   created has its TensorInfo isConstant set to true.
 * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw
   InvalidArgumentException when the TensorInfo isConstant parameter is false.
 * Added unit test to ensure an empty ConstTensor constructor will set TensorInfo
   isConstant to true.
 * Indentation fixes.
 * Fix to arm_tensor.i to add isConstant parameter to TensorInfo constructor.
   Added methods IsConstant() and SetConstant().
 * Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set
   to false when constructing a ConstTensor.
 * Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor
   is used.
 * Added unit tests in test_const_tensor.py to ensure ConstTensor constructors
   throw ValueError when the TensorInfo isConstant parameter is false.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
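The behaviour this change enforces can be summarised with a small standalone sketch (not part of the patch; the shape, data and variable names below are illustrative only): constructing a ConstTensor from a TensorInfo whose isConstant flag is false is expected to throw armnn::InvalidArgumentException, while a TensorInfo created or later marked with isConstant = true is accepted.

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/Exceptions.hpp>

#include <iostream>
#include <vector>

int main()
{
    std::vector<float> data(4, 1.0f);
    armnn::TensorShape shape({ 4 });

    // TensorInfo(shape, dataType, quantScale, quantOffset, isConstant)
    armnn::TensorInfo constInfo(shape, armnn::DataType::Float32, 0.0f, 0, true);
    armnn::ConstTensor accepted(constInfo, data);            // isConstant == true: accepted

    armnn::TensorInfo nonConstInfo(shape, armnn::DataType::Float32); // isConstant defaults to false
    try
    {
        armnn::ConstTensor rejected(nonConstInfo, data);      // expected to throw after this change
    }
    catch (const armnn::InvalidArgumentException&)
    {
        std::cout << "ConstTensor rejected a non-constant TensorInfo\n";
    }

    // The flag can also be set after construction.
    nonConstInfo.SetConstant(true);
    armnn::ConstTensor nowAccepted(nonConstInfo, data);
    std::cout << nowAccepted.GetInfo().IsConstant() << "\n";  // prints 1
    return 0;
}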
Diffstat (limited to 'src/armnn/test/optimizations/FuseBatchNormTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp  21
1 file changed, 13 insertions, 8 deletions
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 20d2940b81..0e969c1a5c 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -107,11 +107,11 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector(begin(weightsIntVector), end(weightsIntVector));
- TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType);
+ TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsVector);
std::vector<T> biasVector = GetVector<T>(outputDimensionSizes[3], 3.3f, 0.1f);
- TensorInfo biasInfo(1, outputChannelSize, ArmnnType);
+ TensorInfo biasInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -120,10 +120,10 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
std::vector<T> meanVector = GetVector<T>(outputDimensionSizes[3], 0.1f, 0.1f);
std::vector<T> varianceVector = GetVector<T>(outputDimensionSizes[3], 1.0f, 0.1f);
- ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
- ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
- ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+ ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), betaVector);
+ ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), gammaVector);
+ ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), varianceVector);
// Create a network
INetworkPtr network = INetwork::Create();
@@ -215,8 +215,10 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataFused.resize(108);
}
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensorsFused {
- {0, ConstTensor(run->GetInputTensorInfo (networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
@@ -259,8 +261,11 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataNotFused.resize(108);
outputData2NotFused.resize(108);
}
+
+ TensorInfo inputTensorInfo2 = runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0);
+ inputTensorInfo2.SetConstant(true);
InputTensors inputTensorsNotFused{
- {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+ {0, ConstTensor(inputTensorInfo2, inputDataNotFused.data())}};
OutputTensors outputTensorsNotFused{
{0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
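For reference, the input-tensor pattern applied in the hunks above can be read in isolation as the following sketch (a hypothetical helper, not code from the test; runtime, networkId, inputData and outputData are placeholder names). The TensorInfo returned by the runtime is not flagged as constant, so SetConstant(true) must be called on a local copy before the input data is wrapped in a ConstTensor.

#include <armnn/ArmNN.hpp>
#include <vector>

// Hypothetical helper: runs one inference, marking the input TensorInfo as
// constant before wrapping it in a ConstTensor, as the hunks above now do.
void RunInference(armnn::IRuntime::Ptr& runtime,
                  armnn::NetworkId networkId,
                  const std::vector<float>& inputData,
                  std::vector<float>& outputData)
{
    armnn::TensorInfo inputInfo = runtime->GetInputTensorInfo(networkId, 0);
    inputInfo.SetConstant(true); // GetInputTensorInfo() does not flag the info as constant

    armnn::InputTensors inputTensors
    {
        { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
    };
    armnn::OutputTensors outputTensors
    {
        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId, 0), outputData.data()) }
    };

    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
}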