path: root/src/armnn/test/optimizations
author    Cathal Corbett <cathal.corbett@arm.com>    2021-10-22 11:12:07 +0100
committer David Monahan <david.monahan@arm.com>      2021-11-08 19:05:11 +0000
commit    5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch)
tree      7f49f91e76f171041fe51c2c078b9271aa220b48 /src/armnn/test/optimizations
parent    d69cb904415621b066599dc20164bdb71558dc14 (diff)
download  armnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532 !armnn-internal-tests:372451
* Fixed 2 of the 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added a new ConstTensor() constructor in Tensor.cpp that accepts vector<>.data(), using template<typename MemoryType>.
* Fixed the runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods, and the submethods they call, to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure the ConstTensor constructors throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added a unit test to ensure an empty ConstTensor constructor sets TensorInfo isConstant to true.
* Indentation fixes.
* Fixed arm_tensor.i to add the isConstant parameter to the TensorInfo constructor, and added the IsConstant() and SetConstant() methods.
* Fixed const_tensor.py to throw ValueError when TensorInfo isConstant is false while constructing a ConstTensor.
* Fixed the PyArmnn unit tests to set TensorInfo isConstant to True wherever a ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure the ConstTensor constructors throw ValueError when the TensorInfo isConstant parameter is false.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
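As a minimal illustration of the constructor change (a sketch, not code from this patch): a ConstTensor can now only be built from a TensorInfo whose constant flag is set, via the trailing isConstant constructor argument or via SetConstant().

// Sketch only: the ConstTensor construction pattern this change enforces.
// The trailing 'true' is the isConstant argument; without it the
// ConstTensor constructor now throws armnn::InvalidArgumentException.
#include <armnn/Tensor.hpp>
#include <vector>

void ConstTensorSketch()
{
    unsigned int dims[] = { 4, 1, 1, 1 };
    std::vector<float> weightsData{ 1.0f, 2.0f, 3.0f, 4.0f };

    // TensorInfo(numDims, dimSizes, dataType, quantScale, quantOffset, isConstant)
    armnn::TensorInfo weightsInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true);
    armnn::ConstTensor weights(weightsInfo, weightsData);
}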
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r-- src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp            |  4
-rw-r--r-- src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp              |  4
-rw-r--r-- src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp         |  2
-rw-r--r-- src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp         |  2
-rw-r--r-- src/armnn/test/optimizations/FoldPadTests.cpp                             | 24
-rw-r--r-- src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp          |  8
-rw-r--r-- src/armnn/test/optimizations/FuseActivationTests.cpp                      | 27
-rw-r--r-- src/armnn/test/optimizations/FuseBatchNormTests.cpp                       | 21
-rw-r--r-- src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp |  4
-rw-r--r-- src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp                  |  4
10 files changed, 61 insertions, 39 deletions
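The hunks below apply one of two recurring fixes. Where a TensorInfo is built inline, the new trailing isConstant argument is passed as true; where the info comes back from the runtime, the tests take a copy, mark it constant, and only then wrap it in a ConstTensor. A minimal sketch of the runtime-input pattern, with assumed context (the names 'run', 'networkIdentifier', and 'inputData' are placeholders):

// Sketch of the recurring runtime-input fix in the hunks below.
// Assumed context: 'run' is an armnn::IRuntimePtr, 'networkIdentifier'
// a loaded NetworkId, 'inputData' a std::vector<float> sized for binding 0.
armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
inputTensorInfo.SetConstant(true); // required before wrapping in a ConstTensor
armnn::InputTensors inputTensors{
    { 0, armnn::ConstTensor(inputTensorInfo, inputData.data()) }
};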
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 36a4507fc3..7573005518 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -290,7 +290,7 @@ TEST_CASE("ReshapeParentConstLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::QAsymmU8);
- const TensorInfo info1({ 5 }, DataType::QAsymmU8);
+ const TensorInfo info1({ 5 }, DataType::QAsymmU8, 0.0f, 0, true);
const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::QAsymmU8);
auto input = graph.AddLayer<InputLayer>(0, "input");
@@ -346,7 +346,7 @@ TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
// What we'll do is have two sequential add layers both using the same const tensor.
Graph graph;
const TensorInfo inputInfo({ 1, 512 }, DataType::Float32);
- const TensorInfo constantTermInfo({ 1 }, DataType::Float32);
+ const TensorInfo constantTermInfo({ 1 }, DataType::Float32, 0.0f, 0, true);
const TensorInfo outputInfo({ 1, 512 }, DataType::Float32);
auto input = graph.AddLayer<InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index b78a1bf207..7b326fa8bc 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -32,7 +32,7 @@ TEST_CASE("ConvertConstantsFloatToBFloatTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
@@ -88,7 +88,7 @@ TEST_CASE("ConvertConstantsBFloatToFloatTest")
std::vector<uint16_t> bfWeights(8);
armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
bfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16), bfWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index e6cca4f7bf..f74ab0f308 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -25,7 +25,7 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
// Create const tensor from fp32 data
unsigned int dims[] = { 4, 1, 1, 1 };
std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 2ec1279f33..c4551525c1 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -25,7 +25,7 @@ TEST_CASE("ConvertConstantsHalfToFloatTest")
std::vector<uint16_t> halfWeights(4);
armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
halfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16), halfWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 11f09e80e0..a598983706 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -45,7 +45,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);
+ ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
@@ -122,7 +122,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
depthwiseConvolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);
+ ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
"depthwiseConv2d");
@@ -526,7 +526,9 @@ TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimiza
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity());
@@ -614,10 +616,10 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsData);
std::vector<float> biasVector = {5, 6, 7, 8};
- TensorInfo biasInfo({4}, DataType::Float32);
+ TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -644,7 +646,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(100, -std::numeric_limits<float>::infinity());
@@ -732,10 +736,10 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsData);
std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8};
- TensorInfo biasInfo({12}, DataType::Float32);
+ TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -762,7 +766,9 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp
NetworkId networkIdentifier;
CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
- InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};
// Set the initial values of the data to different values to the golden data just in case the inference fails.
std::vector<float> optimizedData(300, -std::numeric_limits<float>::infinity());
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index 384b14c0cf..63cd170f02 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -59,12 +59,12 @@ TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create const bias fp32 data
unsigned int biasDims[] {4};
std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+ armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
// A network with Convolution2d layer
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
@@ -129,12 +129,12 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-3.1055E+29f, // 0xF07ADC3C Round up
-9.149516E-10f // 0xB07B7FFF Round down
};
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+ armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
// Create const bias fp32 data
unsigned int biasDims[] {4};
std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+ armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
// A network with FullyConnected layer
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2352a3c498..54a9d9a189 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -66,7 +66,7 @@ struct Convolution2dTest
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
ConstTensor weights(weightsInfo, weightsVector);
Optional<ConstTensor> optionalBias;
@@ -115,7 +115,7 @@ public:
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
ConstTensor weights(weightsInfo, weightsVector);
Optional<ConstTensor> optionalBias;
@@ -212,10 +212,10 @@ public:
std::vector<T> varianceVector = GetVector<T>(GetOutputShape()[3], 1.0f, 0.1f);
const unsigned int outputChannelSize[] = { GetOutputShape()[3] };
- ConstTensor beta(TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
- ConstTensor gamma(TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
- ConstTensor mean(TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+ ConstTensor beta(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), betaVector);
+ ConstTensor gamma(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), gammaVector);
+ ConstTensor mean(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), varianceVector);
return network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, name);
}
@@ -491,8 +491,11 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
std::vector<T> outputDataFused(LayerTest::outputSize);
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+
InputTensors inputTensorsFused{
- {0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
@@ -545,8 +548,11 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
std::vector<T> outputDataNotFused(LayerTest::outputSize);
std::vector<T> outputData2NotFused(LayerTest::outputSize);
+ TensorInfo inputTensorInfoNotFused = runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0);
+ inputTensorInfoNotFused.SetConstant(true);
+
InputTensors inputTensorsNotFused{
- {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+ {0, ConstTensor(inputTensorInfoNotFused, inputDataNotFused.data())}};
OutputTensors outputTensorsNotFused{
{0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
@@ -591,8 +597,11 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
std::vector<T> inputDataFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
std::vector<T> outputDataFused(LayerTest::outputSize);
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+
InputTensors inputTensorsFused{
- {0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 20d2940b81..0e969c1a5c 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -107,11 +107,11 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
std::vector<T> weightsVector(begin(weightsIntVector), end(weightsIntVector));
- TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType);
+ TensorInfo weightsInfo(4, weightsDimensionSizes, ArmnnType, 0.0f, 0, true);
ConstTensor weights(weightsInfo, weightsVector);
std::vector<T> biasVector = GetVector<T>(outputDimensionSizes[3], 3.3f, 0.1f);
- TensorInfo biasInfo(1, outputChannelSize, ArmnnType);
+ TensorInfo biasInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
@@ -120,10 +120,10 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing)
std::vector<T> meanVector = GetVector<T>(outputDimensionSizes[3], 0.1f, 0.1f);
std::vector<T> varianceVector = GetVector<T>(outputDimensionSizes[3], 1.0f, 0.1f);
- ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType), betaVector);
- ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType), gammaVector);
- ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType), meanVector);
- ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType), varianceVector);
+ ConstTensor beta (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), betaVector);
+ ConstTensor gamma (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), gammaVector);
+ ConstTensor mean (TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), meanVector);
+ ConstTensor variance(TensorInfo(1, outputChannelSize, ArmnnType, 0.0f, 0, true), varianceVector);
// Create a network
INetworkPtr network = INetwork::Create();
@@ -215,8 +215,10 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataFused.resize(108);
}
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensorsFused {
- {0, ConstTensor(run->GetInputTensorInfo (networkIdentifier, 0), inputDataFused.data())}};
+ {0, ConstTensor(inputTensorInfo, inputDataFused.data())}};
OutputTensors outputTensorsFused{
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
@@ -259,8 +261,11 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
outputDataNotFused.resize(108);
outputData2NotFused.resize(108);
}
+
+ TensorInfo inputTensorInfo2 = runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0);
+ inputTensorInfo2.SetConstant(true);
InputTensors inputTensorsNotFused{
- {0, ConstTensor(runNotFused->GetInputTensorInfo(networkIdentifierNotFused, 0), inputDataNotFused.data())}};
+ {0, ConstTensor(inputTensorInfo2, inputDataNotFused.data())}};
OutputTensors outputTensorsNotFused{
{0, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 0), outputDataNotFused.data())},
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index e91e16f132..f862315220 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -232,7 +232,7 @@ TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
-1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
// clang-format on
};
- ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
+ ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32, 0.0f, 0, true), inputData);
InputTensors inputs = { { 0, input } };
std::vector<float> outputData(4 * 6);
Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
@@ -279,7 +279,7 @@ TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
-1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f,
// clang-format on
};
- ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData);
+ ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32, 0.0f, 0, true), inputData);
InputTensors inputs = { { 0, input } };
std::vector<float> outputData(4 * 6);
Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data());
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index 0be8857224..692f371356 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -94,9 +94,11 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
// Create input and output tensors
std::vector<float> outputData(expectedOutput.size());
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
OutputTensors outputTensors
{