about · summary · refs · log · tree · commit · diff
path: root/src/armnn/test
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2022-06-28 16:52:18 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2022-06-29 10:42:49 +0000
commit03ee5d8a21688555c4e0a68d8400f4c3e3d844e2 (patch)
treeaf30388c80cfa9002980bc41de403e9b4a52f7a2 /src/armnn/test
parenta96489a2fd459bd3d73297fa5fdaef5d13a57a4e (diff)
downloadarmnn-03ee5d8a21688555c4e0a68d8400f4c3e3d844e2.tar.gz
IVGCVSW-6962 Adding Const layer in the graph immediately after Input
instead of immediately before output. Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I2d89a1efdabfdb4be24a8998a03fe1f502d26183
Diffstat (limited to 'src/armnn/test')
-rw-r--r--src/armnn/test/optimizations/FoldPadTests.cpp61
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp34
2 files changed, 50 insertions(+), 45 deletions(-)
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 14c211f9bf..4d7defcabe 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -25,6 +25,7 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
TensorInfo inputInfo(4, inputShape, DataType::Float32);
TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 1.0f, 0, true);
TensorInfo outputInfo(4, outputShape, DataType::Float32);
Layer* input = graph.AddLayer<InputLayer>(0, "input");
@@ -45,16 +46,13 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
+ ConstTensor weights(weightsInfo, weightsVector);
ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
-
- TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
- conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -75,12 +73,11 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<PadLayer>,
- &IsLayerOfType<ConstantLayer>,
- checkSimpleConv2d,
- &IsLayerOfType<OutputLayer>));
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<PadLayer>,
+ checkSimpleConv2d,
+ &IsLayerOfType<OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
@@ -94,11 +91,10 @@ TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkPadFoldedIntoConv2d,
- &IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<OutputLayer>));
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ checkPadFoldedIntoConv2d,
+ &IsLayerOfType<OutputLayer>));
}
TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
@@ -111,6 +107,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
TensorInfo inputInfo(4, inputShape, DataType::Float32);
TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 1.0f, 0, true);
TensorInfo outputInfo(4, outputShape, DataType::Float32);
Layer* input = graph.AddLayer<InputLayer>(0, "input");
@@ -131,15 +128,15 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
depthwiseConvolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
std::vector<float> weightsVector(18);
- ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);
+ ConstTensor weights(weightsInfo, weightsVector);
- auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
- "depthwiseConv2d");
auto* weightsLayer = graph.AddLayer<ConstantLayer>("weights");
+ weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+ weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot().SetTensorInfo(weights.GetInfo());
+ auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
+ "depthwiseConv2d");
depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
- depthwiseConv2dLayer->m_Weight = std::make_shared<ScopedTensorHandle>(weights);
Layer* output = graph.AddLayer<OutputLayer>(0, "output");
@@ -160,12 +157,11 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<PadLayer>,
- checkSimpleDepthwiseConv2d,
- &IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<OutputLayer>));
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<PadLayer>,
+ checkSimpleDepthwiseConv2d,
+ &IsLayerOfType<OutputLayer>));
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoDepthwiseConvolution2d()));
@@ -181,11 +177,10 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkPadFoldedIntoDepthwiseConv2d,
- &IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<OutputLayer>));
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ checkPadFoldedIntoDepthwiseConv2d,
+ &IsLayerOfType<OutputLayer>));
}
TEST_CASE("FoldPadLayerIntoPooling2dLayer")
@@ -631,10 +626,10 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
- TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
+ TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 1.0f, 0, true);
ConstTensor weights(weightsInfo, weightsData);
std::vector<float> biasVector = {5, 6, 7, 8};
- TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
+ TensorInfo biasInfo({4}, DataType::Float32, 1.0f, 0, true);
ConstTensor bias(biasInfo, biasVector);
IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D");
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index d3a6932a48..66893ce1f5 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -92,18 +92,23 @@ TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
conv->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
Fp32NetworkToBf16Converter()));
+ CHECK(7 == graph.GetNumLayers());
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
@@ -179,17 +184,23 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::FullyConnectedLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
Fp32NetworkToBf16Converter()));
+ CHECK(7 == graph.GetNumLayers());
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::FullyConnectedLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
@@ -215,5 +226,4 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-
} \ No newline at end of file