aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2022-06-28 16:52:18 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2022-06-29 10:42:49 +0000
commit03ee5d8a21688555c4e0a68d8400f4c3e3d844e2 (patch)
treeaf30388c80cfa9002980bc41de403e9b4a52f7a2 /src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
parenta96489a2fd459bd3d73297fa5fdaef5d13a57a4e (diff)
downloadarmnn-03ee5d8a21688555c4e0a68d8400f4c3e3d844e2.tar.gz
IVGCVSW-6962 Adding Const layer in the graph immediately after Input
instead of immediately before output. Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I2d89a1efdabfdb4be24a8998a03fe1f502d26183
Diffstat (limited to 'src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp')
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp34
1 file changed, 22 insertions, 12 deletions
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index d3a6932a48..66893ce1f5 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -92,18 +92,23 @@ TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
conv->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
Fp32NetworkToBf16Converter()));
+ CHECK(7 == graph.GetNumLayers());
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::Convolution2dLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
@@ -179,17 +184,23 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::FullyConnectedLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
Fp32NetworkToBf16Converter()));
+ CHECK(7 == graph.GetNumLayers());
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConstantLayer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
+ &IsLayerOfType<armnn::FullyConnectedLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
@@ -215,5 +226,4 @@ TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-
} \ No newline at end of file