Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp     | 21
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp     | 17
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp      | 15
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp                   | 27
-rw-r--r--  src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp | 85
5 files changed, 48 insertions(+), 117 deletions(-)
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 34e5f6d3b6..118907e703 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <TestUtils.hpp>
-#include <Optimizer.hpp>
#include <Half.hpp>
+#include <Optimizer.hpp>
#include <doctest/doctest.h>
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
// Create const tensor from fp32 data
unsigned int dims[] = { 4, 1, 1, 1 };
std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
+ armnn::TensorInfo weightsInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true);
+ armnn::ConstTensor weights(weightsInfo, floatWeights);
// Create simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
+ auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
// Connect up the layers
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
- const Half* data = fc->m_Weight->GetConstTensor<Half>();
+ const Half* data = weightsLayer->m_LayerOutput->GetConstTensor<Half>();
CHECK(data[0] == Half(1.0f));
CHECK(data[1] == Half(2.0f));
CHECK(data[2] == Half(3.0f));
@@ -100,12 +105,14 @@ TEST_CASE("ConvertConstantsFloatToHalfTest_constant")
fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
+ CHECK(5 == graph.GetNumLayers());
CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether weights data matches expected fp16 data
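The change above moves the FullyConnected weights out of the layer's m_Weight member and into an explicit ConstantLayer connected to input slot 1, so the optimizer finds the constant by walking the graph. A minimal sketch of the new wiring, assuming the graph, weights, weightsInfo, and fc fixtures already set up in this test:

    // Weights become a graph-visible ConstantLayer rather than a layer member.
    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);

    // Input slot 1 of FullyConnected carries the weights; the conversion pass
    // rewrites the ConstantLayer's tensor in place.
    weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));

The same pattern recurs in the remaining tests in this patch.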
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 4c453cc799..778d7b0814 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsHalfToFloatTest")
std::vector<uint16_t> halfWeights(4);
armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
halfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
+ armnn::TensorInfo weightInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true);
+ armnn::ConstTensor weights(weightInfo, halfWeights);
//Create the simple test network
auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(info);
auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
fc->GetOutputSlot().SetTensorInfo(info);
+ auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+ weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
+
auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
//Connect up the layers
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+ weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
//Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
- const float* data = fc->m_Weight->GetConstTensor<float>();
+ const float* data = weightsLayer->m_LayerOutput->GetConstTensor<float>();
CHECK(1.0f == data[0]);
CHECK(2.0f == data[1]);
CHECK(3.0f == data[2]);
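The fp16 source data in this test is produced with the FloatingPointConverter call shown in the hunk. A standalone sketch of that conversion, assuming the armnnUtils/FloatingPointConverter.hpp header path used elsewhere in the repository:

    #include <armnnUtils/FloatingPointConverter.hpp>

    #include <cstdint>
    #include <vector>

    std::vector<float>    fp32 = { 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<uint16_t> fp16(fp32.size());
    // Writes raw IEEE fp16 bit patterns into the uint16_t buffer, which is how
    // this test stores half-precision weights before wrapping them in a
    // ConstTensor with DataType::Float16.
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(fp32.data(), fp32.size(), fp16.data());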
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index bc8839948b..0a4a4fafde 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,14 +33,21 @@ TEST_CASE("Fp32NetworkToFp16OptimizationTest")
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
+ &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ CHECK(floor->GetDataType() == armnn::DataType::Float16);
+ CHECK(floor->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(floor->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
}
}
\ No newline at end of file
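The new CHECKs assert that the FloorLayer itself, the tensor on its connected input, and the tensor on its output slot have all been rewritten to Float16. A hypothetical helper (not part of this patch) sketching the same check for an arbitrary layer:

    // Returns true when every output slot of the layer carries Float16 data.
    bool AllOutputSlotsAreFp16(const armnn::Layer& layer)
    {
        for (unsigned int i = 0; i < layer.GetNumOutputSlots(); ++i)
        {
            if (layer.GetOutputSlot(i).GetTensorInfo().GetDataType() != armnn::DataType::Float16)
            {
                return false;
            }
        }
        return true;
    }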
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 54cbbce89f..5cbd17fb6a 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,13 +27,8 @@ public:
static IConnectableLayer *AddConvolution(INetwork *network,
const Convolution2dDescriptor &descriptor,
- const ConstTensor &weights,
- const Optional<ConstTensor> &biases,
const char *name)
{
- IgnoreUnused(weights);
- IgnoreUnused(biases);
-
return network->AddConvolution2dLayer(descriptor, name);
}
@@ -65,12 +60,8 @@ public:
static IConnectableLayer* AddConvolution(INetwork* network,
const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name)
{
- IgnoreUnused(weights);
- IgnoreUnused(biases);
return network->AddDepthwiseConvolution2dLayer(descriptor, name);
}
@@ -171,8 +162,6 @@ INetworkPtr CreateNetwork(bool depthwise, bool preventFusing)
IConnectableLayer* convLayer = Conv2dTest::AddConvolution(network.get(),
convolution2dDescriptor,
- weights,
- Optional<ConstTensor>(),
"convolution");
IConnectableLayer* batchNormLayer = network->AddBatchNormalizationLayer(batchNormDescriptor,
@@ -243,13 +232,21 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
return IsLayerOfType<ConvLayerType>(layer) &&
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
-
+ auto checkConstant = [ ](const armnn::Layer* const layer) -> bool
+ {
+ const ConstantLayer* constLayer = PolymorphicDowncast<const ConstantLayer*>(layer);
+ auto tensor = ConstTensor(constLayer->m_LayerOutput->GetTensorInfo(),
+ constLayer->m_LayerOutput->Map(true));
+ const auto* buffer = static_cast<const T*>(tensor.GetMemoryArea());
+ std::vector<T> vector(buffer, buffer + tensor.GetNumElements());
+ return IsLayerOfType<ConstantLayer>(layer);
+ };
CHECK(5 == graphFused.GetNumLayers());
CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
- &IsLayerOfType<ConstantLayer>,
- &IsLayerOfType<ConstantLayer>,
+ checkConstant,
+ checkConstant,
checkFusedConv2d,
&IsLayerOfType<OutputLayer>));
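The checkConstant lambda above maps the constant's payload but ultimately only verifies the layer type. A stricter variant, sketched under the same ConstantLayer API and assuming Float32 payloads; the finite-value check is illustrative, not part of the patch:

    #include <cmath>

    auto checkConstantData = [](const armnn::Layer* const layer) -> bool
    {
        if (!IsLayerOfType<armnn::ConstantLayer>(layer))
        {
            return false;
        }
        const auto* constLayer = PolymorphicDowncast<const armnn::ConstantLayer*>(layer);
        const armnn::TensorInfo& info = constLayer->m_LayerOutput->GetTensorInfo();
        const auto* data = static_cast<const float*>(constLayer->m_LayerOutput->Map(true));
        for (unsigned int i = 0; i < info.GetNumElements(); ++i)
        {
            if (!std::isfinite(data[i]))   // fused weights/biases must stay finite
            {
                constLayer->m_LayerOutput->Unmap();
                return false;
            }
        }
        constLayer->m_LayerOutput->Unmap();
        return true;
    };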
diff --git a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
deleted file mode 100644
index b3f9ed8780..0000000000
--- a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("RedirectMembersToConstantInputsFullyConnectedTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo inputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 4 }, armnn::DataType::Float32, 0.0f, 0, true);
- const armnn::TensorInfo biasesInfo ({ 2 }, armnn::DataType::Float32, 0.0f, 0, true);
-
- // Check if isConstant is enabled for weights and biases tensor info.
- CHECK(weightsInfo.IsConstant());
- CHECK(biasesInfo.IsConstant());
-
- armnn::FullyConnectedDescriptor desc;
- desc.m_BiasEnabled = true;
- desc.m_ConstantWeights = false;
-
- // Create the simple test network with Weights and Biases as inputs to a FullyConnected layer.
- auto input = graph.AddLayer<armnn::InputLayer>(0, "Input");
- auto weights = graph.AddLayer<armnn::ConstantLayer>("Weights");
- auto biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
- auto fcLayer = graph.AddLayer<armnn::FullyConnectedLayer>(desc, "FullyConnected");
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "Output");
-
- float expectedWeightsData[] = { 1.0f, 1.0f, 1.0f, 1.0f };
- float expectedBiasesData[] = { 2.0f, 2.0f };
-
- // Set the m_LayerOutput for the optimizer to point to.
- armnn::ConstTensor weightsTensor(weightsInfo, &expectedWeightsData);
- armnn::ConstTensor biasesTensor(biasesInfo, &expectedBiasesData);
- weights->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weightsTensor);
- biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(biasesTensor);
-
- input->GetOutputSlot().SetTensorInfo(inputInfo);
- weights->GetOutputSlot().SetTensorInfo(weightsInfo);
- biases->GetOutputSlot().SetTensorInfo(biasesInfo);
- fcLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
- // Connect up the layers
- input->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(0));
- weights->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(1));
- biases->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(2));
- fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- // Member variables should be null before optimization.
- CHECK(fcLayer->m_Weight == nullptr);
- CHECK(fcLayer->m_Bias == nullptr);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs()));
-
- // Check if member variables are not null and shape is set correctly.
- CHECK(fcLayer->m_Weight != nullptr);
- CHECK(fcLayer->m_Bias != nullptr);
- CHECK(fcLayer->m_Weight->GetTensorInfo().GetShape() == weightsInfo.GetShape());
- CHECK(fcLayer->m_Bias->GetTensorInfo().GetShape() == biasesInfo.GetShape());
-
- // Check whether data matches expected float data
- const float* weightsData = fcLayer->m_Weight->GetConstTensor<float>();
- CHECK(weightsData[0] == expectedWeightsData[0]);
- CHECK(weightsData[1] == expectedWeightsData[1]);
- CHECK(weightsData[2] == expectedWeightsData[2]);
- CHECK(weightsData[3] == expectedWeightsData[3]);
-
- const float* biasesData = fcLayer->m_Bias->GetConstTensor<float>();
- CHECK(biasesData[0] == expectedBiasesData[0]);
- CHECK(biasesData[1] == expectedBiasesData[1]);
-}
-
-}
\ No newline at end of file