Diffstat (limited to 'src/armnn/test'):

 src/armnn/test/NetworkTests.cpp                     | 11
 src/armnn/test/OptimizerTests.cpp                   |  8
 src/armnn/test/SubgraphViewTests.cpp                |  4
 src/armnn/test/optimizations/FoldPadTests.cpp       | 18
 src/armnn/test/optimizations/FuseBatchNormTests.cpp | 23

 5 files changed, 33 insertions(+), 31 deletions(-)
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 7756f40623..9d9810408e 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -605,10 +605,6 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
 {
     armnn::NetworkImpl net;
 
-    unsigned int dims[] = { 10,1,1,1 };
-    std::vector<float> convWeightsData(10);
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData);
-
     armnn::Convolution2dDescriptor convDesc2d;
     convDesc2d.m_PadLeft = 2;
     convDesc2d.m_PadRight = 3;
@@ -620,12 +616,7 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer")
     convDesc2d.m_DilationY = 3;
     convDesc2d.m_BiasEnabled = false;
     convDesc2d.m_DataLayout = armnn::DataLayout::NCHW;
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d,
-                                                                          weights,
-                                                                          armnn::EmptyOptional(),
-                                                                          "conv layer");
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, "conv layer");
     CHECK(convLayer);
 
     const armnn::BaseDescriptor& descriptor = convLayer->GetParameters();
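
The change above drops the weights entirely because this test only reads the descriptor back. Where a convolution still needs constants under the non-deprecated API, they are connected as ConstantLayer inputs rather than passed to AddConvolution2dLayer. A minimal sketch of that wiring against the public INetwork interface (the function and layer names here are illustrative, not taken from this patch):

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

// Sketch: supply convolution weights through a ConstantLayer instead of
// passing them to AddConvolution2dLayer. Slot 0 carries the activation
// input; slot 1 takes the weights (slot 2 would take the bias).
void AddConvWithConstantWeights(armnn::INetwork& net,
                                const armnn::Convolution2dDescriptor& desc,
                                const armnn::ConstTensor& weights)
{
    armnn::IConnectableLayer* conv = net.AddConvolution2dLayer(desc, "conv");

    armnn::IConnectableLayer* constWeights = net.AddConstantLayer(weights, "weights");
    constWeights->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
    constWeights->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
}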
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 3dd55279c6..b78863dddc 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -885,11 +885,11 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
     // Define the network
     Graph graph;
-    auto input = graph.AddLayer<InputLayer>(0, "input");
+    auto input        = graph.AddLayer<InputLayer>(0, "input");
     auto weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
-    auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
-    auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
-    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    auto conv         = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution");
+    auto batchNorm    = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm");
+    auto output       = graph.AddLayer<OutputLayer>(0, "output");
 
     // Set layer information
     input->GetOutputSlot().SetTensorInfo(inputInfo);
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index d7465c8361..feeea5d478 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -978,8 +978,8 @@ TEST_CASE("MultipleSimpleSubgraphs")
                                                   "m3");
 
     auto x2 = graph.InsertNewLayer<Convolution2dLayer>(m3->GetInputSlot(0),
-                                                      Convolution2dDescriptor{},
-                                                      "x2");
+                                                       Convolution2dDescriptor{},
+                                                       "x2");
 
     auto w2 = graph.InsertNewLayer<ConstantLayer>(x2->GetInputSlot(1), "w2");
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 027b10377d..14c211f9bf 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -636,13 +636,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
     std::vector<float> biasVector = {5, 6, 7, 8};
     TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
     ConstTensor bias(biasInfo, biasVector);
-    Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
-                                                                    weights,
-                                                                    optionalBias,
-                                                                    "Conv2D");
-    ARMNN_NO_DEPRECATE_WARN_END
+
+    IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D");
+
 
     TensorInfo outputInfo(4, outputShape, DataType::Float32);
     conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
@@ -653,6 +649,14 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio
     padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
     conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+    weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1));
+
+    auto biasLayer = network->AddConstantLayer(bias, "Bias");
+    biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo());
+    biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2));
+
     // Create ArmNN runtime
     IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
 
     // Optimise the network
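
The added wiring repeats the same three steps for the weights (input slot 1) and the bias (input slot 2). A hypothetical helper capturing the pattern; ConnectConstant is illustrative and not part of the ArmNN API or this patch:

#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: wire a ConstTensor into a layer's input slot via a
// ConstantLayer, exactly as the lines added above do for slots 1 and 2.
void ConnectConstant(armnn::INetwork& net,
                     const armnn::ConstTensor& tensor,
                     armnn::IConnectableLayer* target,
                     unsigned int slot,
                     const char* name)
{
    armnn::IConnectableLayer* constant = net.AddConstantLayer(tensor, name);
    constant->GetOutputSlot(0).SetTensorInfo(tensor.GetInfo());
    constant->GetOutputSlot(0).Connect(target->GetInputSlot(slot));
}

// Usage mirroring the test above:
//   ConnectConstant(*network, weights, conv2dLayer, 1, "Weights");
//   ConnectConstant(*network, bias,    conv2dLayer, 2, "Bias");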
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 4a94f7889b..54cbbce89f 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -31,9 +31,10 @@ public:
                                           const Optional<ConstTensor> &biases,
                                           const char *name)
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        return network->AddConvolution2dLayer(descriptor, weights, biases, name);
-        ARMNN_NO_DEPRECATE_WARN_END
+        IgnoreUnused(weights);
+        IgnoreUnused(biases);
+
+        return network->AddConvolution2dLayer(descriptor, name);
     }
 
     static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network,
@@ -41,12 +42,18 @@ public:
                                                              const ConstTensor &weights,
                                                              const Optional<ConstTensor> &biases)
     {
 
-        IgnoreUnused(network);
-        IgnoreUnused(descriptor);
-        IgnoreUnused(weights);
-        IgnoreUnused(biases);
+        auto weightsLayer = network->AddConstantLayer(weights, "Weights");
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
+        std::vector<IConnectableLayer*> layers = {weightsLayer};
 
-        return {};
+        if (descriptor.m_BiasEnabled)
+        {
+            auto biasLayer = network->AddConstantLayer(biases.value(), "Bias");
+            biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo());
+            layers.emplace_back(biasLayer);
+        }
+
+        return layers;
     }
 };
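
After this refactor the two helpers divide the work: the first creates the convolution without constants, the second creates ConstantLayers for the weights and, when the descriptor enables it, the bias. A sketch of how a test might compose them; the creator's name (AddConvolution), the enclosing class, and the slot indices are assumptions based on the other files in this patch, since none of them appear in this hunk:

#include <armnn/INetwork.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

// Sketch: compose the two refactored helpers. "ConvHelper" stands for the
// enclosing helper class, whose name lies outside this hunk.
template <typename ConvHelper>
void BuildConvWithConstants(armnn::INetwork* network,
                            const armnn::Convolution2dDescriptor& descriptor,
                            const armnn::ConstTensor& weights,
                            const armnn::Optional<armnn::ConstTensor>& biases)
{
    armnn::IConnectableLayer* conv =
        ConvHelper::AddConvolution(network, descriptor, weights, biases, "conv");

    // The weights layer always comes back first; a bias layer is present
    // only when descriptor.m_BiasEnabled is set.
    auto constants = ConvHelper::AddConstantLayers(network, descriptor, weights, biases);
    constants[0]->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
    if (constants.size() > 1)
    {
        constants[1]->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
    }
}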