Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.cpp           |  52
-rw-r--r--  src/armnn/test/ConstTensorLayerVisitor.hpp           |  10
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                    |  85
-rw-r--r--  src/armnn/test/GraphTests.cpp                        |   8
-rw-r--r--  src/armnn/test/NetworkTests.cpp                      |  36
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp               |  18
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp | 185
7 files changed, 318 insertions(+), 76 deletions(-)
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index baafcf41ef..d3d8698972 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -484,16 +484,23 @@ TEST_CASE("CheckFullyConnectedLayer")
{
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_ConstantWeights = true;
+ descriptor.m_BiasEnabled = false;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestFullyConnectedLayerVistor visitor(descriptor);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional());
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->Accept(weightsVisitor);
layer->Accept(visitor);
}
@@ -502,16 +509,23 @@ TEST_CASE("CheckNamedFullyConnectedLayer")
const char* layerName = "FullyConnectedLayer";
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_ConstantWeights = true;
+ descriptor.m_BiasEnabled = false;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestFullyConnectedLayerVistor visitor(descriptor, layerName);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional(), layerName);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ weightsLayer->Accept(weightsVisitor);
layer->Accept(visitor);
}
@@ -519,6 +533,7 @@ TEST_CASE("CheckFullyConnectedLayerWithBiases")
{
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_ConstantWeights = true;
descriptor.m_BiasEnabled = true;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
@@ -528,13 +543,21 @@ TEST_CASE("CheckFullyConnectedLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasesVisitor(biases);
+ TestFullyConnectedLayerVistor visitor(descriptor);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ weightsLayer->Accept(weightsVisitor);
+ biasesLayer->Accept(biasesVisitor);
layer->Accept(visitor);
}
@@ -543,6 +566,7 @@ TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
const char* layerName = "FullyConnectedLayer";
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
+ descriptor.m_ConstantWeights = true;
descriptor.m_BiasEnabled = true;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
@@ -552,13 +576,21 @@ TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
- Optional<ConstTensor> optionalBiases(biases);
- TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases, layerName);
+ TestConstantLayerVisitor weightsVisitor(weights);
+ TestConstantLayerVisitor biasesVisitor(biases);
+ TestFullyConnectedLayerVistor visitor(descriptor, layerName);
NetworkImpl net;
- IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases, layerName);
+ IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
+ IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
+ IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
+
+ weightsLayer->Accept(weightsVisitor);
+ biasesLayer->Accept(biasesVisitor);
layer->Accept(visitor);
}
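
These hunks all apply the same migration: instead of handing weights and biases directly to AddFullyConnectedLayer, each test now creates ConstantLayers and connects their outputs to the FullyConnected layer's input slots (slot 1 for weights, slot 2 for biases). A minimal sketch of the resulting wiring, using only calls that appear in this diff; shapes and values are illustrative:

    FullyConnectedDescriptor descriptor;
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_ConstantWeights = true;
    descriptor.m_BiasEnabled = false;

    std::vector<float> data = {1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<unsigned int> dimensions = {1, 1, 2, 2};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);

    NetworkImpl net;
    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const fcLayer = net.AddFullyConnectedLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(1)); // slot 1 = weights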
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index e423e0f6e3..35e2e872f7 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -90,36 +90,26 @@ class TestFullyConnectedLayerVistor : public TestLayerVisitor
{
public:
explicit TestFullyConnectedLayerVistor(const FullyConnectedDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor> biases,
const char* name = nullptr)
: TestLayerVisitor(name)
, m_Descriptor(descriptor)
- , m_Weights(weights)
- , m_Biases(biases)
{}
virtual ~TestFullyConnectedLayerVistor() {}
void VisitFullyConnectedLayer(const IConnectableLayer* layer,
const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
CheckLayerName(name);
CheckDescriptor(fullyConnectedDescriptor);
- CheckConstTensors(m_Weights, weights);
- CheckOptionalConstTensors(m_Biases, biases);
}
protected:
void CheckDescriptor(const FullyConnectedDescriptor& descriptor);
private:
FullyConnectedDescriptor m_Descriptor;
- ConstTensor m_Weights;
- Optional<ConstTensor> m_Biases;
};
class TestBatchNormalizationLayerVisitor : public TestLayerVisitor
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index b07e3b80a5..759ada97cd 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1193,7 +1193,7 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
{
// Creates the layer we're testing.
FullyConnectedDescriptor layerDesc;
- layerDesc.m_BiasEnabled = true;
+ layerDesc.m_BiasEnabled = false;
layerDesc.m_TransposeWeightMatrix = true;
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
@@ -1201,17 +1201,24 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ // As optimization isn't run, member variables need to be updated.
layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
+
+ armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
+ weightsTensorInfo.SetConstant();
// Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ auto const weights = graph.AddLayer<ConstantLayer>("weights");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+ weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
+ weights->m_LayerOutput->Allocate();
+
// Connects up.
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, weightsTensorInfo, 0, 1);
Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
CreateTensorHandles(graph, factory);
@@ -1219,13 +1226,10 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1246,11 +1250,17 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ // As optimization isn't run, member variables need to be updated.
layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
+ armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
+ armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
+ weightsTensorInfo.SetConstant();
+ biasesTensorInfo.SetConstant();
+
auto activationDesc = std::make_shared<ActivationDescriptor>();
activationDesc->m_A = 10.0f;
activationDesc->m_B = 5.0f;
@@ -1267,10 +1277,19 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
// Creates extra layers.
Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ auto const weights = graph.AddLayer<ConstantLayer>("weights");
+ auto const biases = graph.AddLayer<ConstantLayer>("biases");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+ weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
+ weights->m_LayerOutput->Allocate();
+ biases->m_LayerOutput = std::make_unique<ScopedTensorHandle>(biasesTensorInfo);
+ biases->m_LayerOutput->Allocate();
+
// Connects up.
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, weightsTensorInfo, 0, 1);
+ Connect(biases, layer, biasesTensorInfo, 0, 2);
Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
CreateTensorHandles(graph, factory);
@@ -1290,10 +1309,52 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
+std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest
+ (armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ FullyConnectedDescriptor layerDesc;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_TransposeWeightMatrix = true;
+ layerDesc.m_ConstantWeights = false;
+
+ FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+
+ // Creates extra layers with weights and biases as input layers.
+ Layer* const input = graph.AddLayer<InputLayer>(1, "input");
+ Layer* const weights = graph.AddLayer<InputLayer>(2, "weights");
+ Layer* const biases = graph.AddLayer<InputLayer>(3, "biases");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1);
+ Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2);
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
+
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
// Returns so we can do extra, backend-specific tests.
return workload;
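
Backend test suites instantiate these templated helpers with their own workload and factory types. A hedged usage sketch for the new weights-and-biases-as-inputs variant; RefWorkloadFactory and RefFullyConnectedWorkload stand in for whichever backend is under test and are an assumption, not part of this diff:

    armnn::Graph graph;
    armnn::RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest<
        armnn::RefFullyConnectedWorkload, armnn::DataType::Float32>(factory, graph);
    // With m_ConstantWeights == false, data, weights and biases all arrive through
    // the three input slots; nothing is read from m_Weight/m_Bias members.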
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 0dc2619e51..b697f6dbe6 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -598,14 +598,14 @@ TEST_CASE("CheckGraphConstTensorSharing")
{
armnn::Graph graph1;
- armnn::FullyConnectedLayer* const fcLayer =
- graph1.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
+ armnn::ConstantLayer* const constantLayer = graph1.AddLayer<armnn::ConstantLayer>("ConstantLayer");
float weight = 1.0f;
armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);
- fcLayer->m_Weight = std::make_shared<armnn::ScopedTensorHandle>(constTensor);;
+ constantLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
+
// point sharedWeightPtr to graph1's const tensor
- sharedWeightPtr = fcLayer->m_Weight->GetConstTensor<float>();
+ sharedWeightPtr = constantLayer->m_LayerOutput->GetConstTensor<float>();
graph0 = armnn::Graph(graph1);
// graph1 goes out of scope
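
The intent of the test is unchanged by the rewrite: ConstantLayer::m_LayerOutput is held through a std::shared_ptr, so copying a graph aliases the underlying const tensor rather than cloning it. A condensed sketch of what the test asserts; the final check on sharedWeightPtr is assumed from the unchanged context around this hunk:

    float weight = 1.0f;
    armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32}, &weight);

    armnn::Graph graph1;
    auto* constantLayer = graph1.AddLayer<armnn::ConstantLayer>("ConstantLayer");
    constantLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(constTensor);

    const float* sharedWeightPtr = constantLayer->m_LayerOutput->GetConstTensor<float>();
    armnn::Graph graph0(graph1); // the copy shares, not clones, the tensor handle
    // graph1 may now be destroyed; *sharedWeightPtr stays readable through graph0.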
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index d763a85100..9acb60df4a 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -86,12 +86,15 @@ TEST_CASE("NetworkModification")
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
armnn::FullyConnectedDescriptor fullyConnectedDesc;
+
+ // Constant layer that now holds weights data for FullyConnected
+ armnn::IConnectableLayer* const constantWeightsLayer = net.AddConstantLayer(weights, "const weights");
armnn::IConnectableLayer* const fullyConnectedLayer = net.AddFullyConnectedLayer(fullyConnectedDesc,
- weights,
- armnn::EmptyOptional(),
"fully connected");
+ CHECK(constantWeightsLayer);
CHECK(fullyConnectedLayer);
+ constantWeightsLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(1));
convLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
armnn::Pooling2dDescriptor pooling2dDesc;
@@ -152,11 +155,12 @@ TEST_CASE("NetworkModification")
multiplicationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
//Tests that all layers are present in the graph.
- CHECK(net.GetGraph().GetNumLayers() == 11);
+ CHECK(net.GetGraph().GetNumLayers() == 12);
//Tests that the vertices exist and have correct names.
CHECK(GraphHasNamedLayer(net.GetGraph(), "input layer"));
CHECK(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "const weights"));
CHECK(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
CHECK(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
CHECK(GraphHasNamedLayer(net.GetGraph(), "activation"));
@@ -200,6 +204,28 @@ TEST_CASE("NetworkModification")
CHECK(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
}
};
+ auto checkOneOutputToTwoInputConnectionForTwoDifferentLayers = []
+ (const armnn::IConnectableLayer* const srcLayer1,
+ const armnn::IConnectableLayer* const srcLayer2,
+ const armnn::IConnectableLayer* const tgtLayer,
+ int expectedSrcNumInputs1 = 1,
+ int expectedSrcNumInputs2 = 1,
+ int expectedDstNumOutputs = 1)
+ {
+ CHECK(srcLayer1->GetNumInputSlots() == expectedSrcNumInputs1);
+ CHECK(srcLayer1->GetNumOutputSlots() == 1);
+ CHECK(srcLayer2->GetNumInputSlots() == expectedSrcNumInputs2);
+ CHECK(srcLayer2->GetNumOutputSlots() == 1);
+ CHECK(tgtLayer->GetNumInputSlots() == 2);
+ CHECK(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
+
+ CHECK(srcLayer1->GetOutputSlot(0).GetNumConnections() == 1);
+ CHECK(srcLayer2->GetOutputSlot(0).GetNumConnections() == 1);
+ CHECK(srcLayer1->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
+ CHECK(srcLayer2->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(1));
+ CHECK(&srcLayer1->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
+ CHECK(&srcLayer2->GetOutputSlot(0) == tgtLayer->GetInputSlot(1).GetConnection());
+ };
CHECK(AreAllLayerInputSlotsConnected(*convLayer));
CHECK(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
@@ -214,8 +240,8 @@ TEST_CASE("NetworkModification")
// Checks connectivity.
checkOneOutputToOneInputConnection(inputLayer, convLayer, 0);
- checkOneOutputToOneInputConnection(convLayer, fullyConnectedLayer);
- checkOneOutputToOneInputConnection(fullyConnectedLayer, poolingLayer);
+ checkOneOutputToTwoInputConnectionForTwoDifferentLayers(convLayer, constantWeightsLayer, fullyConnectedLayer, 1, 0);
+ checkOneOutputToOneInputConnection(fullyConnectedLayer, poolingLayer, 2, 1);
checkOneOutputToOneInputConnection(poolingLayer, activationLayer);
checkOneOutputToOneInputConnection(activationLayer, normalizationLayer);
checkOneOutputToOneInputConnection(normalizationLayer, softmaxLayer);
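
Note the argument values in the rewritten connectivity checks above: constantWeightsLayer is passed with expectedSrcNumInputs2 = 0 because a ConstantLayer has no input slots, and the FullyConnected-to-pooling check now passes 2 input slots since the layer consumes both data and weights:

    checkOneOutputToTwoInputConnectionForTwoDifferentLayers(
        convLayer, constantWeightsLayer, fullyConnectedLayer,
        1,  // convLayer has one input slot
        0); // constantWeightsLayer has none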
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 8abcfd7595..d3c928fec1 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -401,24 +401,16 @@ TEST_CASE("FloorTest")
TEST_CASE("FullyConnectedTest")
{
- Graph graph;
-
const unsigned int inputWidth = 3u;
const unsigned int inputHeight = 2u;
const unsigned int inputChannels = 1u;
const unsigned int outputChannels = 2u;
- auto layer = BuildGraph<FullyConnectedLayer>(&graph,
- {{1, inputChannels, inputHeight, inputWidth}},
- FullyConnectedDescriptor(),
- "fc");
-
-
- const float Datum = 0.0f;
- ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum);
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
-
- RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
+ CreateGraphAndRunTest<FullyConnectedLayer>({{ 1, inputChannels, inputHeight, inputWidth }, // input
+ { inputChannels, outputChannels }}, // weights
+ {{ 1, outputChannels }}, // output
+ FullyConnectedDescriptor(),
+ "fc");
}
TEST_CASE("GatherTest")
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 24ea8f6680..2352a3c498 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -8,6 +8,7 @@
#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
+#include "test/GraphUtils.hpp"
#include <test/TestUtils.hpp>
#include <doctest/doctest.h>
@@ -41,6 +42,7 @@ struct Convolution2dTest
{
using LayerType = Convolution2dLayer;
static const bool isElementWise = false;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 3, 3, 4}); } // NHWCout
@@ -70,6 +72,16 @@ struct Convolution2dTest
return network->AddConvolution2dLayer(descriptor, weights, optionalBias, name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -78,6 +90,7 @@ struct DWConvolution2dTest
public:
using LayerType = DepthwiseConvolution2dLayer;
static const bool isElementWise = false;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // [N,H,W,Cin]
static TensorShape GetOutputShape() { return TensorShape( {1, 3, 3, 12}); } // [N,H,W,Cout]
@@ -108,6 +121,16 @@ public:
return network->AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBias, name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -116,6 +139,7 @@ struct FullyConnectedTest
public:
using LayerType = FullyConnectedLayer;
static const bool isElementWise = false;
+ static const bool isConstTensorAsInputSupported = true;
static TensorShape GetInputShape() { return TensorShape( {2, 5, 1, 1}); } // NCinHW
static TensorShape GetOutputShape() { return TensorShape( {2, 3}); } // NCout
@@ -129,18 +153,31 @@ public:
float scale = 1.f,
int32_t offset = 0)
{
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+
FullyConnectedDescriptor descriptor;
descriptor.m_BiasEnabled = false;
+ return network->AddFullyConnectedLayer(descriptor, name);
+ }
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
std::vector<float> weightsData = { 1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15};
+ 11, 12, 13, 14, 15};
std::vector<T> weightsVector = armnnUtils::QuantizedVector<T>(weightsData, scale, offset);
- TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset);
+ TensorInfo weightsInfo(GetWeightsShape(), ArmnnType, scale, offset, true);
ConstTensor weights(weightsInfo, weightsVector);
- Optional<ConstTensor> optionalBias;
- return network->AddFullyConnectedLayer(descriptor, weights, optionalBias, name);
+ IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+
+ std::vector<IConnectableLayer*> layers = { weightsLayer };
+ return layers;
}
};
@@ -150,6 +187,7 @@ struct BatchNormTest
public:
using LayerType = BatchNormalizationLayer;
static const bool isElementWise = false;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCout
@@ -181,6 +219,16 @@ public:
return network->AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma, name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -188,6 +236,7 @@ struct MultiplicationTest
{
using LayerType = MultiplicationLayer;
static const bool isElementWise = true;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCout
@@ -205,6 +254,16 @@ struct MultiplicationTest
return network->AddMultiplicationLayer(name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -212,6 +271,7 @@ struct AdditionTest
{
using LayerType = AdditionLayer;
static const bool isElementWise = true;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCout
@@ -229,6 +289,16 @@ struct AdditionTest
return network->AddAdditionLayer(name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -236,6 +306,7 @@ struct SubtractionTest
{
using LayerType = SubtractionLayer;
static const bool isElementWise = true;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCout
@@ -253,6 +324,16 @@ struct SubtractionTest
return network->AddSubtractionLayer(name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
@@ -260,6 +341,7 @@ struct DivisionTest
{
using LayerType = DivisionLayer;
static const bool isElementWise = true;
+ static const bool isConstTensorAsInputSupported = false;
static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin
static TensorShape GetOutputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCout
@@ -277,11 +359,21 @@ struct DivisionTest
return network->AddDivisionLayer(name);
}
+
+ static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
+ float scale = 1.f,
+ int32_t offset = 0)
+ {
+ IgnoreUnused(network);
+ IgnoreUnused(scale);
+ IgnoreUnused(offset);
+ return {};
+ }
};
template<typename LayerTest,
DataType ArmnnType>
-INetworkPtr CreatNetwork(ActivationDescriptor activationDescriptor, bool preventFusing,
+INetworkPtr CreateNetwork(ActivationDescriptor activationDescriptor, bool preventFusing,
float scale, int32_t offset)
{
// Create a network
@@ -300,6 +392,20 @@ INetworkPtr CreatNetwork(ActivationDescriptor activationDescriptor, bool prevent
IConnectableLayer* outputLayer = network->AddOutputLayer(0);
IConnectableLayer* output2Layer = preventFusing?network->AddOutputLayer(1):nullptr;
+ // If ConstTensorAsInputs is supported, weights and bias are stored as constant layers.
+ if(LayerTest::isConstTensorAsInputSupported)
+ {
+ std::vector<IConnectableLayer*> constantLayers = LayerTest::AddConstantLayers(network.get(),
+ scale,
+ offset);
+
+ // Connect constant layers to receiverLayer.
+ for (unsigned int i = 0; i < constantLayers.size(); ++i)
+ {
+ constantLayers[i]->GetOutputSlot(0).Connect(receiverLayer->GetInputSlot(i + 1));
+ }
+ }
+
// Define layers information
TensorInfo inputInfo(LayerTest::GetInputShape(), ArmnnType, scale, offset);
TensorInfo outputInfo(LayerTest::GetOutputShape(), ArmnnType, scale, offset);
@@ -335,7 +441,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
{
// FIRST NETWORK: Fused
// Construct ArmNN network
- INetworkPtr networkFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor, false, scale, offset);
+ INetworkPtr networkFused = CreateNetwork<LayerTest, ArmnnType>(activationDescriptor, false, scale, offset);
// Create ArmNN runtime
IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
@@ -350,12 +456,31 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
(layer->GetNameStr() == "fused-activation-into-receiverLayer");
};
- CHECK(3 == graphFused.GetNumLayers());
- CHECK(CheckSequence(graphFused.cbegin(),
- graphFused.cend(),
- &IsLayerOfType<InputLayer>,
- checkFusedConv2d,
- &IsLayerOfType<OutputLayer>));
+ // If ConstTensorAsInputs is supported, weights and bias are stored as constant layers.
+ if(LayerTest::isConstTensorAsInputSupported)
+ {
+ CHECK(4 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
+ graphFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ checkFusedConv2d,
+ &IsLayerOfType<OutputLayer>));
+
+ // Check if new constant layer is connected to fused receiver layer.
+ Layer* fusedReceiverLayer = GetFirstLayerWithName(graphFused, "fused-activation-into-receiverLayer");
+ CHECK(fusedReceiverLayer);
+ CHECK(fusedReceiverLayer->GetInputSlot(1).GetConnection() != nullptr);
+ }
+ else
+ {
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
+ graphFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ checkFusedConv2d,
+ &IsLayerOfType<OutputLayer>));
+ }
// Load network into runtime
NetworkId networkIdentifier;
@@ -376,7 +501,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// SECOND NETWORK: NotFused
// Construct ArmNN network
- INetworkPtr networkNotFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor, true, scale, offset);
+ INetworkPtr networkNotFused = CreateNetwork<LayerTest, ArmnnType>(activationDescriptor, true, scale, offset);
// Create ArmNN runtime
IRuntimePtr runNotFused = IRuntime::Create(IRuntime::CreationOptions()); // default options
@@ -386,14 +511,30 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- CHECK(5 == graphNotFused.GetNumLayers());
- CHECK(CheckSequence(graphNotFused.cbegin(),
- graphNotFused.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<LayerType>,
- &IsLayerOfType<ActivationLayer>,
- &IsLayerOfType<OutputLayer>,
- &IsLayerOfType<OutputLayer>));
+ // If ConstTensorAsInputs is supported, weights and bias are stored as constant layers.
+ if(LayerTest::isConstTensorAsInputSupported)
+ {
+ CHECK(6 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
+ graphNotFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<LayerType>,
+ &IsLayerOfType<ActivationLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
+ else
+ {
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
+ graphNotFused.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<LayerType>,
+ &IsLayerOfType<ActivationLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
// Load network into runtime
NetworkId networkIdentifierNotFused;
@@ -433,7 +574,7 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
try
{
// Construct ArmNN network
- INetworkPtr networkFused = CreatNetwork<LayerTest, ArmnnType>(activationDescriptor, false, scale, offset);
+ INetworkPtr networkFused = CreateNetwork<LayerTest, ArmnnType>(activationDescriptor, false, scale, offset);
// Create ArmNN runtime
IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
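
Every layer test struct in this file now pairs the isConstTensorAsInputSupported flag with an AddConstantLayers hook. Because CreateNetwork tests the flag with a plain runtime if, the hook must compile for every struct, which is why even the non-supporting structs carry a stub. A hedged skeleton for adding a further struct; MyLayerTest and MyLayer are placeholders, not ArmNN types:

    template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
    struct MyLayerTest
    {
        using LayerType = MyLayer; // placeholder layer type
        static const bool isElementWise = false;
        // Set true only if weights/biases reach the layer through ConstantLayers,
        // as in FullyConnectedTest above.
        static const bool isConstTensorAsInputSupported = false;

        // ... GetInputShape/GetOutputShape/AddReceiverLayer as for the other structs ...

        static std::vector<IConnectableLayer*> AddConstantLayers(INetwork* network,
                                                                 float scale = 1.f,
                                                                 int32_t offset = 0)
        {
            IgnoreUnused(network);
            IgnoreUnused(scale);
            IgnoreUnused(offset);
            return {}; // no constant layers for CreateNetwork to connect
        }
    };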