aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/armnn/Graph.cpp69
-rw-r--r--src/armnn/Graph.hpp5
-rw-r--r--src/armnn/test/TestUtils.cpp24
-rw-r--r--src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp236
-rw-r--r--src/backends/reference/test/RefEndToEndTests.cpp26
5 files changed, 323 insertions, 37 deletions
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 60bf328c9c..95104049a2 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -534,7 +534,7 @@ void Graph::VerifyConstantLayerSetTensorInfo() const
{
for (auto&& layer : TopologicalSort())
{
- if(layer->GetType() == armnn::LayerType::Constant)
+ if (layer->GetType() == armnn::LayerType::Constant)
{
for (auto&& output: layer->GetOutputSlots())
{
@@ -562,15 +562,9 @@ void Graph::InferTensorInfos()
const IOutputSlot* source = input.GetConnectedOutputSlot();
if (source == NULL)
{
- std::ostringstream message;
- message << "Input slot "
- << input.GetSlotIndex()
- << " not connected to an output slot on "
- << GetLayerTypeAsCString(layer->GetType())
- << " layer \""
- << layer->GetName()
- << "\"";
- throw LayerValidationException(message.str());
+ // Throws exception due to a layer input not being connected to an output slot.
+ // Verifies input slot weights and bias are set for FullyConnected layers.
+ ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
}
if (!source->IsTensorInfoSet())
@@ -578,9 +572,8 @@ void Graph::InferTensorInfos()
std::ostringstream message;
message << "Output slot TensorInfo not set on "
<< GetLayerTypeAsCString(layer->GetType())
- << " layer \""
- << layer->GetName()
- << "\"";
+ << " layer "
+ << std::quoted(layer->GetName());
throw LayerValidationException(message.str());
}
}
@@ -592,4 +585,54 @@ void Graph::InferTensorInfos()
}
}
+/// Throws exception due to a layer input not being connected to an output slot.
+/// Verifies weights and bias are set for FullyConnected layers on input slots 1
+/// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
+///
+/// @param layer constant pointer to a Layer object
+/// @param slotIndex input slot index of layer
+/// @throws LayerValidationException
+void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
+ unsigned int slotIndex)
+{
+ std::ostringstream message;
+ bool noWeightsAndBias = false;
+
+ if (layer->GetType() == armnn::LayerType::FullyConnected && slotIndex > 0)
+ {
+        // If weights are not set and bias is enabled, also check if bias is set
+ if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
+ {
+ const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
+ if (biasSource == NULL)
+ {
+ message << "FullyConnected layer weights and bias not set: ";
+ noWeightsAndBias = true;
+ }
+ }
+
+ // Only weights or bias are not set
+ if (!noWeightsAndBias)
+ {
+ if (slotIndex == 1)
+ {
+ message << "FullyConnected layer weights not set: ";
+ }
+ else
+ {
+ message << "FullyConnected layer bias not set: ";
+ }
+ }
+ }
+
+ std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
+ message << "Input slot(s) "
+ << slotString
+ << " not connected to an output slot on "
+ << GetLayerTypeAsCString(layer->GetType())
+ << " layer "
+ << std::quoted(layer->GetName());
+ throw LayerValidationException(message.str());
+}
+
} // namespace armnn
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index d5fbeafed0..e2321bb0e4 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -268,6 +268,11 @@ private:
std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
ShapeInferenceMethod m_ShapeInferenceMethod;
+
+    /// Throws exception due to a layer input not being connected to an output slot.
+ /// Also verifies weights and bias are set for FullyConnected layers.
+ void ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
+ unsigned int slotIndex);
};
/// Common base class for layers in the graph.
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
index 6020c7631c..97cc80c8a2 100644
--- a/src/armnn/test/TestUtils.cpp
+++ b/src/armnn/test/TestUtils.cpp
@@ -15,7 +15,29 @@ void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const
ARMNN_ASSERT(from);
ARMNN_ASSERT(to);
- from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+ try
+ {
+ from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+ }
+ catch (const std::out_of_range& exc)
+ {
+ std::ostringstream message;
+
+ if (to->GetType() == armnn::LayerType::FullyConnected && toIndex == 2)
+ {
+ message << "Tried to connect bias to FullyConnected layer when bias is not enabled: ";
+ }
+
+ message << "Failed to connect to input slot "
+ << toIndex
+ << " on "
+ << GetLayerTypeAsCString(to->GetType())
+ << " layer "
+ << std::quoted(to->GetName())
+ << " as the slot does not exist or is unavailable";
+ throw LayerValidationException(message.str());
+ }
+
from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
}
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 7345ff5151..c3a6aa1a3c 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -103,6 +103,61 @@ armnn::INetworkPtr CreateFullyConnectedNetworkNoTensorInfoConstWeights(const arm
return network;
}
+armnn::INetworkPtr CreateFullyConnectedNetworkNoConnectedWeightsExplicit(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::TensorInfo& biasTensorInfo,
+ armnn::FullyConnectedDescriptor descriptor)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input");
+ armnn::IConnectableLayer* biasLayer = network->AddInputLayer(2, "Bias_Input");
+ armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+ Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+ Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
+ Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+ return network;
+}
+
+armnn::INetworkPtr CreateFullyConnectedNetworkNoConnectedWeightsAndBias(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ armnn::FullyConnectedDescriptor descriptor)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input");
+ armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+ Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+ Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+ return network;
+}
+
+armnn::INetworkPtr CreateFullyConnectedNetworkNoConnectedBiasExplicit(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const armnn::TensorInfo& weightsTensorInfo,
+ const armnn::ConstTensor& weightsConstantTensor,
+ armnn::FullyConnectedDescriptor descriptor)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input");
+ armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weightsConstantTensor, "Weights");
+ armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+ Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+ Connect(weightsLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
+ Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+ return network;
+}
+
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId>& backends)
{
@@ -160,8 +215,7 @@ void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::BackendId>& backends,
const bool transposeWeights,
- const bool constantWeightsOrBias,
- const bool tensorInfoSet)
+ const bool constantWeightsOrBias)
{
unsigned int inputWidth = 1;
unsigned int inputHeight = 1;
@@ -230,24 +284,7 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
descriptor.m_TransposeWeightMatrix = transposeWeights;
descriptor.m_ConstantWeights = constantWeightsOrBias;
- if(!tensorInfoSet)
- {
- // Tests constant weights and non constant bias.
- ConstTensor weightsConstantTensor(weightsDesc, weights.data());
-
- armnn::INetworkPtr network = CreateFullyConnectedNetworkNoTensorInfoConstWeights(inputTensorInfo,
- outputTensorInfo,
- weightsConstantTensor,
- descriptor);
- CHECK(network);
-
- // Create runtime in which test will run
- IRuntime::CreationOptions options;
- IRuntimePtr runtime(IRuntime::Create(options));
-
- CHECK_THROWS_AS( Optimize(*network, backends, runtime->GetDeviceSpec()), LayerValidationException );
- }
- else if (!constantWeightsOrBias)
+ if (!constantWeightsOrBias)
{
// Tests non constant weights and constant bias.
ConstTensor biasConstantTensor(biasesDesc, biasValues.data());
@@ -293,4 +330,163 @@ void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::
}
}
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void FullyConnectedErrorChecking(const std::vector<armnn::BackendId>& backends,
+ const bool explicitCheck,
+ const bool biasEnabled,
+ const bool connectedWeights,
+ const bool connectedBias,
+ const bool tensorInfoSet)
+{
+ unsigned int inputWidth = 1;
+ unsigned int inputHeight = 1;
+ unsigned int inputChannels = 5;
+ unsigned int inputNum = 2;
+
+ unsigned int outputChannels = 3;
+ unsigned int outputNum = 2;
+
+ unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+ unsigned int outputShape[] = { outputNum, outputChannels };
+ unsigned int weightsShape[] = { inputChannels, outputChannels };
+
+ unsigned int biasShape[] = { outputChannels };
+
+ armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+ armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
+ armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+
+ std::vector<float> weights =
+ {
+ .5f, 2.f, .5f,
+ .5f, 2.f, 1.f,
+ .5f, 2.f, 2.f,
+ .5f, 2.f, 3.f,
+ .5f, 2.f, 4.f
+ };
+
+ FullyConnectedDescriptor descriptor;
+ descriptor.m_BiasEnabled = biasEnabled;
+
+ if(explicitCheck)
+ {
+ if(!biasEnabled)
+ {
+ try
+ {
+ CreateFullyConnectedNetworkNoConnectedWeightsExplicit(inputTensorInfo,
+ outputTensorInfo,
+ biasesDesc,
+ descriptor);
+ FAIL("LayerValidationException should have been thrown");
+ }
+ catch (const LayerValidationException& exc)
+ {
+ CHECK(strcmp(exc.what(), "Tried to connect bias to FullyConnected layer when bias is not enabled: "
+ "Failed to connect to input slot 2 on FullyConnected layer "
+ "\"Fully_Connected\" as the slot does not exist or is unavailable") == 0);
+ }
+ }
+ else if (!connectedWeights)
+ {
+ armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedWeightsExplicit(inputTensorInfo,
+ outputTensorInfo,
+ biasesDesc,
+ descriptor);
+ CHECK(network);
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ try
+ {
+ Optimize(*network, backends, runtime->GetDeviceSpec());
+ FAIL("LayerValidationException should have been thrown");
+ }
+ catch (const LayerValidationException& exc)
+ {
+ CHECK(strcmp(exc.what(), "FullyConnected layer weights not set: Input slot(s) 1 not connected "
+ "to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
+ }
+ }
+ else if (!connectedBias)
+ {
+ // Tests with constant weights.
+ ConstTensor weightsConstantTensor(weightsDesc, weights.data());
+
+ armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedBiasExplicit(inputTensorInfo,
+ outputTensorInfo,
+ weightsDesc,
+ weightsConstantTensor,
+ descriptor);
+ CHECK(network);
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ try
+ {
+ Optimize(*network, backends, runtime->GetDeviceSpec());
+ FAIL("LayerValidationException should have been thrown");
+ }
+ catch (const LayerValidationException& exc)
+ {
+ CHECK(strcmp(exc.what(), "FullyConnected layer bias not set: Input slot(s) 2 not connected "
+ "to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
+ }
+ }
+ }
+ else if(!connectedWeights && !connectedBias)
+ {
+ armnn::INetworkPtr network = CreateFullyConnectedNetworkNoConnectedWeightsAndBias(inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ CHECK(network);
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ try
+ {
+ Optimize(*network, backends, runtime->GetDeviceSpec());
+ FAIL("LayerValidationException should have been thrown");
+ }
+ catch (const LayerValidationException& exc)
+ {
+ CHECK(strcmp(exc.what(), "FullyConnected layer weights and bias not set: Input slot(s) 1 & 2 not "
+ "connected to an output slot on FullyConnected layer \"Fully_Connected\"") == 0);
+ }
+
+ }
+ else if(!tensorInfoSet)
+ {
+ // Tests with constant weights.
+ ConstTensor weightsConstantTensor(weightsDesc, weights.data());
+
+ armnn::INetworkPtr network = CreateFullyConnectedNetworkNoTensorInfoConstWeights(inputTensorInfo,
+ outputTensorInfo,
+ weightsConstantTensor,
+ descriptor);
+ CHECK(network);
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+
+ try
+ {
+ Optimize(*network, backends, runtime->GetDeviceSpec());
+ FAIL("LayerValidationException should have been thrown");
+ }
+ catch (const LayerValidationException& exc)
+ {
+ CHECK(strcmp(exc.what(), "Output slot TensorInfo not set on Constant layer \"Weights\"") == 0);
+ }
+ }
+}
+
} // anonymous namespace
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 6c11a75e96..0cc8f4aa10 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -618,17 +618,37 @@ TEST_CASE("RefFullyConnectedEndToEndTestFloat32")
TEST_CASE("RefFullyConnectedEndToEndTestNonConstantWeightsConstantBiasesFloat32")
{
- FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, true, true);
+ FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, true);
}
TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsNonConstantBiasesFloat32")
{
- FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false, true);
+ FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false);
}
TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsTensorInfoNotSet")
{
- FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false, false);
+ FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, false, true, true, true, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestWeightsNotConnectedExplicitCheck")
+{
+ FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, true, false, true, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestBiasNotConnectedExplicitCheck")
+{
+ FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, true, true, false, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestWeightsAndBiasNotConnected")
+{
+ FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, false, true, false, false, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestBiasDisabledConnectBias")
+{
+ FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, false, false, true, true);
}
TEST_CASE("RefGatherFloatTest")