aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorColm Donelan <Colm.Donelan@arm.com>2021-04-29 08:00:06 +0100
committerColm Donelan <colm.donelan@arm.com>2021-04-29 12:56:47 +0000
commit4df97eb257d3fc29b7431d9cb8a054b21d5a7448 (patch)
treee717f3d8fb14f4a80836d6ff9b3ad0d097b99948
parenteb03e0fa0f6e8d133efe7d54831ad70da9431874 (diff)
downloadarmnn-4df97eb257d3fc29b7431d9cb8a054b21d5a7448.tar.gz
IVGCVSW-5890 Prevent modification to const layers with multiple connections
* In AddBroadcastReshapeLayerImpl check if a constant layer has other connections before modifying its output tensor shape. * In ElementwiseBaseLayer replace an ARMNN_ASSERT with a proper error message. Signed-off-by: Colm Donelan <Colm.Donelan@arm.com> Change-Id: Id3f3796c260eede61f076660505257a8b65d93fc
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.cpp29
-rw-r--r--src/armnn/optimizations/AddBroadcastReshapeLayer.hpp54
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp64
3 files changed, 107 insertions, 40 deletions
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index a169d31b2d..87093f684e 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -13,11 +13,12 @@
namespace armnn
{
-ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
- LayerType type, const char* name)
+ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots,
+ unsigned int numOutputSlots,
+ LayerType type,
+ const char* name)
: Layer(numInputSlots, numOutputSlots, type, name)
-{
-}
+{}
std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
@@ -27,7 +28,15 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
if (m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
{
- ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ if (input0.GetNumDimensions() != input1.GetNumDimensions())
+ {
+ std::stringstream errorMessage;
+ errorMessage << GetLayerTypeAsCString(GetType()) << " layer \"" << GetName() << "\": ";
+ errorMessage << "The tensor inputs to an element-wise operator are expected to have equal number of "
+ "dimensions. First = "
+ << input0.GetNumDimensions() << " second = " << input1.GetNumDimensions();
+ throw InvalidArgumentException(errorMessage.str(), CHECK_LOCATION());
+ }
}
else if (m_ShapeInferenceMethod == ShapeInferenceMethod::InferAndValidate &&
inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions())
@@ -36,7 +45,7 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
input0 = inputShapes[1];
}
- unsigned int numDims = input0.GetNumDimensions();
+ unsigned int numDims = input0.GetNumDimensions();
unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions();
// Get the max of the inputs.
@@ -72,10 +81,8 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- auto inferredShapes = InferOutputShapes({
- GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
- GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
- });
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
@@ -87,4 +94,4 @@ void ElementwiseBaseLayer::ExecuteStrategy(IStrategy& strategy) const
strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}
-} // namespace armnn
+} // namespace armnn
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
index 0a5ad9d152..aa00b9913c 100644
--- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -15,14 +15,9 @@ namespace armnn
namespace optimizations
{
-static const std::set<armnn::LayerType> broadcastOps {
- LayerType::Addition,
- LayerType::Division,
- LayerType::Maximum,
- LayerType::Minimum,
- LayerType::Multiplication,
- LayerType::Subtraction
-};
+static const std::set<armnn::LayerType> broadcastOps{ LayerType::Addition, LayerType::Division,
+ LayerType::Maximum, LayerType::Minimum,
+ LayerType::Multiplication, LayerType::Subtraction };
class AddBroadcastReshapeLayerImpl
{
@@ -35,8 +30,8 @@ public:
layer.GetInputSlot(0).GetConnectedOutputSlot()->IsTensorInfoSet();
layer.GetInputSlot(1).GetConnectedOutputSlot()->IsTensorInfoSet();
- const TensorInfo &inputInfo0 = layer.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- const TensorInfo &inputInfo1 = layer.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+ const TensorInfo& inputInfo0 = layer.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ const TensorInfo& inputInfo1 = layer.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
if (inputInfo0.GetNumDimensions() == inputInfo1.GetNumDimensions())
{
@@ -44,14 +39,14 @@ public:
}
unsigned int reshapeSlot = 1;
- TensorInfo reshapeInfo = inputInfo1;
- TensorInfo inputInfo = inputInfo0;
+ TensorInfo reshapeInfo = inputInfo1;
+ TensorInfo inputInfo = inputInfo0;
if (inputInfo0.GetNumDimensions() < inputInfo1.GetNumDimensions())
{
reshapeSlot = 0;
reshapeInfo = inputInfo0;
- inputInfo = inputInfo1;
+ inputInfo = inputInfo1;
}
uint32_t numDimensions = inputInfo.GetNumDimensions();
@@ -63,38 +58,39 @@ public:
}
std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
- std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
+ std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
reshapeInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
- // If the parent layer is a Constant layer we just change the tensor info rather than adding a reshape layer
+ // If the parent layer is a Constant layer and it is only used once we can short circuit by just
+ // changing the tensor info rather than adding a reshape layer.
Layer& parentLayer = layer.GetInputSlot(reshapeSlot).GetConnectedOutputSlot()->GetOwningLayer();
- if (parentLayer.GetType() == armnn::LayerType::Constant)
+ if ((parentLayer.GetType() == armnn::LayerType::Constant) &&
+ (parentLayer.GetOutputSlot(0).GetNumConnections() == 1))
{
ConstantLayer& constantLayer = static_cast<ConstantLayer&>(parentLayer);
constantLayer.m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(
- ConstTensor(reshapeInfo,constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
+ ConstTensor(reshapeInfo, constantLayer.m_LayerOutput.get()->GetConstTensor<void>()));
constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo);
-
- return;
}
-
- const std::string layerName = "Reshape_for:" + layer.GetNameStr() + "-" + std::to_string(reshapeSlot);
- const ReshapeDescriptor descriptor{reshapeInfo.GetShape()};
- ReshapeLayer *reshapeLayer = graph.InsertNewLayer<ReshapeLayer>(layer.GetInputSlot(reshapeSlot),
- descriptor,
- layerName.c_str());
- reshapeLayer->GetOutputSlot().SetTensorInfo(reshapeInfo);
+ else
+ {
+ const std::string layerName = "Reshape_for:" + layer.GetNameStr() + "-" + std::to_string(reshapeSlot);
+ const ReshapeDescriptor descriptor{ reshapeInfo.GetShape() };
+ ReshapeLayer* reshapeLayer =
+ graph.InsertNewLayer<ReshapeLayer>(layer.GetInputSlot(reshapeSlot), descriptor, layerName.c_str());
+ reshapeLayer->GetOutputSlot().SetTensorInfo(reshapeInfo);
+ }
}
}
protected:
- AddBroadcastReshapeLayerImpl() = default;
+ AddBroadcastReshapeLayerImpl() = default;
~AddBroadcastReshapeLayerImpl() = default;
};
using AddBroadcastReshapeLayer = OptimizeForType<Layer, AddBroadcastReshapeLayerImpl>;
-} // namespace optimizations
-} // namespace armnn
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 594b17261d..4523e70437 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -334,4 +334,68 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
BOOST_TEST(!reshapeLayer);
}
+BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
+{
+ // In this test case we recreate the situation where an Addition layer has
+ // a constant second term, e.g. [1,512] + [1]. The AddBroadcastReshapeLayer
+ // should modify the constant tensor info to match the number of dimensions.
+ // However, if this constant term is being reused elsewhere then we shouldn't
+ // modify it. Instead we insert a reshape layer.
+
+ // What we'll do is have two sequential add layers both using the same const tensor.
+ Graph graph;
+ const TensorInfo inputInfo({ 1, 512 }, DataType::Float32);
+ const TensorInfo constantTermInfo({ 1 }, DataType::Float32);
+ const TensorInfo outputInfo({ 1, 512 }, DataType::Float32);
+
+ auto input = graph.AddLayer<InputLayer>(0, "input");
+ auto constant = graph.AddLayer<ConstantLayer>("constant");
+ auto add1 = graph.AddLayer<AdditionLayer>("add1");
+ auto add2 = graph.AddLayer<AdditionLayer>("add2");
+ auto output = graph.AddLayer<OutputLayer>(0, "output");
+
+ input->GetOutputSlot().SetTensorInfo(inputInfo);
+ constant->GetOutputSlot().SetTensorInfo(constantTermInfo);
+ float tensor[] = { 2.0f };
+ constant->m_LayerOutput = std::make_unique<ScopedCpuTensorHandle>(ConstTensor(constantTermInfo, &tensor));
+ add1->GetOutputSlot().SetTensorInfo(outputInfo);
+
+ input->GetOutputSlot().Connect(add1->GetInputSlot(0));
+ constant->GetOutputSlot().Connect(add1->GetInputSlot(1));
+ add1->GetOutputSlot().Connect(add2->GetInputSlot(0));
+ add2->GetOutputSlot().Connect(output->GetInputSlot(0));
+ // This second connection should prevent the modification of the const output tensor.
+ constant->GetOutputSlot().Connect(add2->GetInputSlot(1));
+
+ BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ // Run optimizer
+ armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+ // Broadcast reshape should have been added before each addition layer.
+ BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ConstantLayer>,
+ &IsLayerOfType<ReshapeLayer>,
+ &IsLayerOfType<ReshapeLayer>,
+ &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<AdditionLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ // Ensure the output shape of the constant hasn't changed.
+ BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
+ // There should be two extra reshape layers with appropriate names.
+ Layer* const reshapeLayer1 = GetFirstLayerWithName(graph, "Reshape_for:add1-1");
+ Layer* const reshapeLayer2 = GetFirstLayerWithName(graph, "Reshape_for:add2-1");
+ BOOST_TEST(reshapeLayer1);
+ BOOST_TEST(reshapeLayer2);
+}
+
+
+
BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file