From 4df97eb257d3fc29b7431d9cb8a054b21d5a7448 Mon Sep 17 00:00:00 2001 From: Colm Donelan Date: Thu, 29 Apr 2021 08:00:06 +0100 Subject: IVGCVSW-5890 Prevent modification to const layers with multiple connections * In AddBroadcastReshapeLayerImpl check if a constant layer has other connections before modifying its output tensor shape. * In ElementWiseBaseLayer replace an ARMNN_ASSERT with a proper error message. Signed-off-by: Colm Donelan Change-Id: Id3f3796c260eede61f076660505257a8b65d93fc --- src/armnn/layers/ElementwiseBaseLayer.cpp | 29 ++++++---- .../optimizations/AddBroadcastReshapeLayer.hpp | 54 +++++++++--------- .../AddBroadcastReshapeLayerTests.cpp | 64 ++++++++++++++++++++++ 3 files changed, 107 insertions(+), 40 deletions(-) diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp index a169d31b2d..87093f684e 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.cpp +++ b/src/armnn/layers/ElementwiseBaseLayer.cpp @@ -13,11 +13,12 @@ namespace armnn { -ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, - LayerType type, const char* name) +ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, + unsigned int numOutputSlots, + LayerType type, + const char* name) : Layer(numInputSlots, numOutputSlots, type, name) -{ -} +{} std::vector ElementwiseBaseLayer::InferOutputShapes(const std::vector& inputShapes) const { @@ -27,7 +28,15 @@ std::vector ElementwiseBaseLayer::InferOutputShapes(const std::vect if (m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly) { - ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions()); + if (input0.GetNumDimensions() != input1.GetNumDimensions()) + { + std::stringstream errorMessage; + errorMessage << GetLayerTypeAsCString(GetType()) << " layer \"" << GetName() << "\": "; + errorMessage << "The tensor inputs to an element-wise operator are expected to have equal number of " + "dimensions. 
First = " + << input0.GetNumDimensions() << " second = " << input1.GetNumDimensions(); + throw InvalidArgumentException(errorMessage.str(), CHECK_LOCATION()); + } } else if (m_ShapeInferenceMethod == ShapeInferenceMethod::InferAndValidate && inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions()) @@ -36,7 +45,7 @@ std::vector ElementwiseBaseLayer::InferOutputShapes(const std::vect input0 = inputShapes[1]; } - unsigned int numDims = input0.GetNumDimensions(); + unsigned int numDims = input0.GetNumDimensions(); unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions(); // Get the max of the inputs. @@ -72,10 +81,8 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs() VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); - auto inferredShapes = InferOutputShapes({ - GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), - GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() - }); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); @@ -87,4 +94,4 @@ void ElementwiseBaseLayer::ExecuteStrategy(IStrategy& strategy) const strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName()); } -} // namespace armnn +} // namespace armnn diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp index 0a5ad9d152..aa00b9913c 100644 --- a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp +++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp @@ -15,14 +15,9 @@ namespace armnn namespace optimizations { -static const std::set broadcastOps { - LayerType::Addition, - LayerType::Division, - LayerType::Maximum, - LayerType::Minimum, - LayerType::Multiplication, - LayerType::Subtraction -}; +static const std::set broadcastOps{ LayerType::Addition, LayerType::Division, + LayerType::Maximum, 
LayerType::Minimum, + LayerType::Multiplication, LayerType::Subtraction }; class AddBroadcastReshapeLayerImpl { @@ -35,8 +30,8 @@ public: layer.GetInputSlot(0).GetConnectedOutputSlot()->IsTensorInfoSet(); layer.GetInputSlot(1).GetConnectedOutputSlot()->IsTensorInfoSet(); - const TensorInfo &inputInfo0 = layer.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(); - const TensorInfo &inputInfo1 = layer.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(); + const TensorInfo& inputInfo0 = layer.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(); + const TensorInfo& inputInfo1 = layer.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(); if (inputInfo0.GetNumDimensions() == inputInfo1.GetNumDimensions()) { @@ -44,14 +39,14 @@ public: } unsigned int reshapeSlot = 1; - TensorInfo reshapeInfo = inputInfo1; - TensorInfo inputInfo = inputInfo0; + TensorInfo reshapeInfo = inputInfo1; + TensorInfo inputInfo = inputInfo0; if (inputInfo0.GetNumDimensions() < inputInfo1.GetNumDimensions()) { reshapeSlot = 0; reshapeInfo = inputInfo0; - inputInfo = inputInfo1; + inputInfo = inputInfo1; } uint32_t numDimensions = inputInfo.GetNumDimensions(); @@ -63,38 +58,39 @@ public: } std::vector reshapedDimensions(numDimensions, 1); - std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end()); + std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end()); reshapeInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() }); - // If the parent layer is a Constant layer we just change the tensor info rather than adding a reshape layer + // If the parent layer is a Constant layer and it is only used once we can short circuit by just + // changing the tensor info rather than adding a reshape layer. 
Layer& parentLayer = layer.GetInputSlot(reshapeSlot).GetConnectedOutputSlot()->GetOwningLayer(); - if (parentLayer.GetType() == armnn::LayerType::Constant) + if ((parentLayer.GetType() == armnn::LayerType::Constant) && + (parentLayer.GetOutputSlot(0).GetNumConnections() == 1)) { ConstantLayer& constantLayer = static_cast(parentLayer); constantLayer.m_LayerOutput = std::make_unique( - ConstTensor(reshapeInfo,constantLayer.m_LayerOutput.get()->GetConstTensor())); + ConstTensor(reshapeInfo, constantLayer.m_LayerOutput.get()->GetConstTensor())); constantLayer.GetOutputSlot().SetTensorInfo(reshapeInfo); - - return; } - - const std::string layerName = "Reshape_for:" + layer.GetNameStr() + "-" + std::to_string(reshapeSlot); - const ReshapeDescriptor descriptor{reshapeInfo.GetShape()}; - ReshapeLayer *reshapeLayer = graph.InsertNewLayer(layer.GetInputSlot(reshapeSlot), - descriptor, - layerName.c_str()); - reshapeLayer->GetOutputSlot().SetTensorInfo(reshapeInfo); + else + { + const std::string layerName = "Reshape_for:" + layer.GetNameStr() + "-" + std::to_string(reshapeSlot); + const ReshapeDescriptor descriptor{ reshapeInfo.GetShape() }; + ReshapeLayer* reshapeLayer = + graph.InsertNewLayer(layer.GetInputSlot(reshapeSlot), descriptor, layerName.c_str()); + reshapeLayer->GetOutputSlot().SetTensorInfo(reshapeInfo); + } } } protected: - AddBroadcastReshapeLayerImpl() = default; + AddBroadcastReshapeLayerImpl() = default; ~AddBroadcastReshapeLayerImpl() = default; }; using AddBroadcastReshapeLayer = OptimizeForType; -} // namespace optimizations -} // namespace armnn +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp index 594b17261d..4523e70437 100644 --- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp +++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp @@ -334,4 +334,68 @@ 
BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest) BOOST_TEST(!reshapeLayer); } +BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest) +{ + // In this test case we recreate the situation where an Addition layer has + // a constant second term, e.g. [1,512] + [1]. The AddBroadcastReshapeLayer + // should modify the constant tensor info to match the number of dimensions. + // However, if this constant term is being reused elsewhere then we shouldn't + // modify it. Instead we insert a reshape layer. + + // What we'll do is have two sequential add layers both using the same const tensor. + Graph graph; + const TensorInfo inputInfo({ 1, 512 }, DataType::Float32); + const TensorInfo constantTermInfo({ 1 }, DataType::Float32); + const TensorInfo outputInfo({ 1, 512 }, DataType::Float32); + + auto input = graph.AddLayer(0, "input"); + auto constant = graph.AddLayer("constant"); + auto add1 = graph.AddLayer("add1"); + auto add2 = graph.AddLayer("add2"); + auto output = graph.AddLayer(0, "output"); + + input->GetOutputSlot().SetTensorInfo(inputInfo); + constant->GetOutputSlot().SetTensorInfo(constantTermInfo); + float tensor[] = { 2.0f }; + constant->m_LayerOutput = std::make_unique(ConstTensor(constantTermInfo, &tensor)); + add1->GetOutputSlot().SetTensorInfo(outputInfo); + + input->GetOutputSlot().Connect(add1->GetInputSlot(0)); + constant->GetOutputSlot().Connect(add1->GetInputSlot(1)); + add1->GetOutputSlot().Connect(add2->GetInputSlot(0)); + add2->GetOutputSlot().Connect(output->GetInputSlot(0)); + // This second connection should prevent the modification of the const output tensor. 
+ constant->GetOutputSlot().Connect(add2->GetInputSlot(1)); + + BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); + + // Run optimizer + armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer())); + + // Broadcast reshape should have been added before each addition layer. + BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); + + // Ensure the output shape of the constant hasn't changed. + BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape()); + // There should be two extra reshape layers with appropriate names. + Layer* const reshapeLayer1 = GetFirstLayerWithName(graph, "Reshape_for:add1-1"); + Layer* const reshapeLayer2 = GetFirstLayerWithName(graph, "Reshape_for:add2-1"); + BOOST_TEST(reshapeLayer1); + BOOST_TEST(reshapeLayer2); +} + + + BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file -- cgit v1.2.1