author    | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-09-14 16:12:44 +0100
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-09-15 10:19:00 +0100
commit    | 16f82f987b44b090a01807a2c79ed7fcc6bf80ea (patch)
tree      | 5e26fccece92956c19e14d0d5c106e5d38ea4576 /src/armnn
parent    | 919c14ef132986aa1514b2070ce6d19b5579a6ab (diff)
download  | armnn-16f82f987b44b090a01807a2c79ed7fcc6bf80ea.tar.gz
IVGCVSW-5305 AddBroadcastReshapeLayer as optimizer
* Remove AddBroadcastReshapeLayer from TfLiteParser
* Add AddBroadcastReshapeLayer as optimizer
* AddBroadcastReshapeLayer optimizer unit tests
* Load-scope dynamic tensor broadcasting unit tests
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I3549e85b71b41cbd4d96c0f1ece7887acbca76d1
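The optimisation inserts a ReshapeLayer in front of the lower-rank input of a broadcastable elementwise layer, left-padding that input's shape with 1s until both inputs have the same number of dimensions. The standalone sketch below mirrors the std::copy_backward padding used in AddBroadcastReshapeLayer.hpp; PadShapeToRank is an illustrative helper and not part of this patch.

```cpp
// Minimal sketch of the shape padding the optimiser performs: a { 3, 5 } input
// broadcast against a rank-4 tensor becomes { 1, 1, 3, 5 }. Illustrative only;
// assumes shape.size() <= targetRank.
#include <algorithm>
#include <cstdio>
#include <vector>

std::vector<unsigned int> PadShapeToRank(const std::vector<unsigned int>& shape, unsigned int targetRank)
{
    std::vector<unsigned int> padded(targetRank, 1);               // start with every dimension set to 1
    std::copy_backward(shape.begin(), shape.end(), padded.end());  // right-align the original dimensions
    return padded;
}

int main()
{
    for (unsigned int d : PadShapeToRank({ 3, 5 }, 4))
    {
        std::printf("%u ", d);  // prints: 1 1 3 5
    }
    return 0;
}
```

This matches the behaviour exercised by the unit tests below, e.g. a { 3, 5 } input broadcast against { 1, 2, 3, 5 } is reshaped to { 1, 1, 3, 5 } before the elementwise layer runs.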
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/Network.cpp                                          |   5
-rw-r--r-- | src/armnn/layers/ElementwiseBaseLayer.cpp                      |  31
-rw-r--r-- | src/armnn/optimizations/AddBroadcastReshapeLayer.hpp           |  85
-rw-r--r-- | src/armnn/optimizations/All.hpp                                |   1
-rw-r--r-- | src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp | 288
5 files changed, 402 insertions, 8 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 17813a8983..cd5f369271 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1038,11 +1038,14 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     // Get the optimized graph
     Graph& optGraph = optNetObjPtr->GetGraph();
 
+    // Perform AddBroadcastReshapeLayer optimisation
+    using namespace optimizations;
+    Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
     // Infer the tensor infos for all output slots. Throws an exception on failure
     optGraph.InferTensorInfos();
 
     // Perform optimisation passes
-    using namespace optimizations;
     Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                 SquashEqualTransposeSiblings(),
                                                 SquashEqualReshapeSiblings(),
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index b4a3cea9e1..631e08c2ac 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -22,18 +22,29 @@ ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned
 std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     ARMNN_ASSERT(inputShapes.size() == 2);
-    auto& input0 = inputShapes[0];
-    auto& input1 = inputShapes[1];
+    TensorShape input0 = inputShapes[0];
+    TensorShape input1 = inputShapes[1];
+
+    if (m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+    {
+        ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+    }
+    else if (m_ShapeInferenceMethod == ShapeInferenceMethod::InferAndValidate &&
+             inputShapes[0].GetNumDimensions() < inputShapes[1].GetNumDimensions())
+    {
+        input1 = inputShapes[0];
+        input0 = inputShapes[1];
+    }
 
-    // Get the max of the inputs.
-    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
     unsigned int numDims = input0.GetNumDimensions();
-    std::vector<unsigned int> dims(numDims);
+    unsigned int shiftedDims = input0.GetNumDimensions() - input1.GetNumDimensions();
 
-    for (unsigned int i = 0; i < numDims; i++)
+    // Get the max of the inputs.
+    std::vector<unsigned int> dims(numDims);
+    for (unsigned int i = shiftedDims; i < numDims; i++)
     {
         unsigned int dim0 = input0[i];
-        unsigned int dim1 = input1[i];
+        unsigned int dim1 = input1[i - shiftedDims];
 
 #if !NDEBUG
         // Validate inputs are broadcast compatible.
@@ -44,6 +55,12 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
         dims[i] = std::max(dim0, dim1);
     }
 
+    // Fill in the rest of the shifted dimensions.
+    for (unsigned int i = 0; i < shiftedDims; i++)
+    {
+        dims[i] = input0[i];
+    }
+
     return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
 }
 
diff --git a/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
new file mode 100644
index 0000000000..6bb53d0f12
--- /dev/null
+++ b/src/armnn/optimizations/AddBroadcastReshapeLayer.hpp
@@ -0,0 +1,85 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+namespace optimizations
+{
+
+static const std::set<armnn::LayerType> broadcastOps {
+    LayerType::Addition,
+    LayerType::Division,
+    LayerType::Maximum,
+    LayerType::Minimum,
+    LayerType::Multiplication,
+    LayerType::Subtraction
+};
+
+class AddBroadcastReshapeLayerImpl
+{
+public:
+    /// Run for every ElementwiseBaseLayer. Add Broadcast reshape layer if the inputs shape are different.
+    void Run(Graph& graph, Layer& layer) const
+    {
+        if (std::find(broadcastOps.begin(), broadcastOps.end(), layer.GetType()) != broadcastOps.end())
+        {
+            layer.GetInputSlot(0).GetConnectedOutputSlot()->IsTensorInfoSet();
+            layer.GetInputSlot(1).GetConnectedOutputSlot()->IsTensorInfoSet();
+
+            const TensorInfo& inputInfo0 = layer.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+            const TensorInfo& inputInfo1 = layer.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+
+            if (inputInfo0.GetNumDimensions() == inputInfo1.GetNumDimensions())
+            {
+                return;
+            }
+
+            unsigned int reshapeSlot = 1;
+            TensorInfo reshapeInfo = inputInfo1;
+            TensorInfo inputInfo = inputInfo0;
+
+            if (inputInfo0.GetNumDimensions() < inputInfo1.GetNumDimensions())
+            {
+                reshapeSlot = 0;
+                reshapeInfo = inputInfo0;
+                inputInfo = inputInfo1;
+            }
+
+            uint32_t numDimensions = inputInfo.GetNumDimensions();
+
+            std::vector<unsigned> reshapedDim;
+            for (unsigned int i = 0; i < reshapeInfo.GetNumDimensions(); ++i)
+            {
+                reshapedDim.push_back(reshapeInfo.GetShape()[i]);
+            }
+
+            std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
+            std::copy_backward(reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
+
+            reshapeInfo.SetShape(armnn::TensorShape{ numDimensions, reshapedDimensions.data() });
+            const std::string layerName = "Reshape_for:" + layer.GetNameStr() + "-" + std::to_string(reshapeSlot);
+            const ReshapeDescriptor descriptor{ reshapeInfo.GetShape() };
+            ReshapeLayer* reshapeLayer = graph.InsertNewLayer<ReshapeLayer>(layer.GetInputSlot(reshapeSlot),
+                                                                            descriptor,
+                                                                            layerName.c_str());
+            reshapeLayer->GetOutputSlot().SetTensorInfo(reshapeInfo);
+        }
+    }
+
+protected:
+    AddBroadcastReshapeLayerImpl() = default;
+    ~AddBroadcastReshapeLayerImpl() = default;
+};
+
+using AddBroadcastReshapeLayer = OptimizeForType<Layer, AddBroadcastReshapeLayerImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index cb484d5a59..e89c36b834 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include "AddBroadcastReshapeLayer.hpp"
 #include "AddDebug.hpp"
 #include "ConvertConstants.hpp"
 #include "ConvertFp32NetworkToBf16.hpp"
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
new file mode 100644
index 0000000000..fe3cc31838
--- /dev/null
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -0,0 +1,288 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../GraphUtils.hpp"
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace optimizations;
+
+void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
+                                           const TensorInfo& info1,
+                                           const TensorInfo& outputInfo,
+                                           const std::string& reshapeLayerName,
+                                           const TensorShape& expectedReshapeShape,
+                                           const DataType expectedDataType)
+{
+    Graph graph;
+
+    auto input0 = graph.AddLayer<InputLayer>(0, "input0");
+    auto input1 = graph.AddLayer<InputLayer>(1, "input1");
+    auto add = graph.AddLayer<AdditionLayer>("add");
+    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    input0->GetOutputSlot().SetTensorInfo(info0);
+    input1->GetOutputSlot().SetTensorInfo(info1);
+    add->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    input0->GetOutputSlot().Connect(add->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(add->GetInputSlot(1));
+    add->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<AdditionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Run optimizer
+    armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+    // Broadcast reshape layer has been added to the graph correctly
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<ReshapeLayer>,
+                             &IsLayerOfType<AdditionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
+    BOOST_TEST(reshapeLayer);
+    auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
+
+    // Tensorshape and the data type are correct
+    BOOST_TEST((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
+    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
+{
+    const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
+    const TensorInfo info1({ 1 }, DataType::Float32);
+    AddBroadcastReshapeLayerOptimizerTest(info0, info1, info0, "Reshape_for:add-1",
+                                          TensorShape({ 1, 1, 1, 1 }),
+                                          DataType::Float32);
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
+{
+    const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
+    const TensorInfo info1({ 5 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 1, 1, 5 }, DataType::Float32);
+    AddBroadcastReshapeLayerOptimizerTest(info0, info1, outputInfo, "Reshape_for:add-1",
+                                          TensorShape({ 1, 1, 1, 5 }),
+                                          DataType::Float32);
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
+{
+    const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
+    const TensorInfo info1({ 3, 5 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::Float32);
+    AddBroadcastReshapeLayerOptimizerTest(info0, info1, outputInfo, "Reshape_for:add-1",
+                                          TensorShape({ 1, 1, 3, 5 }),
+                                          DataType::Float32);
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
+{
+    const TensorInfo info0({ 2, 1, 1, 1 }, DataType::Float32);
+    const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
+    const TensorInfo outputInfo({ 2, 3, 4, 5 }, DataType::Float32);
+    AddBroadcastReshapeLayerOptimizerTest(info0, info1, outputInfo, "Reshape_for:add-1",
+                                          TensorShape({ 1, 3, 4, 5 }),
+                                          DataType::Float32);
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
+{
+    const TensorInfo info0({ 2, 3, 1, 1 }, DataType::Float32);
+    const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
+    const TensorInfo outputInfo({ 2, 3, 4, 5 }, DataType::Float32);
+    AddBroadcastReshapeLayerOptimizerTest(info0, info1, outputInfo, "Reshape_for:add-1",
+                                          TensorShape({ 1, 3, 4, 5 }),
+                                          DataType::Float32);
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
+{
+    Graph graph;
+    const TensorInfo info0({ 5 }, DataType::Float32);
+    const TensorInfo info1({ 1, 2, 3, 5 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::Float32);
+
+    auto input0 = graph.AddLayer<InputLayer>(0, "input0");
+    auto input1 = graph.AddLayer<InputLayer>(1, "input1");
+    auto sub = graph.AddLayer<SubtractionLayer>("sub");
+    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    input0->GetOutputSlot().SetTensorInfo(info0);
+    input1->GetOutputSlot().SetTensorInfo(info1);
+    sub->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    input0->GetOutputSlot().Connect(sub->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(sub->GetInputSlot(1));
+    sub->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<SubtractionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Run optimizer
+    armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+    // Broadcast reshape layer has been added to the graph correctly
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<ReshapeLayer>,
+                             &IsLayerOfType<SubtractionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
+    BOOST_TEST(reshapeLayer);
+    auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
+
+    // Tensorshape and the data type are correct
+    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
+    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
+{
+    Graph graph;
+    const TensorInfo info0({ 1, 4, 5 }, DataType::QAsymmS8);
+    const TensorInfo info1({ 1, 2, 4, 5 }, DataType::QAsymmS8);
+    const TensorInfo outputInfo({ 1, 2, 4, 5 }, DataType::QAsymmS8);
+
+    auto input0 = graph.AddLayer<InputLayer>(0, "input0");
+    auto input1 = graph.AddLayer<InputLayer>(1, "input1");
+    auto div = graph.AddLayer<DivisionLayer>("div");
+    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    input0->GetOutputSlot().SetTensorInfo(info0);
+    input1->GetOutputSlot().SetTensorInfo(info1);
+    div->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    input0->GetOutputSlot().Connect(div->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(div->GetInputSlot(1));
+    div->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<DivisionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Run optimizer
+    armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+    // Broadcast reshape layer has been added to the graph correctly
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<ReshapeLayer>,
+                             &IsLayerOfType<DivisionLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
+    BOOST_TEST(reshapeLayer);
+    auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
+
+    // Tensorshape and the data type are correct
+    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
+    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+}
+
+BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
+{
+    Graph graph;
+    const TensorInfo info0({ 3, 5 }, DataType::QAsymmU8);
+    const TensorInfo info1({ 1, 2, 3, 5 }, DataType::QAsymmU8);
+    const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::QAsymmU8);
+
+    auto input0 = graph.AddLayer<InputLayer>(0, "input0");
+    auto input1 = graph.AddLayer<InputLayer>(1, "input1");
+    auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    input0->GetOutputSlot().SetTensorInfo(info0);
+    input1->GetOutputSlot().SetTensorInfo(info1);
+    mul->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    input0->GetOutputSlot().Connect(mul->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
+    mul->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Run optimizer
+    armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+    // Broadcast reshape layer has been added to the graph correctly
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<ReshapeLayer>,
+                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
+    BOOST_TEST(reshapeLayer);
+    auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
+
+    // Tensorshape and the data type are correct
+    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
+    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+}
+
+BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
+{
+    Graph graph;
+    const TensorInfo info0({ 1, 1, 1, 1 }, DataType::QAsymmU8);
+    const TensorInfo info1({ 1, 2, 3, 5 }, DataType::QAsymmU8);
+    const TensorInfo outputInfo({ 1, 2, 3, 5 }, DataType::QAsymmU8);
+
+    auto input0 = graph.AddLayer<InputLayer>(0, "input0");
+    auto input1 = graph.AddLayer<InputLayer>(1, "input1");
+    auto mul = graph.AddLayer<MultiplicationLayer>("mul");
+    auto output = graph.AddLayer<OutputLayer>(0, "output");
+    input0->GetOutputSlot().SetTensorInfo(info0);
+    input1->GetOutputSlot().SetTensorInfo(info1);
+    mul->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    input0->GetOutputSlot().Connect(mul->GetInputSlot(0));
+    input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
+    mul->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    // Run optimizer
+    armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
+
+    // Broadcast reshape layer has not been added to the graph
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<InputLayer>,
+                             &IsLayerOfType<MultiplicationLayer>,
+                             &IsLayerOfType<OutputLayer>));
+
+    Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
+    BOOST_TEST(!reshapeLayer);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
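Because the reshape insertion now runs inside armnn::Optimize() rather than in the TfLiteParser, networks built directly through the public INetwork API also pick up the broadcast reshape. The sketch below is a minimal illustration only, assuming the public Arm NN API of this period; the backend choice, shapes and layer names are not taken from the patch.

```cpp
#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    // Build a small network whose addition broadcasts a rank-1 input against a rank-4 input.
    INetworkPtr net = INetwork::Create();
    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
    IConnectableLayer* add    = net->AddAdditionLayer("add");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input0->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 2, 3, 5 }, DataType::Float32));
    input1->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 5 }, DataType::Float32));
    add->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 2, 3, 5 }, DataType::Float32));

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Optimize() now runs the AddBroadcastReshapeLayer pass before the other optimisations,
    // so the { 5 } input should be reshaped to { 1, 1, 1, 5 } ahead of the addition,
    // as in the AddBroadcastReshapeLayer1DTest case above.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    return optNet ? 0 : 1;
}
```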