From 0bd89a87f15bf0983eace53df1160a3e64bc0e75 Mon Sep 17 00:00:00 2001 From: Matteo Martincigh <matteo.martincigh@arm.com> Date: Tue, 2 Jul 2019 16:53:10 +0100 Subject: IVGCVSW-3370 Add broadcasting support to PReLU to properly run the Android VTS/NN tests * Updated ConvertPrelu to support input broadcasting * Updated the BroadcastTensor utility function so that it preserves the order of the inputs * Updated the documentation Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com> Change-Id: Ibbac6f7161132740e61c85f597f8be70cd5d7325 --- ConversionUtils.hpp | 125 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 53 deletions(-) (limited to 'ConversionUtils.hpp') diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index 36bc4ae1..9a711cb7 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -201,55 +201,91 @@ inline bool IsBool(V1_2::Operand operand) #endif -void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer, - armnn::INetwork& network) +template<typename LayerHandleType> +armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer, + armnn::TensorInfo reshapeInfo) +{ + armnn::ReshapeDescriptor reshapeDescriptor; + reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape(); + + armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor); + BOOST_ASSERT(reshapeLayer != nullptr); + + // Attach the input layer to the reshape layer + inputLayer.Connect(reshapeLayer->GetInputSlot(0)); + reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo); + + return *reshapeLayer; +} + +void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, + armnn::IConnectableLayer* startLayer, armnn::INetwork& network) { BOOST_ASSERT(startLayer != nullptr); - const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo(); - const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo(); - if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions()) + const
armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo(); + const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo(); + + unsigned int inputDimensions0 = inputInfo0.GetNumDimensions(); + unsigned int inputDimensions1 = inputInfo1.GetNumDimensions(); + + if (inputDimensions0 == inputDimensions1) { - // If the number of dimensions do not match then we need to add degenerate dimensions - // to the "smaller" tensor using a reshape: - // Small Big - // | | - // Reshape | - // \ / - // Add - bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions(); + // The inputs have the same number of dimensions, simply connect them to the given layer as they are + input0.Connect(startLayer->GetInputSlot(0)); + input1.Connect(startLayer->GetInputSlot(1)); - LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0; - const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo(); + return; + } - LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1; - const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo(); + // Since the number of dimensions do not match then we need to add degenerate dimensions + // to the "smaller" tensor using a reshape, while keeping the order of the inputs. 
- const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions(); - std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1); - unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions(); - for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i) - { - reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference]; - } - armnn::TensorInfo reshapedInfo = smallTensorDims; - reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()), - reshapedDims.data() }); + unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1); + unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) - + boost::numeric_cast<int>(inputDimensions1)); - armnn::ReshapeDescriptor reshapeDesc; - reshapeDesc.m_TargetShape = reshapedInfo.GetShape(); - armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc); - smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0)); - reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo); + bool input0IsSmaller = inputDimensions0 < inputDimensions1; + LayerInputHandle& smallInputHandle = input0IsSmaller ?
input0 : input1; + const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo(); - // Connect the outputs from new reshape and original input layer - reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0)); - bigTensorHandle.Connect(startLayer->GetInputSlot(1)); + const armnn::TensorShape& smallShape = smallInfo.GetShape(); + std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1); + for (unsigned int i = sizeDifference; i < maxInputDimensions; i++) + { + reshapedDimensions[i] = smallShape[i - sizeDifference]; + } + + armnn::TensorInfo reshapedInfo = smallInfo; + reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()), + reshapedDimensions.data() }); + armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo); + + if (input0IsSmaller) + { + // Input0 is the "smaller" tensor, connect the reshape layer as follows: + // + // Input0 Input1 + // | | + // Reshape | + // \ / + // StartLayer + + reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0)); + input1.Connect(startLayer->GetInputSlot(1)); } else { + // Input1 is the "smaller" tensor, connect the reshape layer as follows: + // + // Input0 Input1 + // | | + // | Reshape + // \ / + // StartLayer + input0.Connect(startLayer->GetInputSlot(0)); - input1.Connect(startLayer->GetInputSlot(1)); + reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1)); } } @@ -402,23 +438,6 @@ bool RequiresReshape(armnn::TensorShape & inputShape) return inputShape.GetNumDimensions() < 3; } -template<typename OSlot> -armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer, - armnn::TensorInfo reshapeInfo) -{ - armnn::ReshapeDescriptor reshapeDescriptor; - reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape(); - - armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor); - BOOST_ASSERT(reshapeLayer != nullptr); - - // Attach the input layer to the reshape layer -
inputLayer.Connect(reshapeLayer->GetInputSlot(0)); - reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo); - - return *reshapeLayer; -} - void SwizzleInputs(armnn::INetwork& network, std::vector<LayerInputHandle>& inputs, std::vector<armnn::TensorShape>& inputShapes, -- cgit v1.2.1