From ee18dc8d1725f472850ab0c398fd7cbc4b850891 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Wed, 17 Jul 2019 11:27:46 +0100
Subject: IVGCVSW-3469 Add front end for Quantized LSTM layer

* Added new layer QuantizedLstm (Android Q)
* Made necessary changes to APIs
* Added unit tests

Change-Id: I3b9f16b0e7e49f51932cf204c87cb7118798123a
Signed-off-by: James Conroy
---
 src/backends/backendsCommon/LayerSupportBase.cpp  | 11 ++++
 src/backends/backendsCommon/LayerSupportBase.hpp  |  8 +++
 src/backends/backendsCommon/WorkloadData.hpp      | 37 +++++++++++
 src/backends/backendsCommon/WorkloadFactory.cpp   | 76 +++++++++++++++++++++
 src/backends/backendsCommon/WorkloadFactory.hpp   |  3 +
 .../test/IsLayerSupportedTestImpl.hpp             | 77 ++++++++++++++++++++--
 6 files changed, 207 insertions(+), 5 deletions(-)

(limited to 'src/backends')

diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index e843423a92..f202fedb4f 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -347,6 +347,17 @@ bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }

+bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& input,
+                                                const TensorInfo& previousCellStateIn,
+                                                const TensorInfo& previousOutputIn,
+                                                const TensorInfo& cellStateOut,
+                                                const TensorInfo& output,
+                                                const QuantizedLstmInputParamsInfo& paramsInfo,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
                                           const ReshapeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index d49fc3e371..c860e34874 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -214,6 +214,14 @@ public:
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

+    bool IsQuantizedLstmSupported(const TensorInfo& input,
+                                  const TensorInfo& previousCellStateIn,
+                                  const TensorInfo& previousOutputIn,
+                                  const TensorInfo& cellStateOut,
+                                  const TensorInfo& output,
+                                  const QuantizedLstmInputParamsInfo& paramsInfo,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsReshapeSupported(const TensorInfo& input,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index f3d50699e6..d790dafd58 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -477,4 +477,41 @@ struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<TransposeConvolution2dDescriptor>
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
             result = layerSupportObject->IsQuantizeSupported(input, output, reason);
             break;
         }
+        case LayerType::QuantizedLstm:
+        {
+            auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
+
+            // Inputs
+            const TensorInfo& input = OverrideDataType(
+                layer.GetInputSlot(0).GetConnection()->GetTensorInfo(), dataType);
+            const TensorInfo& previousCellStateIn = OverrideDataType(
+                layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
+            const TensorInfo& previousOutputIn = OverrideDataType(
+                layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
+
+            // Outputs
+            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
+            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
+
+            // QuantizedLstm parameters
+            const TensorInfo& inputToInputWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
+            const TensorInfo& inputToForgetWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
+            const TensorInfo& inputToCellWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
+            const TensorInfo& inputToOutputWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
+
+            const TensorInfo& recurrentToInputWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
+            const TensorInfo& recurrentToForgetWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
+            const TensorInfo& recurrentToCellWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
+            const TensorInfo& recurrentToOutputWeights = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
+
+            const TensorInfo& inputGateBias = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(), dataType);
+            const TensorInfo& forgetGateBias = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
+            const TensorInfo& cellBias = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(), dataType);
+            const TensorInfo& outputGateBias = OverrideDataType(
+                cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(), dataType);
+
+            QuantizedLstmInputParamsInfo paramsInfo;
+
+            paramsInfo.m_InputToInputWeights = &inputToInputWeights;
+            paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
+            paramsInfo.m_InputToCellWeights = &inputToCellWeights;
+            paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
+
+            paramsInfo.m_RecurrentToInputWeights = &recurrentToInputWeights;
+            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
+            paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
+            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
+
+            paramsInfo.m_InputGateBias = &inputGateBias;
+            paramsInfo.m_ForgetGateBias = &forgetGateBias;
+            paramsInfo.m_CellBias = &cellBias;
+            paramsInfo.m_OutputGateBias = &outputGateBias;
+
+            result = layerSupportObject->IsQuantizedLstmSupported(input,
+                                                                  previousCellStateIn,
+                                                                  previousOutputIn,
+                                                                  cellStateOut,
+                                                                  output,
+                                                                  paramsInfo,
+                                                                  reason);
+            break;
+        }
         case LayerType::Division:
         {
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1109,6 +1179,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueD
     return std::unique_ptr<IWorkload>();
 }

+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
+                                                                 const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 749a258a9d..6d03da74fc 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -161,6 +161,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                       const WorkloadInfo& Info) const;

+    virtual std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 6aff7596b5..451c585adc 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -27,14 +27,17 @@ template<armnn::DataType DataType>
 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
 {
     armnn::WorkloadInfo info;
+
     for (unsigned int i=0; i < numInputs; i++)
     {
         info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
     }
+
     for (unsigned int o=0; o < numOutputs; o++)
     {
         info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
     }
+
     return info;
 }

@@ -46,10 +49,12 @@ struct DummyLayer
     {
         m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     LayerType* m_Layer;
 };

@@ -61,10 +66,12 @@ struct DummyLayer
     {
         m_Layer = dummyGraph.AddLayer<LayerType>("");
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     LayerType* m_Layer;
 };

@@ -83,12 +90,13 @@ struct DummyLayer
         m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }

-    armnn::BatchNormalizationLayer* m_Layer;
+    armnn::BatchNormalizationLayer* m_Layer;
 };

 template<>
@@ -98,10 +106,12 @@ struct DummyLayer
     {
         m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::BatchToSpaceNdLayer* m_Layer;
 };

@@ -112,10 +122,12 @@ struct DummyLayer
     {
         m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::ConstantLayer* m_Layer;
 };

@@ -125,12 +137,13 @@ struct DummyLayer
     DummyLayer()
     {
         m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
-
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::InputLayer* m_Layer;
 };

@@ -141,12 +154,13 @@ struct DummyLayer
     {
         armnn::OriginsDescriptor desc(2);
         m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
-
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::ConcatLayer* m_Layer;
 };

@@ -156,12 +170,13 @@ struct DummyLayer
     DummyLayer()
     {
         m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
-
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::OutputLayer* m_Layer;
 };

@@ -172,12 +187,13 @@ struct DummyLayer
     {
         armnn::ViewsDescriptor desc(1);
         m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
-
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::SplitterLayer* m_Layer;
 };

@@ -193,10 +209,12 @@ struct DummyConvolutionLayer
         m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
     }
+
     ~DummyConvolutionLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     ConvolutionLayerType* m_Layer;
 };

@@ -255,10 +273,12 @@ struct DummyLstmLayer
         m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
     }
+
     ~DummyLstmLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::LstmLayer* m_Layer;
 };

@@ -268,6 +288,49 @@ struct DummyLayer
 {
 };

+template<>
+struct DummyLayer<armnn::QuantizedLstmLayer, void>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
+
+        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+
+        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+
+        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
+        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
+        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
+        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
+    }
+
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+
+    armnn::QuantizedLstmLayer* m_Layer;
+};
+
 template<>
 struct DummyLayer<armnn::FullyConnectedLayer>
 {
@@ -278,10 +341,12 @@ struct DummyLayer
         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
     }
+
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
+
     armnn::FullyConnectedLayer* m_Layer;
 };

@@ -392,6 +457,8 @@ DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

 DECLARE_LAYER_POLICY_1_PARAM(Prelu)

+DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
+
 DECLARE_LAYER_POLICY_1_PARAM(Division)

 DECLARE_LAYER_POLICY_2_PARAM(Resize)
--
cgit v1.2.1
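For context, the patch exposes the new layer through two backend-facing hooks: layer-support queries gain IsQuantizedLstmSupported(), which receives the three inputs, two outputs and a QuantizedLstmInputParamsInfo describing the twelve weight/bias tensors, and IWorkloadFactory gains CreateQuantizedLstm(); both default to "unsupported"/null so existing backends are unaffected. The sketch below is illustrative only and is not part of the patch: SampleLayerSupport, the main() driver and the include paths (notably armnn/QuantizedLstmParams.hpp as the assumed home of QuantizedLstmInputParamsInfo) are assumptions. A real backend would additionally override CreateQuantizedLstm() in its workload factory to return a concrete workload instead of the null default shown in WorkloadFactory.cpp above.

// Illustrative sketch only -- not part of the patch. It mimics how a backend could
// answer the new IsQuantizedLstmSupported() query added to LayerSupportBase.
// SampleLayerSupport and the include paths are assumptions.
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/QuantizedLstmParams.hpp>       // assumed location of QuantizedLstmInputParamsInfo
#include <backendsCommon/LayerSupportBase.hpp>

#include <boost/core/ignore_unused.hpp>

#include <iostream>
#include <string>

class SampleLayerSupport : public armnn::LayerSupportBase
{
public:
    bool IsQuantizedLstmSupported(const armnn::TensorInfo& input,
                                  const armnn::TensorInfo& previousCellStateIn,
                                  const armnn::TensorInfo& previousOutputIn,
                                  const armnn::TensorInfo& cellStateOut,
                                  const armnn::TensorInfo& output,
                                  const armnn::QuantizedLstmInputParamsInfo& paramsInfo,
                                  armnn::Optional<std::string&> reasonIfUnsupported) const override
    {
        boost::ignore_unused(previousCellStateIn, previousOutputIn, cellStateOut, output, paramsInfo);

        // A real backend would validate every tensor's shape, data type and quantization
        // parameters; this sketch only checks the input data type.
        if (input.GetDataType() != armnn::DataType::QuantisedAsymm8)
        {
            if (reasonIfUnsupported.has_value())
            {
                reasonIfUnsupported.value() = "QuantizedLstm input must be QuantisedAsymm8";
            }
            return false;
        }
        return true;
    }
};

int main()
{
    using namespace armnn;

    // Dummy 1x1x1x1 infos, analogous to the DummyLayer specialization added to the unit tests.
    TensorInfo weightInfo(TensorShape({1,1,1,1}), DataType::QuantisedAsymm8, 1.0f, 0);
    TensorInfo biasInfo(TensorShape({1,1,1,1}), DataType::Signed32, 1.0f, 0);

    // Fill the params-info struct exactly as WorkloadFactory::IsLayerSupported does above.
    QuantizedLstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToInputWeights      = &weightInfo;
    paramsInfo.m_InputToForgetWeights     = &weightInfo;
    paramsInfo.m_InputToCellWeights       = &weightInfo;
    paramsInfo.m_InputToOutputWeights     = &weightInfo;
    paramsInfo.m_RecurrentToInputWeights  = &weightInfo;
    paramsInfo.m_RecurrentToForgetWeights = &weightInfo;
    paramsInfo.m_RecurrentToCellWeights   = &weightInfo;
    paramsInfo.m_RecurrentToOutputWeights = &weightInfo;
    paramsInfo.m_InputGateBias            = &biasInfo;
    paramsInfo.m_ForgetGateBias           = &biasInfo;
    paramsInfo.m_CellBias                 = &biasInfo;
    paramsInfo.m_OutputGateBias           = &biasInfo;

    std::string reason;
    SampleLayerSupport layerSupport;
    const bool supported = layerSupport.IsQuantizedLstmSupported(
        weightInfo, weightInfo, weightInfo, weightInfo, weightInfo,
        paramsInfo, Optional<std::string&>(reason));

    std::cout << "QuantizedLstm supported: " << std::boolalpha << supported
              << (reason.empty() ? "" : " (" + reason + ")") << std::endl;
    return supported ? 0 : 1;
}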