diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2022-08-03 11:27:05 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-29 10:12:21 +0100 |
commit | b016157f1eea1acc6a84308521c0b90543161da4 (patch) | |
tree | fe228d1014f4fa9a4f74227d0640719d1d92193c /shim/sl/canonical/ConversionUtils.cpp | |
parent | ee480d2d6538b0192d40a00ed696b30e2587430c (diff) | |
download | armnn-b016157f1eea1acc6a84308521c0b90543161da4.tar.gz |
IVGCVSW-6954 'Arm NN SL Improvements'
* Move the Conv2D and DepthwiseConv2D validation to the Optimization level
when the weights and tensors are provided as constant inputs
* Take into account offset and scales values when doing INT8 to FP32 dequantization
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1f81f15640395ac041923b10dbe9151159715117
Diffstat (limited to 'shim/sl/canonical/ConversionUtils.cpp')
-rw-r--r-- | shim/sl/canonical/ConversionUtils.cpp | 46 |
1 file changed, 35 insertions, 11 deletions
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp index 96a8ddca6a..f48af32e21 100644 --- a/shim/sl/canonical/ConversionUtils.cpp +++ b/shim/sl/canonical/ConversionUtils.cpp @@ -67,6 +67,11 @@ void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight, Layer } } +armnn::IOutputSlot* LayerInputHandle::GetOutputSlot() const +{ + return m_OutputSlot; +} + ConstTensorPin::ConstTensorPin(bool optional) : m_Optional(optional) {} @@ -276,17 +281,6 @@ LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, case OperandLifeTime::CONSTANT_REFERENCE: { auto constantTensorDataType = operandTensorInfo.GetDataType(); - if (inputHandle) - { - if ((inputHandle->GetTensorInfo().GetDataType() == armnn::DataType::Float32 - || inputHandle->GetTensorInfo().GetDataType() == armnn::DataType::Float16) - && (operandTensorInfo.GetDataType() == armnn::DataType::QAsymmU8 - || operandTensorInfo.GetDataType() == armnn::DataType::QAsymmS8)) - { - constantTensorDataType = inputHandle->GetTensorInfo().GetDataType(); - } - } - // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer. ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, @@ -1029,4 +1023,34 @@ bool SetupAndTrackLayerOutputSlot(const Operation& operation, return true; } +bool IsConnectedToDequantize(armnn::IOutputSlot* ioutputSlot) +{ + VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()"; + if (!ioutputSlot) + { + return false; + } + VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid."; + // Find the connections and layers.. 
+ armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer(); + if (owningLayer.GetType() == armnn::LayerType::Dequantize) + { + VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer."; + armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0); + armnn::IOutputSlot* connection = inputSlot.GetConnection(); + if (connection) + { + VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection."; + armnn::IConnectableLayer& connectedLayer = + connection->GetOwningIConnectableLayer(); + if (connectedLayer.GetType() == armnn::LayerType::Constant) + { + VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant"; + return true; + } + } + } + return false; +} + } // namespace armnn_driver |