aboutsummaryrefslogtreecommitdiff
path: root/shim/sl/canonical/ConversionUtils.cpp
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2022-08-03 11:27:05 +0100
committerNikhil Raj <nikhil.raj@arm.com>2022-08-29 10:12:21 +0100
commitb016157f1eea1acc6a84308521c0b90543161da4 (patch)
treefe228d1014f4fa9a4f74227d0640719d1d92193c /shim/sl/canonical/ConversionUtils.cpp
parentee480d2d6538b0192d40a00ed696b30e2587430c (diff)
downloadarmnn-b016157f1eea1acc6a84308521c0b90543161da4.tar.gz
IVGCVSW-6954 'Arm NN SL Improvements'
* Move the Conv2D and DepthwiseConv2D validation to Optimization level when the weights and tensors are as constant inputs
* Take into account offset and scales values when doing INT8 to FP32 dequantization
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1f81f15640395ac041923b10dbe9151159715117
Diffstat (limited to 'shim/sl/canonical/ConversionUtils.cpp')
-rw-r--r--  shim/sl/canonical/ConversionUtils.cpp  46
1 file changed, 35 insertions, 11 deletions
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
index 96a8ddca6a..f48af32e21 100644
--- a/shim/sl/canonical/ConversionUtils.cpp
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -67,6 +67,11 @@ void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight, Layer
}
}
+armnn::IOutputSlot* LayerInputHandle::GetOutputSlot() const
+{
+ return m_OutputSlot;
+}
+
ConstTensorPin::ConstTensorPin(bool optional)
: m_Optional(optional)
{}
@@ -276,17 +281,6 @@ LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
case OperandLifeTime::CONSTANT_REFERENCE:
{
auto constantTensorDataType = operandTensorInfo.GetDataType();
- if (inputHandle)
- {
- if ((inputHandle->GetTensorInfo().GetDataType() == armnn::DataType::Float32
- || inputHandle->GetTensorInfo().GetDataType() == armnn::DataType::Float16)
- && (operandTensorInfo.GetDataType() == armnn::DataType::QAsymmU8
- || operandTensorInfo.GetDataType() == armnn::DataType::QAsymmS8))
- {
- constantTensorDataType = inputHandle->GetTensorInfo().GetDataType();
- }
- }
-
// The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand,
model,
@@ -1029,4 +1023,34 @@ bool SetupAndTrackLayerOutputSlot(const Operation& operation,
return true;
}
+bool IsConnectedToDequantize(armnn::IOutputSlot* ioutputSlot)
+{
+ VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
+ if (!ioutputSlot)
+ {
+ return false;
+ }
+ VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
+ // Find the connections and layers..
+ armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer();
+ if (owningLayer.GetType() == armnn::LayerType::Dequantize)
+ {
+ VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
+ armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0);
+ armnn::IOutputSlot* connection = inputSlot.GetConnection();
+ if (connection)
+ {
+ VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
+ armnn::IConnectableLayer& connectedLayer =
+ connection->GetOwningIConnectableLayer();
+ if (connectedLayer.GetType() == armnn::LayerType::Constant)
+ {
+ VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace armnn_driver