about summary refs log tree commit diff
path: root/ConversionUtils.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'ConversionUtils.cpp')
-rw-r--r--  ConversionUtils.cpp  44
1 files changed, 37 insertions, 7 deletions
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index 9cc6e286..c691c554 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,11 @@ bool LayerInputHandle::IsValid() const
void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::RuntimeException("LayerInputHandle is invalid");
+ }
+
if (m_OutputSlot)
{
m_OutputSlot->Connect(inputSlot);
@@ -40,7 +44,10 @@ void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::RuntimeException("LayerInputHandle is invalid");
+ }
if (m_OutputSlot)
{
m_OutputSlot->Disconnect(inputSlot);
@@ -52,14 +59,31 @@ const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
return m_TensorInfo;
}
+void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight,
+ LayerInputHandle& input)
+{
+ if (m_OutputSlot)
+ {
+ armnn::TensorInfo weightInfo = weight.GetTensorInfo();
+ armnn::TensorInfo inputInfo = input.GetTensorInfo();
+ armnn::TensorInfo biasInfo = GetTensorInfo();
+
+ SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);
+
+ m_TensorInfo = biasInfo;
+ m_OutputSlot->SetTensorInfo(biasInfo);
+ }
+}
+
ConstTensorPin::ConstTensorPin(bool optional)
: m_Optional(optional)
{}
-ConstTensorPin::ConstTensorPin(const armnn::TensorInfo& tensorInfo,
+ConstTensorPin::ConstTensorPin(armnn::TensorInfo& tensorInfo,
const void* valueStart,
uint32_t numBytes,
const armnn::PermutationVector& mappings)
+ : m_Optional(false)
{
armnn::IgnoreUnused(numBytes);
if (tensorInfo.GetNumBytes() != numBytes)
@@ -73,7 +97,7 @@ ConstTensorPin::ConstTensorPin(const armnn::TensorInfo& tensorInfo,
m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);
- m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
+ m_ConstTensor = armnn::ConstTensor(tensorInfo, m_SwizzledTensorData.data());
}
else
{
@@ -115,8 +139,11 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
armnn::IConnectableLayer* prevLayer,
ConversionData& data)
{
- ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
-
+ if (prevLayer->GetNumOutputSlots() != 1)
+ {
+ Fail("%s: Incorrect Number of OutputSlots expected 1 was %i", __func__, prevLayer->GetNumOutputSlots());
+ return nullptr;
+ }
prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
armnn::IConnectableLayer* activationLayer = prevLayer;
@@ -164,10 +191,12 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
prevLayer->GetOutputSlot(0).GetTensorInfo(),
tensorInfo,
activationDesc);
@@ -177,6 +206,7 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
}
activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);