author     Ryan OShea <ryan.oshea3@arm.com>    2023-01-17 15:19:20 +0000
committer  Ryan OShea <ryan.oshea3@arm.com>    2023-02-03 10:31:10 +0000
commit     4c231de93b9b7c2af24e155550cb80b96f2c4bb5 (patch)
tree       fcbc43d7436cacc63ef7a90e73023135694ddf48
parent     555dc0982f85f0a146e99189bac523151797a056 (diff)
download   armnn-4c231de93b9b7c2af24e155550cb80b96f2c4bb5.tar.gz
IVGCVSW-7501 Allow constant tensors as inputs for input data in the delegate
Many of the TLCT tests were failing because they used constant tensors as the
data input for the layers. The functionality was already in place, but it was
not spread across the visit functions.

 * Check whether inputs are constant tensors and attempt to assign them to the
   input slots of layers.
 * Add missing checks to some functions that return a kTfLiteStatus so we can
   see if they fail.
 * Clean up the CreateConstTensor function.

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I8610b770aea56932a98f91c961d59b3de47c2ab5
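Every affected Visit*Operator function now ends with the same small addition: after the
output slot's TensorInfo is set, any constant inputs are connected before the usual
Connect call. A minimal sketch of that shared tail, assuming the existing ProcessInputs
and Connect helpers from delegate/src/DelegateUtils.hpp; the wrapper name
FinishVisitOperator is hypothetical and used here only for illustration:

    // Sketch only: FinishVisitOperator is a hypothetical wrapper; ProcessInputs
    // and Connect are the existing helpers in delegate/src/DelegateUtils.hpp.
    #include "DelegateUtils.hpp"

    namespace armnnDelegate
    {

    TfLiteStatus FinishVisitOperator(armnn::IConnectableLayer* layer,
                                     const armnn::TensorInfo& outputTensorInfo,
                                     DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode)
    {
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // Constant input tensors are added to the network as ConstantLayers and
        // connected to the layer's input slots; fail the node if that fails.
        if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
        {
            return kTfLiteError;
        }

        // Connect the remaining (non-constant) inputs and the outputs as before.
        return Connect(layer, tfLiteNode, delegateData);
    }

    } // namespace armnnDelegate

In the patch itself these lines are simply inlined into each visit function rather than
factored into a helper.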
-rw-r--r--   delegate/src/Activation.hpp         8
-rw-r--r--   delegate/src/ArgMinMax.hpp          8
-rw-r--r--   delegate/src/BatchMatMul.hpp       11
-rw-r--r--   delegate/src/BatchSpace.hpp        14
-rw-r--r--   delegate/src/Comparison.hpp         8
-rw-r--r--   delegate/src/Control.hpp            7
-rw-r--r--   delegate/src/Convolution.hpp       92
-rw-r--r--   delegate/src/DelegateUtils.hpp     36
-rw-r--r--   delegate/src/ElementwiseUnary.hpp   8
-rw-r--r--   delegate/src/FullyConnected.hpp    35
-rw-r--r--   delegate/src/Normalization.hpp     14
-rw-r--r--   delegate/src/Pooling.hpp           24
-rw-r--r--   delegate/src/Quantization.hpp       8
-rw-r--r--   delegate/src/Redefine.hpp          14
-rw-r--r--   delegate/src/Reduce.hpp             8
-rw-r--r--   delegate/src/Resize.hpp             8
-rw-r--r--   delegate/src/Round.hpp              8
-rw-r--r--   delegate/src/Shape.hpp              8
-rw-r--r--   delegate/src/Slice.hpp              8
-rw-r--r--   delegate/src/Softmax.hpp            8
-rw-r--r--   delegate/src/SpaceDepth.hpp        14
-rw-r--r--   delegate/src/Split.hpp              8
-rw-r--r--   delegate/src/StridedSlice.hpp       8
-rw-r--r--   delegate/src/Transpose.hpp          8
24 files changed, 283 insertions, 90 deletions
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 3560bfdae7..59066d23e3 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -120,6 +120,12 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(activationLayer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index dd28807f67..4e4a2a3f3a 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -119,6 +119,12 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
index 3b884a092f..49fba05238 100644
--- a/delegate/src/BatchMatMul.hpp
+++ b/delegate/src/BatchMatMul.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,8 +95,13 @@ namespace armnnDelegate
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(layer, tfLiteNode, delegateData);
- return kTfLiteOk;
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 903fe37eae..30c6dbfc15 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -103,6 +103,12 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
@@ -197,6 +203,12 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index ee121e3c5c..80354e835d 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -116,6 +116,12 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
inputTensorInfo1,
comparisonLayer,
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 2f83d2a37e..17f23d81ad 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -303,6 +303,13 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
return Connect(meanLayer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 17189027dc..a8559e2548 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -115,7 +115,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
- armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
armnn::TensorInfo biasTensorInfo;
if(biasEnabled)
@@ -181,12 +181,11 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
layer->SetBackendId(setBackend);
- if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
+ if(filterTensorInfo.IsConstant())
{
auto filter =
CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
- filterTensorInfo,
- armnn::Optional<armnn::PermutationVector &>());
+ filterTensorInfo);
armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -196,7 +195,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
if (biasEnabled)
{
const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+ if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
@@ -206,6 +205,18 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
}
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -290,7 +301,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
- armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
armnn::TensorInfo biasTensorInfo;
if(biasEnabled)
@@ -354,11 +365,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
// Add a constant layer for weights and biases if inputs are constant,
// which are connected to the Convolution3d layer as inputs.
- if (tflite::IsConstantTensor(&tfLiteFilterTensor))
+ if (filterTensorInfo.IsConstant())
{
auto filter = CreateConstTensor(&tfLiteFilterTensor,
- filterTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ filterTensorInfo);
armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
ARMNN_ASSERT(weightsLayer != nullptr);
@@ -370,11 +380,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
if(biasEnabled)
{
const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+ if(biasTensorInfo.IsConstant())
{
auto biases = CreateConstTensor(&tfLiteBiasTensor,
- biasTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ biasTensorInfo);
armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
ARMNN_ASSERT(biasLayer != nullptr);
@@ -384,6 +393,18 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
}
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
@@ -499,7 +520,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
}
- armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
// Assuming input is NHWC
unsigned int inputHeight = inputTensorInfo.GetShape()[1];
@@ -563,7 +584,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
layer->SetBackendId(setBackend);
- if(tflite::IsConstantTensor(&tfLiteFilterTensor))
+ if(filterTensorInfo.IsConstant())
{
// For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
@@ -576,7 +597,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
if (biasEnabled)
{
const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+ if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
@@ -586,6 +607,18 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
}
}
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -636,19 +669,19 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
return kTfLiteError;
}
- armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
- std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
- if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
+ const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
+ std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
+ if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
{
- for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
+ for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
{
outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
}
}
- if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
+ if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
{
- for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
+ for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
{
outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
}
@@ -716,7 +749,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
- armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+ const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
// TfLite uses NHWC tensors
const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
@@ -743,8 +776,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
// Set up filter
auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
- filterTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ filterTensorInfo);
armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
@@ -769,6 +801,18 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
+ // The data input can be constant, so we must check that this is allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index c0bef4f994..3e74225b15 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -545,10 +545,7 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
}
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
- armnn::TensorInfo& tensorInfo,
- armnn::Optional<armnn::PermutationVector&>
- permutationVector = armnn::EmptyOptional(),
- void* permutationData = nullptr)
+ const armnn::TensorInfo& tensorInfo)
{
if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
{
@@ -556,28 +553,7 @@ armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
"TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
}
- if(tflite::IsConstantTensor(tfLiteTensor))
- {
- tensorInfo.SetConstant();
- }
-
- if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
- {
- // Permute tensor info
- tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
- // then permute data using the shape from permuted tensor info
- armnnUtils::Permute(tensorInfo.GetShape(),
- permutationVector.value(),
- tfLiteTensor->data.data,
- permutationData,
- armnn::GetDataTypeSize(tensorInfo.GetDataType()));
-
- return armnn::ConstTensor(tensorInfo, permutationData);
- }
- else
- {
- return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
- }
+ return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
}
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
@@ -611,7 +587,7 @@ void CalcPadding(uint32_t inputSize,
}
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
- armnn::TensorInfo& constTensorInfo,
+ const armnn::TensorInfo& constTensorInfo,
TfLiteContext* tfLiteContext,
const TfLiteTensor& tfLiteTensor,
armnnDelegate::DelegateData& data,
@@ -633,8 +609,7 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
}
auto constantInput = CreateConstTensor(&tfLiteTensor,
- constTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ constTensorInfo);
armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
@@ -684,8 +659,7 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
return kTfLiteError;
}
auto constantInput = CreateConstTensor(&tfLiteInputTensor,
- inputTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ inputTensorInfo);
armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index 947e531162..4be6fba82e 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -78,6 +78,12 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 337f1153a1..1129951104 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -54,7 +54,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- armnn::TensorInfo weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
+ const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
// Check that we support fused activation before we attempt to create a layer
@@ -82,8 +82,6 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
return kTfLiteError;
}
- bool isConstantWeights = tflite::IsConstantTensor(&tfLiteWeightsTensor);
-
armnn::TensorInfo biasTensorInfo;
if (biasEnabled)
{
@@ -141,7 +139,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
armnn::FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
descriptor.m_BiasEnabled = biasEnabled;
- descriptor.m_ConstantWeights = isConstantWeights;
+ descriptor.m_ConstantWeights = weightsTensorInfo.IsConstant();
bool isSupported = false;
armnn::BackendId setBackend;
@@ -172,11 +170,10 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
ARMNN_ASSERT(layer != nullptr);
// Add a constant layer for weights and biases if inputs are constant.
- if (isConstantWeights)
+ if (weightsTensorInfo.IsConstant())
{
auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
- weightsTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ weightsTensorInfo);
armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
@@ -187,11 +184,10 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
if (biasEnabled)
{
const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
- if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+ if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
- biasTensorInfo,
- armnn::Optional<armnn::PermutationVector&>());
+ biasTensorInfo);
armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
ARMNN_ASSERT(biasLayer != nullptr);
@@ -201,6 +197,18 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
}
+ // The data input can also be constant, so we must check that this is also allocated to an input slot
+ if(inputTensorInfo.IsConstant())
+ {
+ auto input =
+ CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+ inputTensorInfo);
+
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ }
+
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
@@ -224,7 +232,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(1));
}
- if (biasEnabled && !tflite::IsConstantTensor(&tfLiteTensors[tfLiteNode->inputs->data[2]]))
+ if (biasEnabled && !biasTensorInfo.IsConstant())
{
delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(2));
}
@@ -233,7 +241,10 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
if (reshapeLayer == nullptr)
{
- Connect(layer, tfLiteNode, delegateData);
+ if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
}
if (outputTensorInfo.GetNumDimensions() > 2)
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index d0db43ea7c..ef2e524369 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -70,6 +70,12 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
@@ -143,6 +149,12 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 4dc8e0da98..1178b6d8dc 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -123,7 +123,17 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(poolingLayer, tfLiteNode, delegateData);
+
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
// Check and create activation
return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
@@ -299,7 +309,17 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Create and set output slots
armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- Connect(poolingLayer, tfLiteNode, delegateData);
+
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 64f57de505..f1192960e4 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -159,6 +159,12 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
return Connect(quantizeLayer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index 8f9a4e4ba0..864fb7af67 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -74,6 +74,12 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
@@ -240,6 +246,12 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index 3f4c118e3d..2d8b462cd2 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -133,6 +133,12 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index 0cb15d30e4..370f1ab2d2 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -191,6 +191,12 @@ TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
ARMNN_ASSERT(resizeLayer != nullptr);
return Connect(resizeLayer, tfLiteNode, delegateData);
diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp
index b920bd5d2f..7a060b1d8f 100644
--- a/delegate/src/Round.hpp
+++ b/delegate/src/Round.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -58,6 +58,12 @@ TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 625e6a88fb..d797563ab5 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -82,6 +82,12 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index d5712aefad..f19e3327e4 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -127,6 +127,12 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index 738f542239..31c6ac3677 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -142,6 +142,12 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(softmaxLayer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 2172d8678b..cc7f03413d 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -68,6 +68,12 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
@@ -133,6 +139,12 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index 5c094b405b..b183b55c54 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -334,6 +334,12 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
}
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
index d2c4d5da3a..998e3d3e14 100644
--- a/delegate/src/StridedSlice.hpp
+++ b/delegate/src/StridedSlice.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -142,6 +142,12 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
// Connect
return Connect(layer, tfLiteNode, delegateData);
}
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 15c53101f2..41178d0b59 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -99,6 +99,12 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
+ // Try to connect the Constant Inputs if there are any
+ if(ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
return Connect(transposeLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate