path: root/delegate/opaque/src/armnn_delegate.cpp
author     Ryan OShea <ryan.oshea3@arm.com>  2023-04-11 10:54:07 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2023-04-13 12:45:40 +0000
commit     a37ccb006ad0bc49c34ff0d1741fe04d9ca55d5b (patch)
tree       215ac44fbbe6093b5fe0eb3ee21fa09a31972c10 /delegate/opaque/src/armnn_delegate.cpp
parent     c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (diff)
download   armnn-a37ccb006ad0bc49c34ff0d1741fe04d9ca55d5b.tar.gz
IVGCVSW-7564 Implement Cast operator for Opaque Delegate
* Adds VisitCast function to Redefine.hpp
* Enables Cast Test for OpaqueUnitTests
* Various Fixes to the opaque delegate to allow operator to run

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I43d42eea5c987d6aed8a0f909a6bf583fddcc94e
Diffstat (limited to 'delegate/opaque/src/armnn_delegate.cpp')
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp | 69
1 file changed, 39 insertions(+), 30 deletions(-)
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index ee1a4ed211..04a4eae12e 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -106,11 +106,14 @@ ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}
-TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate)
+TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
+ // We are required to have the void* data parameter in the function signature, but we don't actually use it.
+ armnn::IgnoreUnused(data);
+
TfLiteIntArray* supportedOperators =
static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
- (tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);
+ (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
if(supportedOperators == nullptr)
{
return kTfLiteError;
@@ -142,7 +145,7 @@ TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate*
ArmnnSubgraph::Create(tfLiteContext,
parameters,
static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
- parameters->delegate->data_)));
+ parameters->delegate->opaque_delegate_builder->data)));
}
);
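The two hunks above follow from how an opaque delegate exposes its state to TensorFlow Lite: the delegate object travels in the data field of a TfLiteOpaqueDelegateBuilder, and TFLite hands it back either through the void* data parameter of the Prepare callback or via TfLiteOpaqueDelegateGetData(). A minimal sketch of that wiring is shown below; the header paths, the DelegateOptions constructor and the CreateDelegateSketch helper are assumptions for illustration, not code taken from this commit.

    // Illustrative wiring of an opaque delegate's state pointer (ownership/cleanup omitted).
    #include <tensorflow/lite/c/c_api_opaque.h>  // TfLiteOpaqueDelegateBuilder, TfLiteOpaqueDelegateCreate (path varies by TF version)
    #include <armnn_delegate.hpp>                // assumed opaque-delegate header for ArmnnOpaqueDelegate / DelegateOptions

    TfLiteOpaqueDelegate* CreateDelegateSketch()
    {
        // Delegate-specific state lives behind the builder's void* data field.
        armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
        auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);

        TfLiteOpaqueDelegateBuilder builder{};
        builder.data    = armnnDelegate;  // retrieved later via TfLiteOpaqueDelegateGetData(delegate)
                                          // or parameters->delegate->opaque_delegate_builder->data
        builder.Prepare = DoPrepare;      // matches the new (context, delegate, void* data) signature

        return TfLiteOpaqueDelegateCreate(&builder);
    }

Because the delegate object is only reachable through that data pointer, the direct data_ member access used by the non-opaque path no longer applies, which is what the replacements with TfLiteOpaqueDelegateGetData() and opaque_delegate_builder->data reflect.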
@@ -366,7 +369,7 @@ TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
const int32_t tensorId = outputs->data[i];
const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);
- if(!tensor)
+ if(!IsValid(tensor))
{
return kTfLiteError;
}
@@ -411,8 +414,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
std::vector<armnn::BindingPointInfo> outputBindings;
// Add input layer
- auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
- if (status != kTfLiteOk)
+ if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
{
throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
}
@@ -440,8 +442,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
<< std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";
// Add Output layer
- status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
- if (status != kTfLiteOk)
+ if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
{
throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
}
@@ -544,24 +545,27 @@ TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
- // Prepare inputs
- armnn::InputTensors inputTensors;
- size_t inputIndex = 0;
- const int* inputs;
+ // Get the array of input indices; TfLiteOpaqueNodeInputs sets inputIndexArray to point to an
+ // int array of tensor indices, one for each input slot in the node. Those indices are then
+ // used to look up the corresponding opaque tensors from the context.
+ const int* inputIndexArray;
int numInputs;
- if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputs, &numInputs) != kTfLiteOk)
+ if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
{
throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
}
+ // Prepare inputs
+ armnn::InputTensors inputTensors;
+ size_t inputIndex = 0;
for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
{
- TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputs[inputIdx]);
+ TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);
- if(!tensor)
+ if(!IsValid(tensor))
{
return kTfLiteError;
}
-
+ // Only bind the tensor if it is not read-only (kTfLiteMmapRo)
if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
{
const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
@@ -574,29 +578,29 @@ TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpa
}
}
- // Prepare outputs
- armnn::OutputTensors outputTensors;
- size_t outputIndex = 0;
- const int* outputs;
+ // Get the array of output indices; TfLiteOpaqueNodeOutputs sets outputIndexArray to point to an
+ // int array of tensor indices, one for each output slot in the node. Those indices are then
+ // used to look up the corresponding opaque tensors from the context.
+ const int* outputIndexArray;
int numOutputs;
- if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputs, &numOutputs) != kTfLiteOk)
+ if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
{
throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
}
+ // Pair each output tensor index with an armnn::Tensor built from its BindingPointInfo
+ armnn::OutputTensors outputTensors;
for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
{
- const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
- TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputs[outputIdx]);
-
- if(!tensor)
+ const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
+ TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
+ if(!IsValid(tensor))
{
return kTfLiteError;
}
- const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
- outputTensors.emplace_back(outputIdx, outputTensor);
-
- ++outputIndex;
+ const armnn::Tensor outputTensor(outputBinding.second, reinterpret_cast<TfLiteTensor*>(tensor)->data
+ .data);
+ outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
}
// Run graph
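For context on the emplace_back change above: armnn's InputTensors and OutputTensors are vectors of (binding id, tensor) pairs, so the first argument has to be the binding id the network's input/output layers were created with (in this delegate, the TfLite tensor index), not the loop counter. A small reference sketch under that assumption follows; AddOutput is a made-up helper name used only for illustration.

    #include <armnn/Tensor.hpp>  // armnn::Tensor, armnn::TensorInfo, armnn::OutputTensors
    #include <armnn/Types.hpp>   // armnn::LayerBindingId

    // OutputTensors is std::vector<std::pair<LayerBindingId, Tensor>> (InputTensors is the
    // ConstTensor equivalent), so each entry pairs a binding id with a user-owned buffer.
    void AddOutput(armnn::OutputTensors& outputTensors,
                   armnn::LayerBindingId bindingId,   // in the delegate: the TfLite tensor index
                   const armnn::TensorInfo& info,
                   void* data)
    {
        outputTensors.emplace_back(bindingId, armnn::Tensor(info, data));
    }

Passing outputIndexArray[outputIdx] rather than the loop counter keeps that id in sync with the ids used when the output layers were added to the network.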
@@ -618,9 +622,14 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
{
switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
{
+ case kTfLiteBuiltinCast:
+ return VisitCastOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinCast);
default:
return kTfLiteError;
}
}
-
} // armnnOpaqueDelegate namespace
\ No newline at end of file
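
The VisitCastOperator function itself lives in Redefine.hpp, which is outside this diffstat filter. For orientation, here is a minimal sketch of what such a visitor could look like, modelled on the classic (non-opaque) ArmNN delegate; only the call signature is taken from the VisitNode hunk above, and the helpers GetTensorInfoForTfLiteOpaqueTensor and Connect are assumed names that may not match the real Redefine.hpp.

    // Hypothetical sketch of a Cast visitor for the opaque delegate. The signature matches the
    // call site in VisitNode; the body is an assumption modelled on the classic delegate visitors.
    TfLiteStatus VisitCastOperator(DelegateData& delegateData,
                                   TfLiteOpaqueContext* tfLiteContext,
                                   TfLiteOpaqueNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t operatorCode)
    {
        // A full implementation would use nodeIndex/operatorCode in its validation checks.
        armnn::IgnoreUnused(nodeIndex, operatorCode);

        // Cast has exactly one input and one output.
        const int* inputIndexArray;
        int numInputs;
        if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk || numInputs != 1)
        {
            return kTfLiteError;
        }
        const int* outputIndexArray;
        int numOutputs;
        if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk || numOutputs != 1)
        {
            return kTfLiteError;
        }

        const TfLiteOpaqueTensor* tfLiteInputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[0]);
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[0]);
        if (!IsValid(tfLiteInputTensor) || !IsValid(tfLiteOutputTensor))
        {
            return kTfLiteError;
        }

        // Translate TfLite tensor metadata into an armnn::TensorInfo (helper name assumed);
        // a backend support check would normally run here before any layer is added.
        const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

        // Add the Cast layer to the armnn network and set its output shape/type.
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer("Cast");
        if (layer == nullptr)
        {
            return kTfLiteError;
        }
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // Wire the layer's input/output slots to the delegate's per-tensor slots (helper assumed).
        return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
    }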