path: root/delegate/src/Convolution.hpp
Diffstat (limited to 'delegate/src/Convolution.hpp')
-rw-r--r--  delegate/src/Convolution.hpp  167
1 file changed, 167 insertions(+), 0 deletions(-)
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 96612e0214..a7d6c1de26 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -204,6 +204,168 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs < 2)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Minimum of %d inputs required, found %d, in node #%d",
+ 2, numInputs, nodeIndex);
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ armnn::Convolution3dDescriptor descriptor;
+    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
+    if (!params)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Conv3D parameters (builtin_data) missing in node #%d", nodeIndex);
+        return kTfLiteError;
+    }
+
+    bool biasEnabled = numInputs == 3;
+ descriptor.m_BiasEnabled = biasEnabled;
+ descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
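+    // NDHWC lays tensors out as [ batch, depth, height, width, channels ]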
+ descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
+ descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
+ descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
+ descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
+ descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
+ descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+
+ armnn::TensorInfo biasTensorInfo;
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
+ }
+ else
+ {
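+        // No bias input was supplied: create a 1-element placeholder TensorInfo so the support check below
+        // always receives a bias argument; with m_BiasEnabled set to false the backend ignores its contents.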
+ biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+ }
+
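+    // The backend support check takes the bias as an armnn::Optional<TensorInfo>, so wrap the info chosen above.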
+ armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
+
+ // TfLite uses NDHWC tensors
+ const unsigned int inputDepth = inputTensorInfo.GetShape()[1];
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[3];
+
+    // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
+ const unsigned int filterDepth = filterTensorInfo.GetShape()[0];
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ // Calculate padding
+ CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
+ descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
+ CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
+ descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+ CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
+ descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
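+    // Worked example, assuming TfLite's usual SAME-padding rule (a compilable sketch follows this diff excerpt):
+    //   inputHeight = 16, filterHeight = 3, strideY = 2, dilationY = 1
+    //   outputHeight = ceil(16 / 2) = 8
+    //   totalPadding = max(0, (8 - 1) * 2 + 3 - 16) = 1  ->  m_PadTop = 0, m_PadBottom = 1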
+
+    // If m_Network is a nullptr, TfLite is still querying whether this operator can be delegated, so run the
+    // backend support check and return without modifying the network. If the operator is supported, this function
+    // is called again via VisitConvolutionOperator to add the layer to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsConvolution3dSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor,
+ filterTensorInfo,
+ optionalBiasInfo);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+
+    // If the weights (and bias, when present) are constant tensors, add constant layers for them and connect
+    // those layers to the corresponding input slots of the Convolution3d layer.
+ if (tflite::IsConstantTensor(&tfLiteFilterTensor))
+ {
+ auto filter = CreateConstTensor(&tfLiteFilterTensor,
+ filterTensorInfo,
+ armnn::Optional<armnn::PermutationVector&>());
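+        // An empty Optional<PermutationVector&> means the constant filter data is used as-is, with no permutation.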
+
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ ARMNN_ASSERT(weightsLayer != nullptr);
+
+ weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+ weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+ }
+
+ if(biasEnabled)
+ {
+ const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+ {
+ auto biases = CreateConstTensor(&tfLiteBiasTensor,
+ biasTensorInfo,
+ armnn::Optional<armnn::PermutationVector&>());
+
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
+ ARMNN_ASSERT(biasLayer != nullptr);
+
+ biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+ biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+ }
+ }
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ Connect(layer, tfLiteNode, delegateData);
+
+    // Check for a fused activation, reusing the Conv3D parameters validated at the top of the function
+    TfLiteFusedActivation activationType = params->activation;
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+#endif
+
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
@@ -581,6 +743,11 @@ TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
{
case kTfLiteBuiltinConv2d:
return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+ case kTfLiteBuiltinConv3d:
+ return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
+#endif
case kTfLiteBuiltinDepthwiseConv2d:
return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
case kTfLiteBuiltinTransposeConv:
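
CalcPadding itself is defined earlier in Convolution.hpp and is outside this hunk. For reference, a minimal sketch of the conventional TfLite SAME-padding computation for one spatial axis, matching the worked example in the Conv3d hunk above, might look like the following; the function name and exact rounding behaviour here are assumptions, not the delegate's actual code.

#include <cstdint>

// Sketch only: SAME padding keeps outputSize = ceil(inputSize / stride) by
// distributing the required padding across the front and back of the axis.
void CalcSamePaddingSketch(uint32_t inputSize, uint32_t filterSize,
                           uint32_t stride, uint32_t dilation,
                           uint32_t& paddingFront, uint32_t& paddingBack)
{
    uint32_t outputSize  = (inputSize + stride - 1) / stride;               // ceil division
    uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);  // effective filter extent
    uint32_t needed      = (outputSize - 1) * stride + dilatedSize;

    uint32_t total = needed > inputSize ? needed - inputSize : 0;
    paddingFront = total / 2;               // any odd element of padding goes to the back
    paddingBack  = total - paddingFront;
}

For inputSize = 16, filterSize = 3, stride = 2, dilation = 1 this yields paddingFront = 0 and paddingBack = 1, as in the worked example above; with VALID padding both values would simply remain 0.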