diff options (diffstat)
-rw-r--r--  delegate/src/Control.hpp            |  1
-rw-r--r--  delegate/src/Convolution.hpp        | 12
-rw-r--r--  delegate/src/DelegateUtils.hpp      |  2
-rw-r--r--  delegate/src/ElementwiseBinary.hpp  |  4
-rw-r--r--  delegate/src/FullyConnected.hpp     |  4
-rw-r--r--  delegate/src/Pooling.hpp            |  7
6 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp index fd1fdee940..2f83d2a37e 100644 --- a/delegate/src/Control.hpp +++ b/delegate/src/Control.hpp @@ -126,7 +126,6 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData, // Verify we support the fused activation before attempting to create a layer TfLiteFusedActivation activationType = concatenationParameters->activation; - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp index 7ea3a3a987..17189027dc 100644 --- a/delegate/src/Convolution.hpp +++ b/delegate/src/Convolution.hpp @@ -102,12 +102,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData, const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType=kTfLiteActNone; if (tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) @@ -279,12 +277,10 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData, const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType=kTfLiteActNone; if 
(tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) @@ -490,12 +486,10 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData, const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType = kTfLiteActNone; if (tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp index 91447576d0..c0bef4f994 100644 --- a/delegate/src/DelegateUtils.hpp +++ b/delegate/src/DelegateUtils.hpp @@ -342,7 +342,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext, data.m_Backends, isSupported, setBackend, - prevLayer->GetOutputSlot(0).GetTensorInfo(), + activationOutputInfo, activationOutputInfo, activationDesc); if (!isSupported) diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp index f21d6afc28..8096acfefb 100644 --- a/delegate/src/ElementwiseBinary.hpp +++ b/delegate/src/ElementwiseBinary.hpp @@ -255,12 +255,10 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData, const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); auto* tfLiteNodeParameters = 
reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType = kTfLiteActNone; if (tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp index ee553ce81c..337f1153a1 100644 --- a/delegate/src/FullyConnected.hpp +++ b/delegate/src/FullyConnected.hpp @@ -59,12 +59,10 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData, // Check that we support fused activation before we attempt to create a layer auto* tfLiteNodeParameters = reinterpret_cast<TfLiteFullyConnectedParams *>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType=kTfLiteActNone; if (tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp index d0a73b433b..4dc8e0da98 100644 --- a/delegate/src/Pooling.hpp +++ b/delegate/src/Pooling.hpp @@ -50,12 +50,10 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData, const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType = kTfLiteActNone; if 
(tfLiteNodeParameters) { activationType = tfLiteNodeParameters->activation; - - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) @@ -233,7 +231,7 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData, // Check activation by parsing the string from the flexbuffer map std::string activationTypeStr = m["activation"].AsString().str(); - TfLiteFusedActivation activationType; + TfLiteFusedActivation activationType = kTfLiteActNone; if (activationTypeStr == "kTfLiteActRelu") { @@ -264,7 +262,6 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData, activationType = kTfLiteActNone; } - const armnn::TensorInfo& activationOutputInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, outputTensorInfo, activationType); if(activationStatus != kTfLiteOk) |