Diffstat (limited to 'delegate/src/Quantization.hpp')
-rw-r--r--  delegate/src/Quantization.hpp  132
1 file changed, 122 insertions, 10 deletions
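The diff below replaces the stubbed-out VisitDequantizeOperator and VisitQuantizeOperator functions, which previously just returned kTfLiteError, with full implementations. For orientation, a minimal sketch of how a delegate's node visitor can route TfLite builtin operator codes to these functions follows; the VisitNodeSketch wrapper and its dispatch are illustrative assumptions, not part of this diff (the real routing lives outside this file):

// Sketch only, not part of this diff: a hypothetical node visitor routing
// TfLite builtin operator codes to the visit functions implemented below.
// kTfLiteBuiltinQuantize / kTfLiteBuiltinDequantize are TfLite enum values.
TfLiteStatus VisitNodeSketch(DelegateData& delegateData,
                             TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             int nodeIndex,
                             int32_t builtinCode)
{
    switch (builtinCode)
    {
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData, tfLiteContext, tfLiteNode,
                                           nodeIndex, kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData, tfLiteContext, tfLiteNode,
                                         nodeIndex, kTfLiteBuiltinQuantize);
        default:
            // Anything unhandled stays with the default TfLite runtime.
            return kTfLiteError;
    }
}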
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 31196233f9..4adbd11616 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -13,22 +13,134 @@
namespace armnnDelegate
{

+TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     TfLiteNode* tfLiteNode,
+                                     int nodeIndex,
+                                     int32_t tfLiteDequantizeOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteDequantizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLiteDequantizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsDequantizeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outputTensorInfo);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+    ARMNN_ASSERT(dequantizeLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    return Connect(dequantizeLayer, tfLiteNode, delegateData);
+}
+
TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
-                                   int32_t operatorCode)
+                                   int32_t tfLiteQuantizeOperatorCode)
{
-    return kTfLiteError;
-}
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

-TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
-                                     TfLiteContext* tfLiteContext,
-                                     TfLiteNode* tfLiteNode,
-                                     int nodeIndex,
-                                     int32_t operatorCode)
-{
-    return kTfLiteError;
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteQuantizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLiteQuantizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Only affine per-layer quantization is supported.
+    if (!IsAffineQuantization(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
+            tfLiteQuantizeOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsQuantizeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outputTensorInfo);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+    ARMNN_ASSERT(quantizeLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    return Connect(quantizeLayer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate
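Both visit functions share the delegate's two-phase pattern: when delegateData.m_Network is null the call only checks backend support through FORWARD_LAYER_SUPPORT_FUNC, and when a network is being built it adds the layer, sets the output slot's TensorInfo, and connects the node. The affine-quantization guard in VisitQuantizeOperator calls an IsAffineQuantization helper whose definition sits outside this diff; a minimal sketch of such a check, assuming it only inspects TfLite's per-tensor quantization metadata (the real helper may do more), could be:

// Illustrative sketch only: the delegate's real IsAffineQuantization helper
// is defined elsewhere, and its exact body here is an assumption.
bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    // TfLite tags scale/zero-point (affine) quantization with
    // kTfLiteAffineQuantization in the tensor's quantization metadata.
    return tfLiteTensor.quantization.type == kTfLiteAffineQuantization;
}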