aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2021-06-02 18:35:16 +0100
committerJim Flynn <jim.flynn@arm.com>2021-06-16 14:28:46 +0000
commitd3381d59d8067b55e4e52241fe1fca6ecd6e9763 (patch)
tree9e9a02550786a55f5cef46bc4b88b427fab29820
parenta20d2b8756fb0dea15b1f7620072e510f4977aeb (diff)
downloadandroid-nn-driver-d3381d59d8067b55e4e52241fe1fca6ecd6e9763.tar.gz
Add QUANT8_ASYMM_SIGNED in PadV2 for HAL1.3
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I597344d91975d7067f137e6587b751500de33837
-rw-r--r--ConversionUtils_1_2.hpp2
-rw-r--r--Utils.cpp47
-rw-r--r--Utils.hpp19
3 files changed, 59 insertions, 9 deletions
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index f884f7c0..c66a2f59 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1573,7 +1573,7 @@ bool ConvertPadV2(const HalOperation& operation, const HalModel& model, Conversi
return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
}
}
- else if (operandType0 == HalOperandType::TENSOR_QUANT8_ASYMM && operandType2 == HalOperandType::INT32)
+ else if (isQuantizedOperand(operandType0) && operandType2 == HalOperandType::INT32)
{
int32_t intPadValue = 0;
if (!GetInputInt32<HalPolicy>(operation, 2, intPadValue, model, data))
diff --git a/Utils.cpp b/Utils.cpp
index 873dce4a..60e7a80c 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -603,6 +603,53 @@ bool AreDynamicTensorsSupported()
#endif
}
+bool isQuantizedOperand(const V1_0::OperandType& operandType)
+{
+ if (operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
/// Returns true if the given operand type is a quantized data type, false otherwise.
/// HAL 1.2 adds the symmetric and per-channel quantized types on top of HAL 1.0.
bool isQuantizedOperand(const V1_2::OperandType& operandType)
{
    // Return the predicate directly instead of branching to return true/false.
    return operandType == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           operandType == V1_2::OperandType::TENSOR_QUANT16_SYMM;
}
#endif
+
#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
/// Returns true if the given operand type is a quantized data type, false otherwise.
/// HAL 1.3 adds TENSOR_QUANT8_ASYMM_SIGNED on top of the HAL 1.2 quantized types.
bool isQuantizedOperand(const V1_3::OperandType& operandType)
{
    // Return the predicate directly instead of branching to return true/false.
    return operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           operandType == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
}
#endif
+
std::string GetFileTimestamp()
{
// used to get a timestamp to name diagnostic files (the ArmNN serialized graph
diff --git a/Utils.hpp b/Utils.hpp
index 893c4a08..da101535 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -70,22 +70,25 @@ void* GetMemoryFromPool(V1_0::DataLocation location,
/// Can throw UnsupportedOperand
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);
-#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
-armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
-#endif
-
-#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
-armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
-#endif
-
std::string GetOperandSummary(const V1_0::Operand& operand);
+// Returns true for any quantized data type, false for the rest.
+bool isQuantizedOperand(const V1_0::OperandType& operandType);
+
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
+armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
+
std::string GetOperandSummary(const V1_2::Operand& operand);
+
+bool isQuantizedOperand(const V1_2::OperandType& operandType);
#endif
#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
+
std::string GetOperandSummary(const V1_3::Operand& operand);
+
+bool isQuantizedOperand(const V1_3::OperandType& operandType);
#endif
template <typename HalModel>